351 files changed, 21421 insertions(+), 0 deletions(-)
diff --git a/src/ceph-volume/ceph_volume/__init__.py b/src/ceph-volume/ceph_volume/__init__.py new file mode 100644 index 00000000..640fb1e6 --- /dev/null +++ b/src/ceph-volume/ceph_volume/__init__.py @@ -0,0 +1,22 @@ +from collections import namedtuple + + +sys_info = namedtuple('sys_info', ['devices']) +sys_info.devices = dict() + + +class UnloadedConfig(object): + """ + This class is used as the default value for conf.ceph so that if + a configuration file is not successfully loaded then it will give + a nice error message when values from the config are used. + """ + def __getattr__(self, *a): + raise RuntimeError("No valid ceph configuration file was loaded.") + +conf = namedtuple('config', ['ceph', 'cluster', 'verbosity', 'path', 'log_path']) +conf.ceph = UnloadedConfig() + +__version__ = "1.0.0" + +__release__ = "nautilus" diff --git a/src/ceph-volume/ceph_volume/api/__init__.py b/src/ceph-volume/ceph_volume/api/__init__.py new file mode 100644 index 00000000..ecc97129 --- /dev/null +++ b/src/ceph-volume/ceph_volume/api/__init__.py @@ -0,0 +1,3 @@ +""" +Device API that can be shared among other implementations. +""" diff --git a/src/ceph-volume/ceph_volume/api/lvm.py b/src/ceph-volume/ceph_volume/api/lvm.py new file mode 100644 index 00000000..30362f1b --- /dev/null +++ b/src/ceph-volume/ceph_volume/api/lvm.py @@ -0,0 +1,1136 @@ +""" +API for CRUD lvm tag operations. Follows the Ceph LVM tag naming convention +that prefixes tags with ``ceph.`` and uses ``=`` for assignment, and provides +set of utilities for interacting with LVM. +""" +import logging +import os +import uuid +from itertools import repeat +from math import floor +from ceph_volume import process, util +from ceph_volume.exceptions import SizeAllocationError + +logger = logging.getLogger(__name__) + + +def convert_filters_to_str(filters): + """ + Convert filter args from dictionary to following format - + filters={filter_name=filter_val,...} + """ + if not filters: + return filters + + filter_arg = '' + for k, v in filters.items(): + filter_arg += k + '=' + v + ',' + # get rid of extra comma at the end + filter_arg = filter_arg[:len(filter_arg) - 1] + + return filter_arg + + +def convert_tags_to_str(tags): + """ + Convert tags from dictionary to following format - + tags={tag_name=tag_val,...} + """ + if not tags: + return tags + + tag_arg = 'tags={' + for k, v in tags.items(): + tag_arg += k + '=' + v + ',' + # get rid of extra comma at the end + tag_arg = tag_arg[:len(tag_arg) - 1] + '}' + + return tag_arg + + +def make_filters_lvmcmd_ready(filters, tags): + """ + Convert filters (including tags) from dictionary to following format - + filter_name=filter_val...,tags={tag_name=tag_val,...} + + The command will look as follows = + lvs -S filter_name=filter_val...,tags={tag_name=tag_val,...} + """ + filters = convert_filters_to_str(filters) + tags = convert_tags_to_str(tags) + + if filters and tags: + return filters + ',' + tags + if filters and not tags: + return filters + if not filters and tags: + return tags + else: + return '' + + +def _output_parser(output, fields): + """ + Newer versions of LVM allow ``--reportformat=json``, but older versions, + like the one included in Xenial do not. 
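As a quick illustration of how the three helpers above compose (the filter and tag values here are hypothetical)::

    >>> convert_filters_to_str({'vg_name': 'ceph-vg'})
    'vg_name=ceph-vg'
    >>> convert_tags_to_str({'ceph.osd_id': '0'})
    'tags={ceph.osd_id=0}'
    >>> make_filters_lvmcmd_ready({'vg_name': 'ceph-vg'}, {'ceph.osd_id': '0'})
    'vg_name=ceph-vg,tags={ceph.osd_id=0}'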
LVM has the ability to filter and + format its output so we assume the output will be in a format this parser + can handle (using ';' as a delimiter) + + :param fields: A string, possibly using ',' to group many items, as it + would be used on the CLI + :param output: The CLI output from the LVM call + """ + field_items = fields.split(',') + report = [] + for line in output: + # clear the leading/trailing whitespace + line = line.strip() + + # remove the extra '"' in each field + line = line.replace('"', '') + + # prevent moving forward with empty contents + if not line: + continue + + # splitting on ';' because that is what the lvm call uses as + # '--separator' + output_items = [i.strip() for i in line.split(';')] + # map the output to the fields + report.append( + dict(zip(field_items, output_items)) + ) + + return report + + +def _splitname_parser(line): + """ + Parses the output from ``dmsetup splitname``, that should contain prefixes + (--nameprefixes) and set the separator to ";" + + Output for /dev/mapper/vg-lv will usually look like:: + + DM_VG_NAME='/dev/mapper/vg';DM_LV_NAME='lv';DM_LV_LAYER='' + + + The ``VG_NAME`` will usually not be what other callers need (e.g. just 'vg' + in the example), so this utility will split ``/dev/mapper/`` out, so that + the actual volume group name is kept + + :returns: dictionary with stripped prefixes + """ + parsed = {} + try: + parts = line[0].split(';') + except IndexError: + logger.exception('Unable to parse mapper device: %s', line) + return parsed + + for part in parts: + part = part.replace("'", '') + key, value = part.split('=') + if 'DM_VG_NAME' in key: + value = value.split('/dev/mapper/')[-1] + key = key.split('DM_')[-1] + parsed[key] = value + + return parsed + + +def sizing(device_size, parts=None, size=None): + """ + Calculate proper sizing to fully utilize the volume group in the most + efficient way possible. To prevent situations where LVM might accept + a percentage that is beyond the vg's capabilities, it will refuse with + an error when requesting a larger-than-possible parameter, in addition + to rounding down calculations. 
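As a concrete sketch of ``_output_parser`` above (the report line is invented), the ``;``-separated output is mapped onto the requested fields in order::

    >>> _output_parser(['  "vg0";"1";"4"  '], 'vg_name,pv_count,lv_count')
    [{'vg_name': 'vg0', 'pv_count': '1', 'lv_count': '4'}]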
+ + A dictionary with different sizing parameters is returned, to make it + easier for others to choose what they need in order to create logical + volumes:: + + >>> sizing(100, parts=2) + >>> {'parts': 2, 'percentages': 50, 'sizes': 50} + + """ + if parts is not None and size is not None: + raise ValueError( + "Cannot process sizing with both parts (%s) and size (%s)" % (parts, size) + ) + + if size and size > device_size: + raise SizeAllocationError(size, device_size) + + def get_percentage(parts): + return int(floor(100 / float(parts))) + + if parts is not None: + # Prevent parts being 0, falling back to 1 (100% usage) + parts = parts or 1 + percentages = get_percentage(parts) + + if size: + parts = int(device_size / size) or 1 + percentages = get_percentage(parts) + + sizes = device_size / parts if parts else int(floor(device_size)) + + return { + 'parts': parts, + 'percentages': percentages, + 'sizes': int(sizes/1024/1024/1024), + } + + +def parse_tags(lv_tags): + """ + Return a dictionary mapping of all the tags associated with + a Volume from the comma-separated tags coming from the LVM API + + Input look like:: + + "ceph.osd_fsid=aaa-fff-bbbb,ceph.osd_id=0" + + For the above example, the expected return value would be:: + + { + "ceph.osd_fsid": "aaa-fff-bbbb", + "ceph.osd_id": "0" + } + """ + if not lv_tags: + return {} + tag_mapping = {} + tags = lv_tags.split(',') + for tag_assignment in tags: + if not tag_assignment.startswith('ceph.'): + continue + key, value = tag_assignment.split('=', 1) + tag_mapping[key] = value + + return tag_mapping + + +def _vdo_parents(devices): + """ + It is possible we didn't get a logical volume, or a mapper path, but + a device like /dev/sda2, to resolve this, we must look at all the slaves of + every single device in /sys/block and if any of those devices is related to + VDO devices, then we can add the parent + """ + parent_devices = [] + for parent in os.listdir('/sys/block'): + for slave in os.listdir('/sys/block/%s/slaves' % parent): + if slave in devices: + parent_devices.append('/dev/%s' % parent) + parent_devices.append(parent) + return parent_devices + + +def _vdo_slaves(vdo_names): + """ + find all the slaves associated with each vdo name (from realpath) by going + into /sys/block/<realpath>/slaves + """ + devices = [] + for vdo_name in vdo_names: + mapper_path = '/dev/mapper/%s' % vdo_name + if not os.path.exists(mapper_path): + continue + # resolve the realpath and realname of the vdo mapper + vdo_realpath = os.path.realpath(mapper_path) + vdo_realname = vdo_realpath.split('/')[-1] + slaves_path = '/sys/block/%s/slaves' % vdo_realname + if not os.path.exists(slaves_path): + continue + devices.append(vdo_realpath) + devices.append(mapper_path) + devices.append(vdo_realname) + for slave in os.listdir(slaves_path): + devices.append('/dev/%s' % slave) + devices.append(slave) + return devices + + +def _is_vdo(path): + """ + A VDO device can be composed from many different devices, go through each + one of those devices and its slaves (if any) and correlate them back to + /dev/mapper and their realpaths, and then check if they appear as part of + /sys/kvdo/<name>/statistics + + From the realpath of a logical volume, determine if it is a VDO device or + not, by correlating it to the presence of the name in + /sys/kvdo/<name>/statistics and all the previously captured devices + """ + if not os.path.isdir('/sys/kvdo'): + return False + realpath = os.path.realpath(path) + realpath_name = realpath.split('/')[-1] + devices = [] + vdo_names = set() + 
# get all the vdo names + for dirname in os.listdir('/sys/kvdo/'): + if os.path.isdir('/sys/kvdo/%s/statistics' % dirname): + vdo_names.add(dirname) + + # find all the slaves associated with each vdo name (from realpath) by + # going into /sys/block/<realpath>/slaves + devices.extend(_vdo_slaves(vdo_names)) + + # Find all possible parents, looking into slaves that are related to VDO + devices.extend(_vdo_parents(devices)) + + return any([ + path in devices, + realpath in devices, + realpath_name in devices]) + + +def is_vdo(path): + """ + Detect if a path is backed by VDO, proxying the actual call to _is_vdo so + that we can prevent an exception breaking OSD creation. If an exception is + raised, it will get captured and logged to file, while returning + a ``False``. + """ + try: + if _is_vdo(path): + return '1' + return '0' + except Exception: + logger.exception('Unable to properly detect device as VDO: %s', path) + return '0' + + +def dmsetup_splitname(dev): + """ + Run ``dmsetup splitname`` and parse the results. + + .. warning:: This call does not ensure that the device is correct or that + it exists. ``dmsetup`` will happily take a non existing path and still + return a 0 exit status. + """ + command = [ + 'dmsetup', 'splitname', '--noheadings', + "--separator=';'", '--nameprefixes', dev + ] + out, err, rc = process.call(command) + return _splitname_parser(out) + + +def is_ceph_device(lv): + try: + lv.tags['ceph.osd_id'] + except (KeyError, AttributeError): + logger.warning('device is not part of ceph: %s', lv) + return False + + if lv.tags['ceph.osd_id'] == 'null': + return False + else: + return True + + +#################################### +# +# Code for LVM Physical Volumes +# +################################ + +PV_FIELDS = 'pv_name,pv_tags,pv_uuid,vg_name,lv_uuid' + +class PVolume(object): + """ + Represents a Physical Volume from LVM, with some top-level attributes like + ``pv_name`` and parsed tags as a dictionary of key/value pairs. + """ + + def __init__(self, **kw): + for k, v in kw.items(): + setattr(self, k, v) + self.pv_api = kw + self.name = kw['pv_name'] + self.tags = parse_tags(kw['pv_tags']) + + def __str__(self): + return '<%s>' % self.pv_api['pv_name'] + + def __repr__(self): + return self.__str__() + + def set_tags(self, tags): + """ + :param tags: A dictionary of tag names and values, like:: + + { + "ceph.osd_fsid": "aaa-fff-bbbb", + "ceph.osd_id": "0" + } + + At the end of all modifications, the tags are refreshed to reflect + LVM's most current view. + """ + for k, v in tags.items(): + self.set_tag(k, v) + # after setting all the tags, refresh them for the current object, use the + # pv_* identifiers to filter because those shouldn't change + pv_object = self.get_first_pv(filter={'pv_name': self.pv_name, + 'pv_uuid': self.pv_uuid}) + self.tags = pv_object.tags + + def set_tag(self, key, value): + """ + Set the key/value pair as an LVM tag. Does not "refresh" the values of + the current object for its tags. Meant to be a "fire and forget" type + of modification. + + **warning**: Altering tags on a PV has to be done ensuring that the + device is actually the one intended. ``pv_name`` is *not* a persistent + value, only ``pv_uuid`` is. Using ``pv_uuid`` is the best way to make + sure the device getting changed is the one needed. 
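A minimal usage sketch of the PV helpers in this section (the device name and tag values are made up)::

    create_pv('/dev/sdb')                            # pvcreate -v -f --yes /dev/sdb
    pv = get_first_pv(filters={'pv_name': '/dev/sdb'})
    pv.set_tag('ceph.osd_id', '0')                   # pvchange --addtag ceph.osd_id=0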
+ """ + # remove it first if it exists + if self.tags.get(key): + current_value = self.tags[key] + tag = "%s=%s" % (key, current_value) + process.call(['pvchange', '--deltag', tag, self.pv_name]) + + process.call( + [ + 'pvchange', + '--addtag', '%s=%s' % (key, value), self.pv_name + ] + ) + + +def create_pv(device): + """ + Create a physical volume from a device, useful when devices need to be later mapped + to journals. + """ + process.run([ + 'pvcreate', + '-v', # verbose + '-f', # force it + '--yes', # answer yes to any prompts + device + ]) + + +def remove_pv(pv_name): + """ + Removes a physical volume using a double `-f` to prevent prompts and fully + remove anything related to LVM. This is tremendously destructive, but so is all other actions + when zapping a device. + + In the case where multiple PVs are found, it will ignore that fact and + continue with the removal, specifically in the case of messages like:: + + WARNING: PV $UUID /dev/DEV-1 was already found on /dev/DEV-2 + + These situations can be avoided with custom filtering rules, which this API + cannot handle while accommodating custom user filters. + """ + fail_msg = "Unable to remove vg %s" % pv_name + process.run( + [ + 'pvremove', + '-v', # verbose + '-f', # force it + '-f', # force it + pv_name + ], + fail_msg=fail_msg, + ) + + +def get_pvs(fields=PV_FIELDS, filters='', tags=None): + """ + Return a list of PVs that are available on the system and match the + filters and tags passed. Argument filters takes a dictionary containing + arguments required by -S option of LVM. Passing a list of LVM tags can be + quite tricky to pass as a dictionary within dictionary, therefore pass + dictionary of tags via tags argument and tricky part will be taken care of + by the helper methods. + + :param fields: string containing list of fields to be displayed by the + pvs command + :param sep: string containing separator to be used between two fields + :param filters: dictionary containing LVM filters + :param tags: dictionary containng LVM tags + :returns: list of class PVolume object representing pvs on the system + """ + filters = make_filters_lvmcmd_ready(filters, tags) + args = ['pvs', '--no-heading', '--readonly', '--separator=";"', '-S', + filters, '-o', fields] + + stdout, stderr, returncode = process.call(args, verbose_on_failure=False) + pvs_report = _output_parser(stdout, fields) + return [PVolume(**pv_report) for pv_report in pvs_report] + + +def get_first_pv(fields=PV_FIELDS, filters=None, tags=None): + """ + Wrapper of get_pv meant to be a convenience method to avoid the phrase:: + pvs = get_pvs() + if len(pvs) >= 1: + pv = pvs[0] + """ + pvs = get_pvs(fields=fields, filters=filters, tags=tags) + return pvs[0] if len(pvs) > 0 else [] + + +################################ +# +# Code for LVM Volume Groups +# +############################# + +VG_FIELDS = 'vg_name,pv_count,lv_count,vg_attr,vg_extent_count,vg_free_count,vg_extent_size' +VG_CMD_OPTIONS = ['--noheadings', '--readonly', '--units=b', '--nosuffix', '--separator=";"'] + + +class VolumeGroup(object): + """ + Represents an LVM group, with some top-level attributes like ``vg_name`` + """ + + def __init__(self, **kw): + for k, v in kw.items(): + setattr(self, k, v) + self.name = kw['vg_name'] + if not self.name: + raise ValueError('VolumeGroup must have a non-empty name') + self.tags = parse_tags(kw.get('vg_tags', '')) + + def __str__(self): + return '<%s>' % self.name + + def __repr__(self): + return self.__str__() + + @property + def free(self): + """ + Return free 
space in VG in bytes + """ + return int(self.vg_extent_size) * int(self.vg_free_count) + + @property + def free_percent(self): + """ + Return free space in VG in bytes + """ + return int(self.vg_free_count) / int(self.vg_extent_count) + + @property + def size(self): + """ + Returns VG size in bytes + """ + return int(self.vg_extent_size) * int(self.vg_extent_count) + + def sizing(self, parts=None, size=None): + """ + Calculate proper sizing to fully utilize the volume group in the most + efficient way possible. To prevent situations where LVM might accept + a percentage that is beyond the vg's capabilities, it will refuse with + an error when requesting a larger-than-possible parameter, in addition + to rounding down calculations. + + A dictionary with different sizing parameters is returned, to make it + easier for others to choose what they need in order to create logical + volumes:: + + >>> data_vg.free + 1024 + >>> data_vg.sizing(parts=4) + {'parts': 4, 'sizes': 256, 'percentages': 25} + >>> data_vg.sizing(size=512) + {'parts': 2, 'sizes': 512, 'percentages': 50} + + + :param parts: Number of parts to create LVs from + :param size: Size in gigabytes to divide the VG into + + :raises SizeAllocationError: When requested size cannot be allocated with + :raises ValueError: If both ``parts`` and ``size`` are given + """ + if parts is not None and size is not None: + raise ValueError( + "Cannot process sizing with both parts (%s) and size (%s)" % (parts, size) + ) + + # if size is given we need to map that to extents so that we avoid + # issues when trying to get this right with a size in gigabytes find + # the percentage first, cheating, because these values are thrown out + vg_free_count = util.str_to_int(self.vg_free_count) + + if size: + size = size * 1024 * 1024 * 1024 + extents = int(size / int(self.vg_extent_size)) + disk_sizing = sizing(self.free, size=size, parts=parts) + else: + if parts is not None: + # Prevent parts being 0, falling back to 1 (100% usage) + parts = parts or 1 + size = int(self.free / parts) + extents = size * vg_free_count / self.free + disk_sizing = sizing(self.free, parts=parts) + + extent_sizing = sizing(vg_free_count, size=extents) + + disk_sizing['extents'] = int(extents) + disk_sizing['percentages'] = extent_sizing['percentages'] + return disk_sizing + + def bytes_to_extents(self, size): + ''' + Return a how many free extents we can fit into a size in bytes. This has + some uncertainty involved. If size/extent_size is within 1% of the + actual free extents we will return the extent count, otherwise we'll + throw an error. + This accomodates for the size calculation in batch. We need to report + the OSD layout but have not yet created any LVM structures. We use the + disk size in batch if no VG is present and that will overshoot the + actual free_extent count due to LVM overhead. 
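A worked instance of the 1% tolerance described above (all numbers invented): with ``vg_extent_size = 4194304`` (4 MiB) and ``vg_free_count = 1000``, a request of ``size = 4215275520`` bytes computes ``b_to_ext = 1005``; that exceeds the free count, but ``1005 / 1000 - 1 = 0.005 < 0.01``, so the method returns the 1000 free extents instead of raising.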
+ + ''' + b_to_ext = int(size / int(self.vg_extent_size)) + if b_to_ext < int(self.vg_free_count): + # return bytes in extents if there is more space + return b_to_ext + elif b_to_ext / int(self.vg_free_count) - 1 < 0.01: + # return vg_fre_count if its less then 1% off + logger.info( + 'bytes_to_extents results in {} but only {} ' + 'are available, adjusting the latter'.format(b_to_ext, + self.vg_free_count)) + return int(self.vg_free_count) + # else raise an exception + raise RuntimeError('Can\'t convert {} to free extents, only {} ({} ' + 'bytes) are free'.format(size, self.vg_free_count, + self.free)) + + def slots_to_extents(self, slots): + ''' + Return how many extents fit the VG slot times + ''' + return int(int(self.vg_extent_count) / slots) + + +def create_vg(devices, name=None, name_prefix=None): + """ + Create a Volume Group. Command looks like:: + + vgcreate --force --yes group_name device + + Once created the volume group is returned as a ``VolumeGroup`` object + + :param devices: A list of devices to create a VG. Optionally, a single + device (as a string) can be used. + :param name: Optionally set the name of the VG, defaults to 'ceph-{uuid}' + :param name_prefix: Optionally prefix the name of the VG, which will get combined + with a UUID string + """ + if isinstance(devices, set): + devices = list(devices) + if not isinstance(devices, list): + devices = [devices] + if name_prefix: + name = "%s-%s" % (name_prefix, str(uuid.uuid4())) + elif name is None: + name = "ceph-%s" % str(uuid.uuid4()) + process.run([ + 'vgcreate', + '--force', + '--yes', + name] + devices + ) + + return get_first_vg(filters={'vg_name': name}) + + +def extend_vg(vg, devices): + """ + Extend a Volume Group. Command looks like:: + + vgextend --force --yes group_name [device, ...] + + Once created the volume group is extended and returned as a ``VolumeGroup`` object + + :param vg: A VolumeGroup object + :param devices: A list of devices to extend the VG. Optionally, a single + device (as a string) can be used. + """ + if not isinstance(devices, list): + devices = [devices] + process.run([ + 'vgextend', + '--force', + '--yes', + vg.name] + devices + ) + + return get_first_vg(filters={'vg_name': vg.name}) + + +def reduce_vg(vg, devices): + """ + Reduce a Volume Group. Command looks like:: + + vgreduce --force --yes group_name [device, ...] + + :param vg: A VolumeGroup object + :param devices: A list of devices to remove from the VG. Optionally, a + single device (as a string) can be used. + """ + if not isinstance(devices, list): + devices = [devices] + process.run([ + 'vgreduce', + '--force', + '--yes', + vg.name] + devices + ) + + return get_first_vg(filter={'vg_name': vg.name}) + + +def remove_vg(vg_name): + """ + Removes a volume group. + """ + if not vg_name: + logger.warning('Skipping removal of invalid VG name: "%s"', vg_name) + return + fail_msg = "Unable to remove vg %s" % vg_name + process.run( + [ + 'vgremove', + '-v', # verbose + '-f', # force it + vg_name + ], + fail_msg=fail_msg, + ) + + +def get_vgs(fields=VG_FIELDS, filters='', tags=None): + """ + Return a list of VGs that are available on the system and match the + filters and tags passed. Argument filters takes a dictionary containing + arguments required by -S option of LVM. Passing a list of LVM tags can be + quite tricky to pass as a dictionary within dictionary, therefore pass + dictionary of tags via tags argument and tricky part will be taken care of + by the helper methods. 
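For example (the VG name and tag are invented), the call below renders roughly as ``vgs <options> -S vg_name=ceph-vg,tags={ceph.osd_id=0} -o <fields>``::

    vgs = get_vgs(filters={'vg_name': 'ceph-vg'}, tags={'ceph.osd_id': '0'})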
+ + :param fields: string containing list of fields to be displayed by the + vgs command + :param sep: string containing separator to be used between two fields + :param filters: dictionary containing LVM filters + :param tags: dictionary containng LVM tags + :returns: list of class VolumeGroup object representing vgs on the system + """ + filters = make_filters_lvmcmd_ready(filters, tags) + args = ['vgs'] + VG_CMD_OPTIONS + ['-S', filters, '-o', fields] + + stdout, stderr, returncode = process.call(args, verbose_on_failure=False) + vgs_report =_output_parser(stdout, fields) + return [VolumeGroup(**vg_report) for vg_report in vgs_report] + + +def get_first_vg(fields=VG_FIELDS, filters=None, tags=None): + """ + Wrapper of get_vg meant to be a convenience method to avoid the phrase:: + vgs = get_vgs() + if len(vgs) >= 1: + vg = vgs[0] + """ + vgs = get_vgs(fields=fields, filters=filters, tags=tags) + return vgs[0] if len(vgs) > 0 else [] + + +def get_device_vgs(device, name_prefix=''): + stdout, stderr, returncode = process.call( + ['pvs'] + VG_CMD_OPTIONS + ['-o', VG_FIELDS, device], + verbose_on_failure=False + ) + vgs = _output_parser(stdout, VG_FIELDS) + return [VolumeGroup(**vg) for vg in vgs if vg['vg_name'] and vg['vg_name'].startswith(name_prefix)] + + +################################# +# +# Code for LVM Logical Volumes +# +############################### + +LV_FIELDS = 'lv_tags,lv_path,lv_name,vg_name,lv_uuid,lv_size' +LV_CMD_OPTIONS = ['--noheadings', '--readonly', '--separator=";"', '-a', + '--units=b', '--nosuffix'] + + +class Volume(object): + """ + Represents a Logical Volume from LVM, with some top-level attributes like + ``lv_name`` and parsed tags as a dictionary of key/value pairs. + """ + + def __init__(self, **kw): + for k, v in kw.items(): + setattr(self, k, v) + self.lv_api = kw + self.name = kw['lv_name'] + if not self.name: + raise ValueError('Volume must have a non-empty name') + self.tags = parse_tags(kw['lv_tags']) + self.encrypted = self.tags.get('ceph.encrypted', '0') == '1' + self.used_by_ceph = 'ceph.osd_id' in self.tags + + def __str__(self): + return '<%s>' % self.lv_api['lv_path'] + + def __repr__(self): + return self.__str__() + + def as_dict(self): + obj = {} + obj.update(self.lv_api) + obj['tags'] = self.tags + obj['name'] = self.name + obj['type'] = self.tags['ceph.type'] + obj['path'] = self.lv_path + return obj + + def report(self): + if not self.used_by_ceph: + return { + 'name': self.lv_name, + 'comment': 'not used by ceph' + } + else: + type_ = self.tags['ceph.type'] + report = { + 'name': self.lv_name, + 'osd_id': self.tags['ceph.osd_id'], + 'cluster_name': self.tags['ceph.cluster_name'], + 'type': type_, + 'osd_fsid': self.tags['ceph.osd_fsid'], + 'cluster_fsid': self.tags['ceph.cluster_fsid'], + 'osdspec_affinity': self.tags.get('ceph.osdspec_affinity', ''), + } + type_uuid = '{}_uuid'.format(type_) + report[type_uuid] = self.tags['ceph.{}'.format(type_uuid)] + return report + + def _format_tag_args(self, op, tags): + tag_args = ['{}={}'.format(k, v) for k, v in tags.items()] + # weird but efficient way of ziping two lists and getting a flat list + return list(sum(zip(repeat(op), tag_args), ())) + + def clear_tags(self, keys=None): + """ + Removes all or passed tags from the Logical Volume. 
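For reference, the zip/flatten trick in ``_format_tag_args`` above interleaves the operation with each rendered tag (the tag value is invented)::

    >>> lv = Volume(lv_name='lv0', lv_tags='')
    >>> lv._format_tag_args('--deltag', {'ceph.osd_id': '0'})
    ['--deltag', 'ceph.osd_id=0']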
+ """ + if not keys: + keys = self.tags.keys() + + del_tags = {k: self.tags[k] for k in keys if k in self.tags} + if not del_tags: + # nothing to clear + return + del_tag_args = self._format_tag_args('--deltag', del_tags) + # --deltag returns successful even if the to be deleted tag is not set + process.call(['lvchange'] + del_tag_args + [self.lv_path]) + for k in del_tags.keys(): + del self.tags[k] + + + def set_tags(self, tags): + """ + :param tags: A dictionary of tag names and values, like:: + + { + "ceph.osd_fsid": "aaa-fff-bbbb", + "ceph.osd_id": "0" + } + + At the end of all modifications, the tags are refreshed to reflect + LVM's most current view. + """ + self.clear_tags(tags.keys()) + add_tag_args = self._format_tag_args('--addtag', tags) + process.call(['lvchange'] + add_tag_args + [self.lv_path]) + for k, v in tags.items(): + self.tags[k] = v + + + def clear_tag(self, key): + if self.tags.get(key): + current_value = self.tags[key] + tag = "%s=%s" % (key, current_value) + process.call(['lvchange', '--deltag', tag, self.lv_path]) + del self.tags[key] + + + def set_tag(self, key, value): + """ + Set the key/value pair as an LVM tag. + """ + # remove it first if it exists + self.clear_tag(key) + + process.call( + [ + 'lvchange', + '--addtag', '%s=%s' % (key, value), self.lv_path + ] + ) + self.tags[key] = value + + def deactivate(self): + """ + Deactivate the LV by calling lvchange -an + """ + process.call(['lvchange', '-an', self.lv_path]) + + +def create_lv(name_prefix, + uuid, + vg=None, + device=None, + slots=None, + extents=None, + size=None, + tags=None): + """ + Create a Logical Volume in a Volume Group. Command looks like:: + + lvcreate -L 50G -n gfslv vg0 + + ``name_prefix`` is required. If ``size`` is provided its expected to be a + byte count. Tags are an optional dictionary and is expected to + conform to the convention of prefixing them with "ceph." like:: + + {"ceph.block_device": "/dev/ceph/osd-1"} + + :param name_prefix: name prefix for the LV, typically somehting like ceph-osd-block + :param uuid: UUID to ensure uniqueness; is combined with name_prefix to + form the LV name + :param vg: optional, pass an existing VG to create LV + :param device: optional, device to use. 
Either device of vg must be passed + :param slots: optional, number of slots to divide vg up, LV will occupy one + one slot if enough space is available + :param extends: optional, how many lvm extends to use, supersedes slots + :param size: optional, target LV size in bytes, supersedes extents, + resulting LV might be smaller depending on extent + size of the underlying VG + :param tags: optional, a dict of lvm tags to set on the LV + """ + name = '{}-{}'.format(name_prefix, uuid) + if not vg: + if not device: + raise RuntimeError("Must either specify vg or device, none given") + # check if a vgs starting with ceph already exists + vgs = get_device_vgs(device, 'ceph') + if vgs: + vg = vgs[0] + else: + # create on if not + vg = create_vg(device, name_prefix='ceph') + assert(vg) + + if size: + extents = vg.bytes_to_extents(size) + logger.debug('size was passed: {} -> {}'.format(size, extents)) + elif slots and not extents: + extents = vg.slots_to_extents(slots) + logger.debug('slots was passed: {} -> {}'.format(slots, extents)) + + if extents: + command = [ + 'lvcreate', + '--yes', + '-l', + '{}'.format(extents), + '-n', name, vg.vg_name + ] + # create the lv with all the space available, this is needed because the + # system call is different for LVM + else: + command = [ + 'lvcreate', + '--yes', + '-l', + '100%FREE', + '-n', name, vg.vg_name + ] + process.run(command) + + lv = get_first_lv(filters={'lv_name': name, 'vg_name': vg.vg_name}) + + if tags is None: + tags = { + "ceph.osd_id": "null", + "ceph.type": "null", + "ceph.cluster_fsid": "null", + "ceph.osd_fsid": "null", + } + # when creating a distinct type, the caller doesn't know what the path will + # be so this function will set it after creation using the mapping + # XXX add CEPH_VOLUME_LVM_DEBUG to enable -vvvv on lv operations + type_path_tag = { + 'journal': 'ceph.journal_device', + 'data': 'ceph.data_device', + 'block': 'ceph.block_device', + 'wal': 'ceph.wal_device', + 'db': 'ceph.db_device', + 'lockbox': 'ceph.lockbox_device', # XXX might not ever need this lockbox sorcery + } + path_tag = type_path_tag.get(tags.get('ceph.type')) + if path_tag: + tags.update({path_tag: lv.lv_path}) + + lv.set_tags(tags) + + return lv + + +def create_lvs(volume_group, parts=None, size=None, name_prefix='ceph-lv'): + """ + Create multiple Logical Volumes from a Volume Group by calculating the + proper extents from ``parts`` or ``size``. A custom prefix can be used + (defaults to ``ceph-lv``), these names are always suffixed with a uuid. + + LV creation in ceph-volume will require tags, this is expected to be + pre-computed by callers who know Ceph metadata like OSD IDs and FSIDs. It + will probably not be the case when mass-creating LVs, so common/default + tags will be set to ``"null"``. + + .. note:: LVs that are not in use can be detected by querying LVM for tags that are + set to ``"null"``. + + :param volume_group: The volume group (vg) to use for LV creation + :type group: ``VolumeGroup()`` object + :param parts: Number of LVs to create *instead of* ``size``. + :type parts: int + :param size: Size (in gigabytes) of LVs to create, e.g. "as many 10gb LVs as possible" + :type size: int + :param extents: The number of LVM extents to use to create the LV. 
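For instance (the VG is hypothetical), carving a volume group into four equal LVs with the default ``"null"`` tags described here::

    vg = get_first_vg(filters={'vg_name': 'ceph-vg'})
    lvs = create_lvs(vg, parts=4)   # four LVs named ceph-lv-<uuid>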
Useful if looking to have + accurate LV sizes (LVM rounds sizes otherwise) + """ + if parts is None and size is None: + # fallback to just one part (using 100% of the vg) + parts = 1 + lvs = [] + tags = { + "ceph.osd_id": "null", + "ceph.type": "null", + "ceph.cluster_fsid": "null", + "ceph.osd_fsid": "null", + } + sizing = volume_group.sizing(parts=parts, size=size) + for part in range(0, sizing['parts']): + size = sizing['sizes'] + extents = sizing['extents'] + lvs.append( + create_lv(name_prefix, uuid.uuid4(), vg=volume_group, extents=extents, tags=tags) + ) + return lvs + + +def remove_lv(lv): + """ + Removes a logical volume given it's absolute path. + + Will return True if the lv is successfully removed or + raises a RuntimeError if the removal fails. + + :param lv: A ``Volume`` object or the path for an LV + """ + if isinstance(lv, Volume): + path = lv.lv_path + else: + path = lv + + stdout, stderr, returncode = process.call( + [ + 'lvremove', + '-v', # verbose + '-f', # force it + path + ], + show_command=True, + terminal_verbose=True, + ) + if returncode != 0: + raise RuntimeError("Unable to remove %s" % path) + return True + + +def get_lvs(fields=LV_FIELDS, filters='', tags=None): + """ + Return a list of LVs that are available on the system and match the + filters and tags passed. Argument filters takes a dictionary containing + arguments required by -S option of LVM. Passing a list of LVM tags can be + quite tricky to pass as a dictionary within dictionary, therefore pass + dictionary of tags via tags argument and tricky part will be taken care of + by the helper methods. + + :param fields: string containing list of fields to be displayed by the + lvs command + :param sep: string containing separator to be used between two fields + :param filters: dictionary containing LVM filters + :param tags: dictionary containng LVM tags + :returns: list of class Volume object representing LVs on the system + """ + filters = make_filters_lvmcmd_ready(filters, tags) + args = ['lvs'] + LV_CMD_OPTIONS + ['-S', filters, '-o', fields] + + stdout, stderr, returncode = process.call(args, verbose_on_failure=False) + lvs_report = _output_parser(stdout, fields) + return [Volume(**lv_report) for lv_report in lvs_report] + + +def get_first_lv(fields=LV_FIELDS, filters=None, tags=None): + """ + Wrapper of get_lv meant to be a convenience method to avoid the phrase:: + lvs = get_lvs() + if len(lvs) >= 1: + lv = lvs[0] + """ + lvs = get_lvs(fields=fields, filters=filters, tags=tags) + return lvs[0] if len(lvs) > 0 else [] + + +def get_lv_by_name(name): + stdout, stderr, returncode = process.call( + ['lvs', '--noheadings', '-o', LV_FIELDS, '-S', + 'lv_name={}'.format(name)], + verbose_on_failure=False + ) + lvs = _output_parser(stdout, LV_FIELDS) + return [Volume(**lv) for lv in lvs] + + +def get_lvs_by_tag(lv_tag): + stdout, stderr, returncode = process.call( + ['lvs', '--noheadings', '--separator=";"', '-a', '-o', LV_FIELDS, '-S', + 'lv_tags={{{}}}'.format(lv_tag)], + verbose_on_failure=False + ) + lvs = _output_parser(stdout, LV_FIELDS) + return [Volume(**lv) for lv in lvs] + + +def get_device_lvs(device, name_prefix=''): + stdout, stderr, returncode = process.call( + ['pvs'] + LV_CMD_OPTIONS + ['-o', LV_FIELDS, device], + verbose_on_failure=False + ) + lvs = _output_parser(stdout, LV_FIELDS) + return [Volume(**lv) for lv in lvs if lv['lv_name'] and + lv['lv_name'].startswith(name_prefix)] diff --git a/src/ceph-volume/ceph_volume/configuration.py b/src/ceph-volume/ceph_volume/configuration.py new file 
mode 100644 index 00000000..2fee47ff --- /dev/null +++ b/src/ceph-volume/ceph_volume/configuration.py @@ -0,0 +1,231 @@ +import contextlib +import logging +import os +import re +from ceph_volume import terminal, conf +from ceph_volume import exceptions +from sys import version_info as sys_version_info + +if sys_version_info.major >= 3: + import configparser + conf_parentclass = configparser.ConfigParser +elif sys_version_info.major < 3: + import ConfigParser as configparser + conf_parentclass = configparser.SafeConfigParser +else: + raise RuntimeError('Not expecting python version > 3 yet.') + + +logger = logging.getLogger(__name__) + + +class _TrimIndentFile(object): + """ + This is used to take a file-like object and removes any + leading tabs from each line when it's read. This is important + because some ceph configuration files include tabs which break + ConfigParser. + """ + def __init__(self, fp): + self.fp = fp + + def readline(self): + line = self.fp.readline() + return line.lstrip(' \t') + + def __iter__(self): + return iter(self.readline, '') + + +def load_ceph_conf_path(cluster_name='ceph'): + abspath = '/etc/ceph/%s.conf' % cluster_name + conf.path = os.getenv('CEPH_CONF', abspath) + conf.cluster = cluster_name + + +def load(abspath=None): + if abspath is None: + abspath = conf.path + + if not os.path.exists(abspath): + raise exceptions.ConfigurationError(abspath=abspath) + + parser = Conf() + + try: + ceph_file = open(abspath) + trimmed_conf = _TrimIndentFile(ceph_file) + with contextlib.closing(ceph_file): + parser.read_conf(trimmed_conf) + conf.ceph = parser + return parser + except configparser.ParsingError as error: + logger.exception('Unable to parse INI-style file: %s' % abspath) + terminal.error(str(error)) + raise RuntimeError('Unable to read configuration file: %s' % abspath) + + +class Conf(conf_parentclass): + """ + Subclasses from ConfigParser to give a few helpers for Ceph + configuration. + """ + + def read_path(self, path): + self.path = path + return self.read(path) + + def is_valid(self): + try: + self.get('global', 'fsid') + except (configparser.NoSectionError, configparser.NoOptionError): + raise exceptions.ConfigurationKeyError('global', 'fsid') + + def optionxform(self, s): + s = s.replace('_', ' ') + s = '_'.join(s.split()) + return s + + def get_safe(self, section, key, default=None): + """ + Attempt to get a configuration value from a certain section + in a ``cfg`` object but returning None if not found. Avoids the need + to be doing try/except {ConfigParser Exceptions} every time. + """ + self.is_valid() + try: + return self.get(section, key) + except (configparser.NoSectionError, configparser.NoOptionError): + return default + + def get_list(self, section, key, default=None, split=','): + """ + Assumes that the value for a given key is going to be a list separated + by commas. It gets rid of trailing comments. If just one item is + present it returns a list with a single item, if no key is found an + empty list is returned. + + Optionally split on other characters besides ',' and return a fallback + value if no items are found. + """ + self.is_valid() + value = self.get_safe(section, key, []) + if value == []: + if default is not None: + return default + return value + + # strip comments + value = re.split(r'\s+#', value)[0] + + # split on commas + value = value.split(split) + + # strip spaces + return [x.strip() for x in value] + + # XXX Almost all of it lifted from the original ConfigParser._read method, + # except for the parsing of '#' in lines. 
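For instance (section and key invented), ``get_list`` above turns a value such as ``/dev/sda, /dev/sdb  # spinning`` into ``['/dev/sda', '/dev/sdb']``: the trailing comment is stripped first, then the value is split on ``,`` and each item is trimmed.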
This is only a problem in Python 2.7, and can be removed + # once tooling is Python3 only with `Conf(inline_comment_prefixes=('#',';'))` + def _read(self, fp, fpname): + """Parse a sectioned setup file. + + The sections in setup file contains a title line at the top, + indicated by a name in square brackets (`[]'), plus key/value + options lines, indicated by `name: value' format lines. + Continuations are represented by an embedded newline then + leading whitespace. Blank lines, lines beginning with a '#', + and just about everything else are ignored. + """ + cursect = None # None, or a dictionary + optname = None + lineno = 0 + e = None # None, or an exception + while True: + line = fp.readline() + if not line: + break + lineno = lineno + 1 + # comment or blank line? + if line.strip() == '' or line[0] in '#;': + continue + if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR": + # no leading whitespace + continue + # continuation line? + if line[0].isspace() and cursect is not None and optname: + value = line.strip() + if value: + cursect[optname].append(value) + # a section header or option header? + else: + # is it a section header? + mo = self.SECTCRE.match(line) + if mo: + sectname = mo.group('header') + if sectname in self._sections: + cursect = self._sections[sectname] + elif sectname == 'DEFAULT': + cursect = self._defaults + else: + cursect = self._dict() + cursect['__name__'] = sectname + self._sections[sectname] = cursect + # So sections can't start with a continuation line + optname = None + # no section header in the file? + elif cursect is None: + raise configparser.MissingSectionHeaderError(fpname, lineno, line) + # an option line? + else: + mo = self._optcre.match(line) + if mo: + optname, vi, optval = mo.group('option', 'vi', 'value') + optname = self.optionxform(optname.rstrip()) + # This check is fine because the OPTCRE cannot + # match if it would set optval to None + if optval is not None: + # XXX Added support for '#' inline comments + if vi in ('=', ':') and (';' in optval or '#' in optval): + # strip comments + optval = re.split(r'\s+(;|#)', optval)[0] + # if what is left is comment as a value, fallback to an empty string + # that is: `foo = ;` would mean `foo` is '', which brings parity with + # what ceph-conf tool does + if optval in [';','#']: + optval = '' + optval = optval.strip() + # allow empty values + if optval == '""': + optval = '' + cursect[optname] = [optval] + else: + # valueless option handling + cursect[optname] = optval + else: + # a non-fatal parsing error occurred. set up the + # exception but keep going. 
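Given the inline-comment support added above, a hypothetical line like ``osd journal size = 1024 ; tuned down`` leaves ``optval`` as ``'1024'``, while a bare ``foo = ;`` yields ``''``, keeping parity with the ``ceph-conf`` tool.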
the exception will be + # raised at the end of the file and will contain a + # list of all bogus lines + if not e: + e = configparser.ParsingError(fpname) + e.append(lineno, repr(line)) + # if any parsing errors occurred, raise an exception + if e: + raise e + + # join the multi-line values collected while reading + all_sections = [self._defaults] + all_sections.extend(self._sections.values()) + for options in all_sections: + for name, val in options.items(): + if isinstance(val, list): + options[name] = '\n'.join(val) + + def read_conf(self, conffile): + if sys_version_info.major >= 3: + self.read_file(conffile) + elif sys_version_info.major < 3: + self.readfp(conffile) + else: + raise RuntimeError('Not expecting python version > 3 yet.') diff --git a/src/ceph-volume/ceph_volume/decorators.py b/src/ceph-volume/ceph_volume/decorators.py new file mode 100644 index 00000000..f6777281 --- /dev/null +++ b/src/ceph-volume/ceph_volume/decorators.py @@ -0,0 +1,90 @@ +import os +import sys +from ceph_volume import terminal, exceptions +from functools import wraps + + +def needs_root(func): + """ + Check for super user privileges on functions/methods. Raise + ``SuperUserError`` with a nice message. + """ + @wraps(func) + def is_root(*a, **kw): + if not os.getuid() == 0: + raise exceptions.SuperUserError() + return func(*a, **kw) + return is_root + + +def catches(catch=None, handler=None, exit=True): + """ + Very simple decorator that tries any of the exception(s) passed in as + a single exception class or tuple (containing multiple ones) returning the + exception message and optionally handling the problem if it rises with the + handler if it is provided. + + So instead of douing something like this:: + + def bar(): + try: + some_call() + print "Success!" + except TypeError, exc: + print "Error while handling some call: %s" % exc + sys.exit(1) + + You would need to decorate it like this to have the same effect:: + + @catches(TypeError) + def bar(): + some_call() + print "Success!" + + If multiple exceptions need to be caught they need to be provided as a + tuple:: + + @catches((TypeError, AttributeError)) + def bar(): + some_call() + print "Success!" + """ + catch = catch or Exception + + def decorate(f): + + @wraps(f) + def newfunc(*a, **kw): + try: + return f(*a, **kw) + except catch as e: + import logging + logger = logging.getLogger('ceph_volume') + logger.exception('exception caught by decorator') + if os.environ.get('CEPH_VOLUME_DEBUG'): + raise + if handler: + return handler(e) + else: + sys.stderr.write(make_exception_message(e)) + if exit: + sys.exit(1) + return newfunc + + return decorate + +# +# Decorator helpers +# + + +def make_exception_message(exc): + """ + An exception is passed in and this function + returns the proper string depending on the result + so it is readable enough. + """ + if str(exc): + return '%s %s: %s\n' % (terminal.red_arrow, exc.__class__.__name__, exc) + else: + return '%s %s\n' % (terminal.red_arrow, exc.__class__.__name__) diff --git a/src/ceph-volume/ceph_volume/devices/__init__.py b/src/ceph-volume/ceph_volume/devices/__init__.py new file mode 100644 index 00000000..2b017d67 --- /dev/null +++ b/src/ceph-volume/ceph_volume/devices/__init__.py @@ -0,0 +1 @@ +from . 
import lvm, simple, raw # noqa diff --git a/src/ceph-volume/ceph_volume/devices/lvm/__init__.py b/src/ceph-volume/ceph_volume/devices/lvm/__init__.py new file mode 100644 index 00000000..3c147123 --- /dev/null +++ b/src/ceph-volume/ceph_volume/devices/lvm/__init__.py @@ -0,0 +1 @@ +from .main import LVM # noqa diff --git a/src/ceph-volume/ceph_volume/devices/lvm/activate.py b/src/ceph-volume/ceph_volume/devices/lvm/activate.py new file mode 100644 index 00000000..e4ac074a --- /dev/null +++ b/src/ceph-volume/ceph_volume/devices/lvm/activate.py @@ -0,0 +1,370 @@ +from __future__ import print_function +import argparse +import logging +import os +from textwrap import dedent +from ceph_volume import process, conf, decorators, terminal, __release__, configuration +from ceph_volume.util import system, disk +from ceph_volume.util import prepare as prepare_utils +from ceph_volume.util import encryption as encryption_utils +from ceph_volume.systemd import systemctl +from ceph_volume.api import lvm as api +from .listing import direct_report + + +logger = logging.getLogger(__name__) + + +def activate_filestore(osd_lvs, no_systemd=False): + # find the osd + for osd_lv in osd_lvs: + if osd_lv.tags.get('ceph.type') == 'data': + data_lv = osd_lv + break + else: + raise RuntimeError('Unable to find a data LV for filestore activation') + + is_encrypted = data_lv.tags.get('ceph.encrypted', '0') == '1' + is_vdo = data_lv.tags.get('ceph.vdo', '0') + + osd_id = data_lv.tags['ceph.osd_id'] + configuration.load_ceph_conf_path(data_lv.tags['ceph.cluster_name']) + configuration.load() + # it may have a volume with a journal + for osd_lv in osd_lvs: + if osd_lv.tags.get('ceph.type') == 'journal': + osd_journal_lv = osd_lv + break + else: + osd_journal_lv = None + + # TODO: add sensible error reporting if this is ever the case + # blow up with a KeyError if this doesn't exist + osd_fsid = data_lv.tags['ceph.osd_fsid'] + if not osd_journal_lv: + # must be a disk partition, by querying blkid by the uuid we are ensuring that the + # device path is always correct + journal_uuid = data_lv.tags['ceph.journal_uuid'] + osd_journal = disk.get_device_from_partuuid(journal_uuid) + else: + journal_uuid = osd_journal_lv.lv_uuid + osd_journal = data_lv.tags['ceph.journal_device'] + + if not osd_journal: + raise RuntimeError('unable to detect an lv or device journal for OSD %s' % osd_id) + + # this is done here, so that previous checks that ensure path availability + # and correctness can still be enforced, and report if any issues are found + if is_encrypted: + lockbox_secret = data_lv.tags['ceph.cephx_lockbox_secret'] + # this keyring writing is idempotent + encryption_utils.write_lockbox_keyring(osd_id, osd_fsid, lockbox_secret) + dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid) + encryption_utils.luks_open(dmcrypt_secret, data_lv.lv_path, data_lv.lv_uuid) + encryption_utils.luks_open(dmcrypt_secret, osd_journal, journal_uuid) + + osd_journal = '/dev/mapper/%s' % journal_uuid + source = '/dev/mapper/%s' % data_lv.lv_uuid + else: + source = data_lv.lv_path + + # mount the osd + destination = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id) + if not system.device_is_mounted(source, destination=destination): + prepare_utils.mount_osd(source, osd_id, is_vdo=is_vdo) + + # ensure that the OSD destination is always chowned properly + system.chown(destination) + + # always re-do the symlink regardless if it exists, so that the journal + # device path that may have changed can be mapped correctly every time + 
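(As background: the ``-snf`` flags to ``ln`` request a symbolic link, no dereferencing of an existing link at the destination, and forced replacement, which is what makes redoing the symlink on every activation idempotent.)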
destination = '/var/lib/ceph/osd/%s-%s/journal' % (conf.cluster, osd_id) + process.run(['ln', '-snf', osd_journal, destination]) + + # make sure that the journal has proper permissions + system.chown(osd_journal) + + if no_systemd is False: + # enable the ceph-volume unit for this OSD + systemctl.enable_volume(osd_id, osd_fsid, 'lvm') + + # enable the OSD + systemctl.enable_osd(osd_id) + + # start the OSD + systemctl.start_osd(osd_id) + terminal.success("ceph-volume lvm activate successful for osd ID: %s" % osd_id) + + +def get_osd_device_path(osd_lvs, device_type, dmcrypt_secret=None): + """ + ``device_type`` can be one of ``db``, ``wal`` or ``block`` so that we can + query LVs on system and fallback to querying the uuid if that is not + present. + + Return a path if possible, failing to do that a ``None``, since some of + these devices are optional. + """ + osd_block_lv = None + for lv in osd_lvs: + if lv.tags.get('ceph.type') == 'block': + osd_block_lv = lv + break + if osd_block_lv: + is_encrypted = osd_block_lv.tags.get('ceph.encrypted', '0') == '1' + logger.debug('Found block device (%s) with encryption: %s', osd_block_lv.name, is_encrypted) + uuid_tag = 'ceph.%s_uuid' % device_type + device_uuid = osd_block_lv.tags.get(uuid_tag) + if not device_uuid: + return None + + device_lv = None + for lv in osd_lvs: + if lv.tags.get('ceph.type') == device_type: + device_lv = lv + break + if device_lv: + if is_encrypted: + encryption_utils.luks_open(dmcrypt_secret, device_lv.lv_path, device_uuid) + return '/dev/mapper/%s' % device_uuid + return device_lv.lv_path + + # this could be a regular device, so query it with blkid + physical_device = disk.get_device_from_partuuid(device_uuid) + if physical_device: + if is_encrypted: + encryption_utils.luks_open(dmcrypt_secret, physical_device, device_uuid) + return '/dev/mapper/%s' % device_uuid + return physical_device + + raise RuntimeError('could not find %s with uuid %s' % (device_type, device_uuid)) + + +def activate_bluestore(osd_lvs, no_systemd=False): + for lv in osd_lvs: + if lv.tags.get('ceph.type') == 'block': + osd_block_lv = lv + break + else: + raise RuntimeError('could not find a bluestore OSD to activate') + + is_encrypted = osd_block_lv.tags.get('ceph.encrypted', '0') == '1' + dmcrypt_secret = None + osd_id = osd_block_lv.tags['ceph.osd_id'] + conf.cluster = osd_block_lv.tags['ceph.cluster_name'] + osd_fsid = osd_block_lv.tags['ceph.osd_fsid'] + + # mount on tmpfs the osd directory + osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id) + if not system.path_is_mounted(osd_path): + # mkdir -p and mount as tmpfs + prepare_utils.create_osd_path(osd_id, tmpfs=True) + # XXX This needs to be removed once ceph-bluestore-tool can deal with + # symlinks that exist in the osd dir + for link_name in ['block', 'block.db', 'block.wal']: + link_path = os.path.join(osd_path, link_name) + if os.path.exists(link_path): + os.unlink(os.path.join(osd_path, link_name)) + # encryption is handled here, before priming the OSD dir + if is_encrypted: + osd_lv_path = '/dev/mapper/%s' % osd_block_lv.lv_uuid + lockbox_secret = osd_block_lv.tags['ceph.cephx_lockbox_secret'] + encryption_utils.write_lockbox_keyring(osd_id, osd_fsid, lockbox_secret) + dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid) + encryption_utils.luks_open(dmcrypt_secret, osd_block_lv.lv_path, osd_block_lv.lv_uuid) + else: + osd_lv_path = osd_block_lv.lv_path + + db_device_path = get_osd_device_path(osd_lvs, 'db', dmcrypt_secret=dmcrypt_secret) + wal_device_path = 
get_osd_device_path(osd_lvs, 'wal', dmcrypt_secret=dmcrypt_secret) + + # Once symlinks are removed, the osd dir can be 'primed again. chown first, + # regardless of what currently exists so that ``prime-osd-dir`` can succeed + # even if permissions are somehow messed up + system.chown(osd_path) + prime_command = [ + 'ceph-bluestore-tool', '--cluster=%s' % conf.cluster, + 'prime-osd-dir', '--dev', osd_lv_path, + '--path', osd_path] + + if __release__ != "luminous": + # mon-config changes are not available in Luminous + prime_command.append('--no-mon-config') + + process.run(prime_command) + # always re-do the symlink regardless if it exists, so that the block, + # block.wal, and block.db devices that may have changed can be mapped + # correctly every time + process.run(['ln', '-snf', osd_lv_path, os.path.join(osd_path, 'block')]) + system.chown(os.path.join(osd_path, 'block')) + system.chown(osd_path) + if db_device_path: + destination = os.path.join(osd_path, 'block.db') + process.run(['ln', '-snf', db_device_path, destination]) + system.chown(db_device_path) + system.chown(destination) + if wal_device_path: + destination = os.path.join(osd_path, 'block.wal') + process.run(['ln', '-snf', wal_device_path, destination]) + system.chown(wal_device_path) + system.chown(destination) + + if no_systemd is False: + # enable the ceph-volume unit for this OSD + systemctl.enable_volume(osd_id, osd_fsid, 'lvm') + + # enable the OSD + systemctl.enable_osd(osd_id) + + # start the OSD + systemctl.start_osd(osd_id) + terminal.success("ceph-volume lvm activate successful for osd ID: %s" % osd_id) + + +class Activate(object): + + help = 'Discover and mount the LVM device associated with an OSD ID and start the Ceph OSD' + + def __init__(self, argv): + self.argv = argv + + @decorators.needs_root + def activate_all(self, args): + listed_osds = direct_report() + osds = {} + for osd_id, devices in listed_osds.items(): + # the metadata for all devices in each OSD will contain + # the FSID which is required for activation + for device in devices: + fsid = device.get('tags', {}).get('ceph.osd_fsid') + if fsid: + osds[fsid] = osd_id + break + if not osds: + terminal.warning('Was unable to find any OSDs to activate') + terminal.warning('Verify OSDs are present with "ceph-volume lvm list"') + return + for osd_fsid, osd_id in osds.items(): + if systemctl.osd_is_active(osd_id): + terminal.warning( + 'OSD ID %s FSID %s process is active. 
Skipping activation' % (osd_id, osd_fsid) + ) + else: + terminal.info('Activating OSD ID %s FSID %s' % (osd_id, osd_fsid)) + self.activate(args, osd_id=osd_id, osd_fsid=osd_fsid) + + @decorators.needs_root + def activate(self, args, osd_id=None, osd_fsid=None): + """ + :param args: The parsed arguments coming from the CLI + :param osd_id: When activating all, this gets populated with an + existing OSD ID + :param osd_fsid: When activating all, this gets populated with an + existing OSD FSID + """ + osd_id = osd_id if osd_id else args.osd_id + osd_fsid = osd_fsid if osd_fsid else args.osd_fsid + + if osd_id and osd_fsid: + tags = {'ceph.osd_id': osd_id, 'ceph.osd_fsid': osd_fsid} + elif not osd_id and osd_fsid: + tags = {'ceph.osd_fsid': osd_fsid} + lvs = api.get_lvs(tags=tags) + if not lvs: + raise RuntimeError('could not find osd.%s with osd_fsid %s' % + (osd_id, osd_fsid)) + + # This argument is only available when passed in directly or via + # systemd, not when ``create`` is being used + if getattr(args, 'auto_detect_objectstore', False): + logger.info('auto detecting objectstore') + # may get multiple lvs, so can't do get_the_lvs() calls here + for lv in lvs: + has_journal = lv.tags.get('ceph.journal_uuid') + if has_journal: + logger.info('found a journal associated with the OSD, ' + 'assuming filestore') + return activate_filestore(lvs, args.no_systemd) + + logger.info('unable to find a journal associated with the OSD, ' + 'assuming bluestore') + + return activate_bluestore(lvs, args.no_systemd) + if args.bluestore: + activate_bluestore(lvs, args.no_systemd) + elif args.filestore: + activate_filestore(lvs, args.no_systemd) + + def main(self): + sub_command_help = dedent(""" + Activate OSDs by discovering them with LVM and mounting them in their + appropriate destination: + + ceph-volume lvm activate {ID} {FSID} + + The lvs associated with the OSD need to have been prepared previously, + so that all needed tags and metadata exist. 
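For example (the ID and FSID values are placeholders)::

    ceph-volume lvm activate 0 6cc43680-4f6e-4feb-92ff-9c7ba204120e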
+ + When migrating OSDs, or a multiple-osd activation is needed, the + ``--all`` flag can be used instead of the individual ID and FSID: + + ceph-volume lvm activate --all + + """) + parser = argparse.ArgumentParser( + prog='ceph-volume lvm activate', + formatter_class=argparse.RawDescriptionHelpFormatter, + description=sub_command_help, + ) + + parser.add_argument( + 'osd_id', + metavar='ID', + nargs='?', + help='The ID of the OSD, usually an integer, like 0' + ) + parser.add_argument( + 'osd_fsid', + metavar='FSID', + nargs='?', + help='The FSID of the OSD, similar to a SHA1' + ) + parser.add_argument( + '--auto-detect-objectstore', + action='store_true', + help='Autodetect the objectstore by inspecting the OSD', + ) + parser.add_argument( + '--bluestore', + action='store_true', + help='bluestore objectstore (default)', + ) + parser.add_argument( + '--filestore', + action='store_true', + help='filestore objectstore', + ) + parser.add_argument( + '--all', + dest='activate_all', + action='store_true', + help='Activate all OSDs found in the system', + ) + parser.add_argument( + '--no-systemd', + dest='no_systemd', + action='store_true', + help='Skip creating and enabling systemd units and starting OSD services', + ) + if len(self.argv) == 0: + print(sub_command_help) + return + args = parser.parse_args(self.argv) + # Default to bluestore here since defaulting it in add_argument may + # cause both to be True + if not args.bluestore and not args.filestore: + args.bluestore = True + if args.activate_all: + self.activate_all(args) + else: + self.activate(args) diff --git a/src/ceph-volume/ceph_volume/devices/lvm/batch.py b/src/ceph-volume/ceph_volume/devices/lvm/batch.py new file mode 100644 index 00000000..40c0fea4 --- /dev/null +++ b/src/ceph-volume/ceph_volume/devices/lvm/batch.py @@ -0,0 +1,628 @@ +import argparse +from collections import namedtuple +import json +import logging +from textwrap import dedent +from ceph_volume import terminal, decorators +from ceph_volume.util import disk, prompt_bool, arg_validators, templates +from ceph_volume.util import prepare +from . 
import common +from .create import Create +from .prepare import Prepare + +mlogger = terminal.MultiLogger(__name__) +logger = logging.getLogger(__name__) + + +device_list_template = """ + * {path: <25} {size: <10} {state}""" + + +def device_formatter(devices): + lines = [] + for path, details in devices: + lines.append(device_list_template.format( + path=path, size=details['human_readable_size'], + state='solid' if details['rotational'] == '0' else 'rotational') + ) + + return ''.join(lines) + + +def ensure_disjoint_device_lists(data, db=[], wal=[], journal=[]): + # check that all device lists are disjoint with each other + if not all([set(data).isdisjoint(set(db)), + set(data).isdisjoint(set(wal)), + set(data).isdisjoint(set(journal)), + set(db).isdisjoint(set(wal))]): + raise Exception('Device lists are not disjoint') + + +def separate_devices_from_lvs(devices): + phys = [] + lvm = [] + for d in devices: + phys.append(d) if d.is_device else lvm.append(d) + return phys, lvm + + +def get_physical_osds(devices, args): + ''' + Goes through passed physical devices and assigns OSDs + ''' + data_slots = args.osds_per_device + if args.data_slots: + data_slots = max(args.data_slots, args.osds_per_device) + rel_data_size = 1.0 / data_slots + mlogger.debug('relative data size: {}'.format(rel_data_size)) + ret = [] + for dev in devices: + if dev.available_lvm: + dev_size = dev.vg_size[0] + abs_size = disk.Size(b=int(dev_size * rel_data_size)) + free_size = dev.vg_free[0] + for _ in range(args.osds_per_device): + if abs_size > free_size: + break + free_size -= abs_size.b + osd_id = None + if args.osd_ids: + osd_id = args.osd_ids.pop() + ret.append(Batch.OSD(dev.path, + rel_data_size, + abs_size, + args.osds_per_device, + osd_id, + 'dmcrypt' if args.dmcrypt else None)) + return ret + + +def get_lvm_osds(lvs, args): + ''' + Goes through passed LVs and assigns planned osds + ''' + ret = [] + for lv in lvs: + if lv.used_by_ceph: + continue + osd_id = None + if args.osd_ids: + osd_id = args.osd_ids.pop() + osd = Batch.OSD("{}/{}".format(lv.vg_name, lv.lv_name), + 100.0, + disk.Size(b=int(lv.lvs[0].lv_size)), + 1, + osd_id, + 'dmcrypt' if args.dmcrypt else None) + ret.append(osd) + return ret + + +def get_physical_fast_allocs(devices, type_, fast_slots_per_device, new_osds, args): + requested_slots = getattr(args, '{}_slots'.format(type_)) + if not requested_slots or requested_slots < fast_slots_per_device: + if requested_slots: + mlogger.info('{}_slots argument is too small, ignoring'.format(type_)) + requested_slots = fast_slots_per_device + + requested_size = getattr(args, '{}_size'.format(type_), 0) + if requested_size == 0: + # no size argument was specified, check ceph.conf + get_size_fct = getattr(prepare, 'get_{}_size'.format(type_)) + requested_size = get_size_fct(lv_format=False) + + ret = [] + for dev in devices: + if not dev.available_lvm: + continue + # any LV present is considered a taken slot + occupied_slots = len(dev.lvs) + # this only looks at the first vg on device, unsure if there is a better + # way + dev_size = dev.vg_size[0] + abs_size = disk.Size(b=int(dev_size / requested_slots)) + free_size = dev.vg_free[0] + relative_size = int(abs_size) / dev_size + if requested_size: + if requested_size <= abs_size: + abs_size = requested_size + else: + mlogger.error( + '{} was requested for {}, but only {} can be fulfilled'.format( + requested_size, + '{}_size'.format(type_), + abs_size, + )) + exit(1) + while abs_size <= free_size and len(ret) < new_osds and occupied_slots < 
fast_slots_per_device: + free_size -= abs_size.b + occupied_slots += 1 + ret.append((dev.path, relative_size, abs_size, requested_slots)) + return ret + + +def get_lvm_fast_allocs(lvs): + return [("{}/{}".format(d.vg_name, d.lv_name), 100.0, + disk.Size(b=int(d.lvs[0].lv_size)), 1) for d in lvs if not + d.used_by_ceph] + + +class Batch(object): + + help = 'Automatically size devices for multi-OSD provisioning with minimal interaction' + + _help = dedent(""" + Automatically size devices ready for OSD provisioning based on default strategies. + + Usage: + + ceph-volume lvm batch [DEVICE...] + + Devices can be physical block devices or LVs. + Optional reporting on possible outcomes is enabled with --report + + ceph-volume lvm batch --report [DEVICE...] + """) + + def __init__(self, argv): + parser = argparse.ArgumentParser( + prog='ceph-volume lvm batch', + formatter_class=argparse.RawDescriptionHelpFormatter, + description=self._help, + ) + + parser.add_argument( + 'devices', + metavar='DEVICES', + nargs='*', + type=arg_validators.ValidBatchDevice(), + default=[], + help='Devices to provision OSDs', + ) + parser.add_argument( + '--db-devices', + nargs='*', + type=arg_validators.ValidBatchDevice(), + default=[], + help='Devices to provision OSDs db volumes', + ) + parser.add_argument( + '--wal-devices', + nargs='*', + type=arg_validators.ValidBatchDevice(), + default=[], + help='Devices to provision OSDs wal volumes', + ) + parser.add_argument( + '--journal-devices', + nargs='*', + type=arg_validators.ValidBatchDevice(), + default=[], + help='Devices to provision OSDs journal volumes', + ) + parser.add_argument( + '--auto', + action='store_true', + help=('deploy multi-device OSDs if rotational and non-rotational drives ' + 'are passed in DEVICES'), + default=True + ) + parser.add_argument( + '--no-auto', + action='store_false', + dest='auto', + help=('deploy standalone OSDs if rotational and non-rotational drives ' + 'are passed in DEVICES'), + ) + parser.add_argument( + '--bluestore', + action='store_true', + help='bluestore objectstore (default)', + ) + parser.add_argument( + '--filestore', + action='store_true', + help='filestore objectstore', + ) + parser.add_argument( + '--report', + action='store_true', + help='Only report on OSDs that would be created and exit', + ) + parser.add_argument( + '--yes', + action='store_true', + help='Avoid prompting for confirmation when provisioning', + ) + parser.add_argument( + '--format', + help='output format, defaults to "pretty"', + default='pretty', + choices=['json', 'json-pretty', 'pretty'], + ) + parser.add_argument( + '--dmcrypt', + action='store_true', + help='Enable device encryption via dm-crypt', + ) + parser.add_argument( + '--crush-device-class', + dest='crush_device_class', + help='Crush device class to assign this OSD to', + ) + parser.add_argument( + '--no-systemd', + dest='no_systemd', + action='store_true', + help='Skip creating and enabling systemd units and starting OSD services', + ) + parser.add_argument( + '--osds-per-device', + type=int, + default=1, + help='Provision more than 1 (the default) OSD per device', + ) + parser.add_argument( + '--data-slots', + type=int, + help=('Provision more than 1 (the default) OSD slot per device;' + ' if more slots than osds-per-device are specified, slots' + ' will stay unoccupied'), + ) + parser.add_argument( + '--block-db-size', + type=disk.Size.parse, + help='Set (or override) the "bluestore_block_db_size" value, in bytes' + ) + parser.add_argument( + '--block-db-slots', + type=int, +
help='Provision slots on DB device, can remain unoccupied' + ) + parser.add_argument( + '--block-wal-size', + type=disk.Size.parse, + help='Set (or override) the "bluestore_block_wal_size" value, in bytes' + ) + parser.add_argument( + '--block-wal-slots', + type=int, + help='Provision slots on WAL device, can remain unoccupied' + ) + def journal_size_in_mb_hack(size): + # TODO give user time to adjust, then remove this + if size and size[-1].isdigit(): + mlogger.warning('DEPRECATION NOTICE') + mlogger.warning('--journal-size as integer is parsed as megabytes') + mlogger.warning('A future release will parse integers as bytes') + mlogger.warning('Add a "M" to explicitly pass a megabyte size') + size += 'M' + return disk.Size.parse(size) + parser.add_argument( + '--journal-size', + type=journal_size_in_mb_hack, + help='Override the "osd_journal_size" value, in megabytes' + ) + parser.add_argument( + '--journal-slots', + type=int, + help='Provision slots on journal device, can remain unoccupied' + ) + parser.add_argument( + '--prepare', + action='store_true', + help='Only prepare all OSDs, do not activate', + ) + parser.add_argument( + '--osd-ids', + nargs='*', + default=[], + help='Reuse existing OSD ids', + ) + self.args = parser.parse_args(argv) + self.parser = parser + for dev_list in ['', 'db_', 'wal_', 'journal_']: + setattr(self, '{}usable'.format(dev_list), []) + + def report(self, plan): + report = self._create_report(plan) + print(report) + + def _create_report(self, plan): + if self.args.format == 'pretty': + report = '' + report += templates.total_osds.format(total_osds=len(plan)) + + report += templates.osd_component_titles + for osd in plan: + report += templates.osd_header + report += osd.report() + return report + else: + json_report = [] + for osd in plan: + json_report.append(osd.report_json()) + if self.args.format == 'json': + return json.dumps(json_report) + elif self.args.format == 'json-pretty': + return json.dumps(json_report, indent=4, + sort_keys=True) + + def _check_slot_args(self): + ''' + check that the -slots args are consistent with other arguments + ''' + if self.args.data_slots and self.args.osds_per_device: + if self.args.data_slots < self.args.osds_per_device: + raise ValueError('data_slots is smaller than osds_per_device') + + def _sort_rotational_disks(self): + ''' + Helper for legacy auto behaviour. + Sorts drives into rotating and non-rotating, the latter being used for + db or journal.
+ ''' + mlogger.warning('DEPRECATION NOTICE') + mlogger.warning('You are using the legacy automatic disk sorting behavior') + mlogger.warning('The Pacific release will change the default to --no-auto') + rotating = [] + ssd = [] + for d in self.args.devices: + rotating.append(d) if d.rotational else ssd.append(d) + if ssd and not rotating: + # no need for additional sorting, we'll only deploy standalone on ssds + return + self.args.devices = rotating + if self.args.filestore: + self.args.journal_devices = ssd + else: + self.args.db_devices = ssd + + @decorators.needs_root + def main(self): + if not self.args.devices: + return self.parser.print_help() + + # Default to bluestore here since defaulting it in add_argument may + # cause both to be True + if not self.args.bluestore and not self.args.filestore: + self.args.bluestore = True + + if (self.args.auto and not self.args.db_devices and not + self.args.wal_devices and not self.args.journal_devices): + self._sort_rotational_disks() + + self._check_slot_args() + + ensure_disjoint_device_lists(self.args.devices, + self.args.db_devices, + self.args.wal_devices, + self.args.journal_devices) + + plan = self.get_plan(self.args) + + if self.args.report: + self.report(plan) + return 0 + + if not self.args.yes: + self.report(plan) + terminal.info('The above OSDs would be created if the operation continues') + if not prompt_bool('do you want to proceed? (yes/no)'): + terminal.error('aborting OSD provisioning') + raise SystemExit(0) + + self._execute(plan) + + def _execute(self, plan): + defaults = common.get_default_args() + global_args = [ + 'bluestore', + 'filestore', + 'dmcrypt', + 'crush_device_class', + 'no_systemd', + ] + defaults.update({arg: getattr(self.args, arg) for arg in global_args}) + for osd in plan: + args = osd.get_args(defaults) + if self.args.prepare: + p = Prepare([]) + p.safe_prepare(argparse.Namespace(**args)) + else: + c = Create([]) + c.create(argparse.Namespace(**args)) + + + def get_plan(self, args): + if args.bluestore: + plan = self.get_deployment_layout(args, args.devices, args.db_devices, + args.wal_devices) + elif args.filestore: + plan = self.get_deployment_layout(args, args.devices, args.journal_devices) + return plan + + def get_deployment_layout(self, args, devices, fast_devices=[], + very_fast_devices=[]): + ''' + The methods here are mostly just organization, error reporting and + setting up of (default) args. The heavy lifting code for the deployment + layout can be found in the static get_*_osds and get_*_fast_allocs + functions. 
+ ''' + plan = [] + phys_devs, lvm_devs = separate_devices_from_lvs(devices) + mlogger.debug(('passed data devices: {} physical,' + ' {} LVM').format(len(phys_devs), len(lvm_devs))) + + plan.extend(get_physical_osds(phys_devs, args)) + + plan.extend(get_lvm_osds(lvm_devs, args)) + + num_osds = len(plan) + if num_osds == 0: + mlogger.info('All data devices are unavailable') + return plan + requested_osds = args.osds_per_device * len(phys_devs) + len(lvm_devs) + + fast_type = 'block_db' if args.bluestore else 'journal' + fast_allocations = self.fast_allocations(fast_devices, + requested_osds, + num_osds, + fast_type) + if fast_devices and not fast_allocations: + mlogger.info('{} fast devices were passed, but none are available'.format(len(fast_devices))) + return [] + if fast_devices and not len(fast_allocations) == num_osds: + mlogger.error('{} fast allocations != {} num_osds'.format( + len(fast_allocations), num_osds)) + exit(1) + + very_fast_allocations = self.fast_allocations(very_fast_devices, + requested_osds, + num_osds, + 'block_wal') + if very_fast_devices and not very_fast_allocations: + mlogger.info('{} very fast devices were passed, but none are available'.format(len(very_fast_devices))) + return [] + if very_fast_devices and not len(very_fast_allocations) == num_osds: + mlogger.error('{} very fast allocations != {} num_osds'.format( + len(very_fast_allocations), num_osds)) + exit(1) + + for osd in plan: + if fast_devices: + osd.add_fast_device(*fast_allocations.pop(), + type_=fast_type) + if very_fast_devices and args.bluestore: + osd.add_very_fast_device(*very_fast_allocations.pop()) + return plan + + def fast_allocations(self, devices, requested_osds, new_osds, type_): + ret = [] + if not devices: + return ret + phys_devs, lvm_devs = separate_devices_from_lvs(devices) + mlogger.debug(('passed {} devices: {} physical,' + ' {} LVM').format(type_, len(phys_devs), len(lvm_devs))) + + ret.extend(get_lvm_fast_allocs(lvm_devs)) + + # fill up uneven distributions across fast devices: 5 osds and 2 fast + # devices? create 3 slots on each device rather than deploying + # heterogeneous osds + if (requested_osds - len(lvm_devs)) % len(phys_devs): + fast_slots_per_device = int((requested_osds - len(lvm_devs)) / len(phys_devs)) + 1 + else: + fast_slots_per_device = int((requested_osds - len(lvm_devs)) / len(phys_devs)) + + + ret.extend(get_physical_fast_allocs(phys_devs, + type_, + fast_slots_per_device, + new_osds, + self.args)) + return ret
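A worked instance of the fill-up arithmetic in fast_allocations() above, assuming 5 requested OSDs, none of them on pre-built LVs, and 2 physical fast devices (a sketch, not part of the patch):

    # 5 osds over 2 fast devices do not divide evenly, so each device
    # gets ceil(5 / 2) == 3 slots rather than a lopsided 3/2 split
    requested_osds, lvm_devs, phys_devs = 5, 0, 2
    if (requested_osds - lvm_devs) % phys_devs:
        fast_slots_per_device = (requested_osds - lvm_devs) // phys_devs + 1
    else:
        fast_slots_per_device = (requested_osds - lvm_devs) // phys_devs
    assert fast_slots_per_device == 3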
+ + class OSD(object): + ''' + This class simply stores info about to-be-deployed OSDs and provides an + easy way to retrieve the necessary create arguments. + ''' + VolSpec = namedtuple('VolSpec', + ['path', + 'rel_size', + 'abs_size', + 'slots', + 'type_']) + + def __init__(self, + data_path, + rel_size, + abs_size, + slots, + id_, + encryption): + self.id_ = id_ + self.data = self.VolSpec(path=data_path, + rel_size=rel_size, + abs_size=abs_size, + slots=slots, + type_='data') + self.fast = None + self.very_fast = None + self.encryption = encryption + + def add_fast_device(self, path, rel_size, abs_size, slots, type_): + self.fast = self.VolSpec(path=path, + rel_size=rel_size, + abs_size=abs_size, + slots=slots, + type_=type_) + + def add_very_fast_device(self, path, rel_size, abs_size, slots): + self.very_fast = self.VolSpec(path=path, + rel_size=rel_size, + abs_size=abs_size, + slots=slots, + type_='block_wal') + + def _get_osd_plan(self): + plan = { + 'data': self.data.path, + 'data_size': self.data.abs_size, + 'encryption': self.encryption, + } + if self.fast: + type_ = self.fast.type_.replace('.', '_') + plan.update( + { + type_: self.fast.path, + '{}_size'.format(type_): self.fast.abs_size, + }) + if self.very_fast: + plan.update( + { + 'block_wal': self.very_fast.path, + 'block_wal_size': self.very_fast.abs_size, + }) + if self.id_: + plan.update({'osd_id': self.id_}) + return plan + + def get_args(self, defaults): + my_defaults = defaults.copy() + my_defaults.update(self._get_osd_plan()) + return my_defaults + + def report(self): + report = '' + if self.id_: + report += templates.osd_reused_id.format( + id_=self.id_) + if self.encryption: + report += templates.osd_encryption.format( + enc=self.encryption) + report += templates.osd_component.format( + _type=self.data.type_, + path=self.data.path, + size=self.data.abs_size, + percent=self.data.rel_size) + if self.fast: + report += templates.osd_component.format( + _type=self.fast.type_, + path=self.fast.path, + size=self.fast.abs_size, + percent=self.fast.rel_size) + if self.very_fast: + report += templates.osd_component.format( + _type=self.very_fast.type_, + path=self.very_fast.path, + size=self.very_fast.abs_size, + percent=self.very_fast.rel_size) + return report + + def report_json(self): + # cast all values to string so that the report can be dumped into + # json.dumps + return {k: str(v) for k, v in self._get_osd_plan().items()}
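The plan dicts produced by OSD._get_osd_plan() above feed directly into the prepare/create argument namespaces. A hypothetical plan for a bluestore OSD with a separate block.db (paths, sizes and the id are made up for illustration; the keys mirror the code):

    plan = {
        'data': '/dev/sdb',                  # self.data.path
        'data_size': disk.Size(gb=1862),     # self.data.abs_size
        'encryption': 'dmcrypt',             # or None without --dmcrypt
        'block_db': 'fast_vg/osd-block-db',  # self.fast.path
        'block_db_size': disk.Size(gb=186),  # self.fast.abs_size
        'osd_id': '7',                       # only present when an id is reused
    }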
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/common.py b/src/ceph-volume/ceph_volume/devices/lvm/common.py new file mode 100644 index 00000000..06369e47 --- /dev/null +++ b/src/ceph-volume/ceph_volume/devices/lvm/common.py @@ -0,0 +1,184 @@ +from ceph_volume.util import arg_validators, disk +from ceph_volume import process, conf +from ceph_volume import terminal +import argparse + + +def rollback_osd(args, osd_id=None): + """ + When the process of creating or preparing fails, the OSD needs to be + destroyed so that the ID can be reused. This prevents leaving the ID + around as "used" on the monitor, which can cause confusion if expecting + sequential OSD IDs. + + The usage of `purge-new` allows this to be done without requiring the + admin keyring (otherwise needed for destroy and purge commands) + """ + if not osd_id: + # it means that it wasn't generated, so there is nothing to rollback here + return + + # once here, this is an error condition that needs to be rolled back + terminal.error('Was unable to complete a new OSD, will rollback changes') + osd_name = 'osd.%s' + bootstrap_keyring = '/var/lib/ceph/bootstrap-osd/%s.keyring' % conf.cluster + cmd = [ + 'ceph', + '--cluster', conf.cluster, + '--name', 'client.bootstrap-osd', + '--keyring', bootstrap_keyring, + 'osd', 'purge-new', osd_name % osd_id, + '--yes-i-really-mean-it', + ] + + process.run(cmd) + + +common_args = { + '--data': { + 'help': 'OSD data path. A physical device or logical volume', + 'required': True, + 'type': arg_validators.ValidDevice(as_string=True), + #'default':, + #'type':, + }, + '--data-size': { + 'help': 'Size of data LV in case a device was passed in --data', + 'default': '0', + 'type': disk.Size.parse + }, + '--data-slots': { + 'help': ('Intended number of slots on data device. The new OSD gets one ' + 'of those slots or 1/nth of the available capacity'), + 'type': int, + 'default': 1, + }, + '--osd-id': { + 'help': 'Reuse an existing OSD id', + 'default': None, + }, + '--osd-fsid': { + 'help': 'Reuse an existing OSD fsid', + 'default': None, + }, + '--cluster-fsid': { + 'help': 'Specify the cluster fsid, useful when no ceph.conf is available', + 'default': None, + }, + '--crush-device-class': { + 'dest': 'crush_device_class', + 'help': 'Crush device class to assign this OSD to', + 'default': None, + }, + '--dmcrypt': { + 'action': 'store_true', + 'help': 'Enable device encryption via dm-crypt', + }, + '--no-systemd': { + 'dest': 'no_systemd', + 'action': 'store_true', + 'help': 'Skip creating and enabling systemd units and starting OSD services when activating', + }, +} + +bluestore_args = { + '--bluestore': { + 'action': 'store_true', + 'help': 'Use the bluestore objectstore', + }, + '--block.db': { + 'dest': 'block_db', + 'help': 'Path to bluestore block.db logical volume or device', + }, + '--block.db-size': { + 'dest': 'block_db_size', + 'help': 'Size of block.db LV in case device was passed in --block.db', + 'default': '0', + 'type': disk.Size.parse + }, + '--block.db-slots': { + 'dest': 'block_db_slots', + 'help': ('Intended number of slots on db device. The new OSD gets one ' + 'of those slots or 1/nth of the available capacity'), + 'type': int, + 'default': 1, + }, + '--block.wal': { + 'dest': 'block_wal', + 'help': 'Path to bluestore block.wal logical volume or device', + }, + '--block.wal-size': { + 'dest': 'block_wal_size', + 'help': 'Size of block.wal LV in case device was passed in --block.wal', + 'default': '0', + 'type': disk.Size.parse + }, + '--block.wal-slots': { + 'dest': 'block_wal_slots', + 'help': ('Intended number of slots on wal device. The new OSD gets one ' + 'of those slots or 1/nth of the available capacity'), + 'type': int, + 'default': 1, + }, +} + +filestore_args = { + '--filestore': { + 'action': 'store_true', + 'help': 'Use the filestore objectstore', + }, + '--journal': { + 'help': 'A logical volume (vg_name/lv_name), or path to a device', + }, + '--journal-size': { + 'help': 'Size of journal LV in case a raw block device was passed in --journal', + 'default': '0', + 'type': disk.Size.parse + }, + '--journal-slots': { + 'help': ('Intended number of slots on journal device.
The new OSD gets one ' + 'of those slots or 1/nth of the available capacity'), + 'type': int, + 'default': 1, + }, +} + +def get_default_args(): + defaults = {} + def format_name(name): + return name.strip('-').replace('-', '_').replace('.', '_') + for argset in (common_args, filestore_args, bluestore_args): + defaults.update({format_name(name): val.get('default', None) for name, val in argset.items()}) + return defaults + + +def common_parser(prog, description): + """ + Both prepare and create share the same parser, those are defined here to + avoid duplication + """ + parser = argparse.ArgumentParser( + prog=prog, + formatter_class=argparse.RawDescriptionHelpFormatter, + description=description, + ) + + filestore_group = parser.add_argument_group('filestore') + bluestore_group = parser.add_argument_group('bluestore') + + for name, kwargs in common_args.items(): + parser.add_argument(name, **kwargs) + + for name, kwargs in bluestore_args.items(): + bluestore_group.add_argument(name, **kwargs) + + for name, kwargs in filestore_args.items(): + filestore_group.add_argument(name, **kwargs) + + # Do not parse args, so that consumers can do something before the args get + # parsed triggering argparse behavior + return parser + + +create_parser = common_parser # noqa +prepare_parser = common_parser # noqa
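A quick sanity check of the option-name mapping that get_default_args() performs above (this restates its inner format_name helper verbatim):

    def format_name(name):
        return name.strip('-').replace('-', '_').replace('.', '_')

    assert format_name('--block.db-size') == 'block_db_size'
    assert format_name('--no-systemd') == 'no_systemd'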
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/create.py b/src/ceph-volume/ceph_volume/devices/lvm/create.py new file mode 100644 index 00000000..af2cd96c --- /dev/null +++ b/src/ceph-volume/ceph_volume/devices/lvm/create.py @@ -0,0 +1,77 @@ +from __future__ import print_function +from textwrap import dedent +import logging +from ceph_volume.util import system +from ceph_volume.util.arg_validators import exclude_group_options +from ceph_volume import decorators, terminal +from .common import create_parser, rollback_osd +from .prepare import Prepare +from .activate import Activate + +logger = logging.getLogger(__name__) + + +class Create(object): + + help = 'Create a new OSD from an LVM device' + + def __init__(self, argv): + self.argv = argv + + @decorators.needs_root + def create(self, args): + if not args.osd_fsid: + args.osd_fsid = system.generate_uuid() + prepare_step = Prepare([]) + prepare_step.safe_prepare(args) + osd_id = prepare_step.osd_id + try: + # we try this for activate only when 'creating' an OSD, because a rollback should not + # happen when doing normal activation. For example when starting an OSD, systemd will call + # activate, which would never need to be rolled back. + Activate([]).activate(args) + except Exception: + logger.exception('lvm activate was unable to complete, while creating the OSD') + logger.info('will rollback OSD ID creation') + rollback_osd(args, osd_id) + raise + terminal.success("ceph-volume lvm create successful for: %s" % args.data) + + def main(self): + sub_command_help = dedent(""" + Create an OSD by assigning an ID and FSID, registering them with the + cluster, formatting and mounting the volume, adding all the metadata to + the logical volumes using LVM tags, and starting the OSD daemon. This is + a convenience command that combines the prepare and activate steps. + + Encryption is supported via dmcrypt and the --dmcrypt flag. + + Existing logical volume (lv): + + ceph-volume lvm create --data {vg/lv} + + Existing block device (a logical volume will be created): + + ceph-volume lvm create --data /path/to/device + + Optionally, can consume db and wal block devices, partitions or logical + volumes. A device will get a logical volume; partitions and existing + logical volumes will be used as-is: + + ceph-volume lvm create --data {vg/lv} --block.wal {partition} --block.db {/path/to/device} + """) + parser = create_parser( + prog='ceph-volume lvm create', + description=sub_command_help, + ) + if len(self.argv) == 0: + print(sub_command_help) + return + exclude_group_options(parser, groups=['filestore', 'bluestore'], argv=self.argv) + args = parser.parse_args(self.argv) + # Default to bluestore here since defaulting it in add_argument may + # cause both to be True + if not args.bluestore and not args.filestore: + args.bluestore = True + self.create(args)
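Condensed, the create() flow above is prepare followed by activate, with the provisional OSD id rolled back if activation fails (a sketch of the code above, error messages trimmed):

    prepare_step = Prepare([])
    prepare_step.safe_prepare(args)              # format, tag and mount/link the LVs
    try:
        Activate([]).activate(args)              # enable units and start the daemon
    except Exception:
        rollback_osd(args, prepare_step.osd_id)  # frees the reserved OSD id
        raise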
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/deactivate.py b/src/ceph-volume/ceph_volume/devices/lvm/deactivate.py new file mode 100644 index 00000000..5de6dbe3 --- /dev/null +++ b/src/ceph-volume/ceph_volume/devices/lvm/deactivate.py @@ -0,0 +1,90 @@ +import argparse +import logging +import sys +from textwrap import dedent +from ceph_volume import conf +from ceph_volume.util import encryption, system +from ceph_volume.api.lvm import get_lvs_by_tag + +logger = logging.getLogger(__name__) + + +def deactivate_osd(osd_id=None, osd_uuid=None): + + lvs = [] + if osd_uuid is not None: + lvs = get_lvs_by_tag('ceph.osd_fsid={}'.format(osd_uuid)) + osd_id = next(lv.tags['ceph.osd_id'] for lv in lvs) + else: + lvs = get_lvs_by_tag('ceph.osd_id={}'.format(osd_id)) + + data_lv = next(lv for lv in lvs if lv.tags['ceph.type'] in ['data', 'block']) + + conf.cluster = data_lv.tags['ceph.cluster_name'] + logger.debug('Found cluster name {}'.format(conf.cluster)) + + tmpfs_path = '/var/lib/ceph/osd/{}-{}'.format(conf.cluster, osd_id) + system.unmount_tmpfs(tmpfs_path) + + for lv in lvs: + if lv.tags.get('ceph.encrypted', '0') == '1': + encryption.dmcrypt_close(lv.lv_uuid) + + +class Deactivate(object): + + help = 'Deactivate OSDs' + + def deactivate(self, args=None): + if args: + self.args = args + try: + deactivate_osd(self.args.osd_id, self.args.osd_uuid) + except StopIteration: + logger.error(('No data or block LV found for OSD ' + '{}').format(self.args.osd_id)) + sys.exit(1) + + def __init__(self, argv): + self.argv = argv + + def main(self): + sub_command_help = dedent(""" + Deactivate unmounts an OSD's tmpfs and closes any crypt devices. + + ceph-volume lvm deactivate {ID} {FSID} + + To deactivate all volumes use the --all flag. + ceph-volume lvm deactivate --all + """) + parser = argparse.ArgumentParser( + prog='ceph-volume lvm deactivate', + formatter_class=argparse.RawDescriptionHelpFormatter, + description=sub_command_help, + ) + + parser.add_argument( + 'osd_id', + nargs='?', + help='The ID of the OSD' + ) + parser.add_argument( + 'osd_uuid', + nargs='?', + help='The UUID of the OSD, similar to a SHA1, takes precedence over osd_id' + ) + # parser.add_argument( + # '--all', + # action='store_true', + # help='Deactivate all OSD volumes found in the system', + # ) + if len(self.argv) == 0: + print(sub_command_help) + return + args = parser.parse_args(self.argv) + # an OSD must be identified by either an id or a uuid + if not args.osd_id and not args.osd_uuid: + raise ValueError(('Cannot identify OSD, pass either all or ' + 'osd_id or osd_uuid')) + self.deactivate(args)
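Non-CLI consumers can call deactivate_osd() above directly; a minimal sketch (the fsid is made up):

    from ceph_volume.devices.lvm.deactivate import deactivate_osd

    # unmounts the OSD tmpfs and closes any dm-crypt mappers, resolving
    # the id from the ceph.osd_fsid LVM tag
    deactivate_osd(osd_uuid='0b3a0a4f-6e38-4a3b-9b2f-1c8e19d6a2c4')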
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/listing.py b/src/ceph-volume/ceph_volume/devices/lvm/listing.py new file mode 100644 index 00000000..1ae8489f --- /dev/null +++ b/src/ceph-volume/ceph_volume/devices/lvm/listing.py @@ -0,0 +1,223 @@ +from __future__ import print_function +import argparse +import json +import logging +import os.path +from textwrap import dedent +from ceph_volume import decorators +from ceph_volume.api import lvm as api + +logger = logging.getLogger(__name__) + + +osd_list_header_template = """\n +{osd_id:=^20}""" + + +osd_device_header_template = """ + + {type: <13} {path} +""" + +device_metadata_item_template = """ + {tag_name: <25} {value}""" + + +def readable_tag(tag): + actual_name = tag.split('.')[-1] + return actual_name.replace('_', ' ') + + +def pretty_report(report): + output = [] + for osd_id, devices in sorted(report.items()): + output.append( + osd_list_header_template.format(osd_id=" osd.%s " % osd_id) + ) + for device in devices: + output.append( + osd_device_header_template.format( + type='[%s]' % device['type'], + path=device['path'] + ) + ) + for tag_name, value in sorted(device.get('tags', {}).items()): + output.append( + device_metadata_item_template.format( + tag_name=readable_tag(tag_name), + value=value + ) + ) + if not device.get('devices'): + continue + else: + output.append( + device_metadata_item_template.format( + tag_name='devices', + value=','.join(device['devices']) + ) + ) + + print(''.join(output)) + + +def direct_report(): + """ + Other non-cli consumers of listing information will want to consume the + report without the need to parse arguments or other flags. This helper + bypasses the need to deal with the class interface which is meant for cli + handling. + """ + return List([]).full_report() + + +# TODO: Perhaps, get rid of this class and simplify this module further? +class List(object): + + help = 'list logical volumes and devices associated with Ceph' + + def __init__(self, argv): + self.argv = argv + + @decorators.needs_root + def list(self, args): + report = self.single_report(args.device) if args.device else \ + self.full_report() + if args.format == 'json': + # If the report is empty, we don't return a non-zero exit status + # because it is assumed this is going to be consumed by automated + # systems like ceph-ansible which would be forced to ignore the + # non-zero exit status if all they need is the information in the + # JSON object + print(json.dumps(report, indent=4, sort_keys=True)) + else: + if not report: + raise SystemExit('No valid Ceph lvm devices found') + pretty_report(report) + + def create_report(self, lvs): + """ + Create a report for LVM dev(s) passed. Returns '{}' to denote failure. + """ + + report = {} + + for lv in lvs: + if not api.is_ceph_device(lv): + continue + + osd_id = lv.tags['ceph.osd_id'] + report.setdefault(osd_id, []) + lv_report = lv.as_dict() + + pvs = api.get_pvs(filters={'lv_uuid': lv.lv_uuid}) + lv_report['devices'] = [pv.name for pv in pvs] if pvs else [] + report[osd_id].append(lv_report) + + phys_devs = self.create_report_non_lv_device(lv) + if phys_devs: + report[osd_id].append(phys_devs) + + return report + + def create_report_non_lv_device(self, lv): + report = {} + if lv.tags.get('ceph.type', '') in ['data', 'block']: + for dev_type in ['journal', 'wal', 'db']: + dev = lv.tags.get('ceph.{}_device'.format(dev_type), '') + # counting / in the device name seems brittle but should work, + # lvs will have 3 + if dev and dev.count('/') == 2: + device_uuid = lv.tags.get('ceph.{}_uuid'.format(dev_type)) + report = {'tags': {'PARTUUID': device_uuid}, + 'type': dev_type, + 'path': dev} + return report + + def full_report(self): + """ + Create a report of all Ceph LVs. Returns '{}' to denote failure. + """ + return self.create_report(api.get_lvs()) + + def single_report(self, device): + """ + Generate a report for a single device. This can be either a logical + volume in the form of vg/lv or a device with an absolute path like + /dev/sda1 or /dev/sda. Returns '{}' to denote failure. + """ + lvs = [] + if os.path.isabs(device): + # we have a block device + lvs = api.get_device_lvs(device) + if not lvs: + # maybe this was a LV path /dev/vg_name/lv_name or /dev/mapper/ + lvs = api.get_lvs(filters={'path': device}) + else: + # vg_name/lv_name was passed + vg_name, lv_name = device.split('/') + lvs = api.get_lvs(filters={'lv_name': lv_name, + 'vg_name': vg_name}) + + report = self.create_report(lvs) + + if not report: + # check if device is a non-lvm journal or wal/db + for dev_type in ['journal', 'wal', 'db']: + lvs = api.get_lvs(tags={ + 'ceph.{}_device'.format(dev_type): device}) + if lvs: + # just taking the first lv here should work + lv = lvs[0] + phys_dev = self.create_report_non_lv_device(lv) + osd_id = lv.tags.get('ceph.osd_id') + if osd_id: + report[osd_id] = [phys_dev] + + + return report + + def main(self): + sub_command_help = dedent(""" + List devices or logical volumes associated with Ceph. An association is + determined if a device has information relating to an OSD. This is + verified by querying LVM's metadata and correlating it with devices. + + The lvs associated with the OSD need to have been prepared previously, + so that all needed tags and metadata exist.
+ + Full listing of all system devices associated with a cluster:: + + ceph-volume lvm list + + List a particular device, reporting all metadata about it:: + + ceph-volume lvm list /dev/sda1 + + List a logical volume, along with all its metadata (vg is a volume + group, and lv the logical volume name):: + + ceph-volume lvm list {vg/lv} + """) + parser = argparse.ArgumentParser( + prog='ceph-volume lvm list', + formatter_class=argparse.RawDescriptionHelpFormatter, + description=sub_command_help, + ) + + parser.add_argument( + 'device', + metavar='DEVICE', + nargs='?', + help='Path to an lv (as vg/lv) or to a device like /dev/sda1' + ) + + parser.add_argument( + '--format', + help='output format, defaults to "pretty"', + default='pretty', + choices=['json', 'pretty'], + ) + + args = parser.parse_args(self.argv) + self.list(args) diff --git a/src/ceph-volume/ceph_volume/devices/lvm/main.py b/src/ceph-volume/ceph_volume/devices/lvm/main.py new file mode 100644 index 00000000..3ef3c111 --- /dev/null +++ b/src/ceph-volume/ceph_volume/devices/lvm/main.py @@ -0,0 +1,50 @@ +import argparse +from textwrap import dedent +from ceph_volume import terminal +from . import activate +from . import deactivate +from . import prepare +from . import create +from . import trigger +from . import listing +from . import zap +from . import batch + + +class LVM(object): + + help = 'Use LVM and LVM-based technologies to deploy OSDs' + + _help = dedent(""" + Use LVM and LVM-based technologies to deploy OSDs + + {sub_help} + """) + + mapper = { + 'activate': activate.Activate, + 'deactivate': deactivate.Deactivate, + 'batch': batch.Batch, + 'prepare': prepare.Prepare, + 'create': create.Create, + 'trigger': trigger.Trigger, + 'list': listing.List, + 'zap': zap.Zap, + } + + def __init__(self, argv): + self.argv = argv + + def print_help(self, sub_help): + return self._help.format(sub_help=sub_help) + + def main(self): + terminal.dispatch(self.mapper, self.argv) + parser = argparse.ArgumentParser( + prog='ceph-volume lvm', + formatter_class=argparse.RawDescriptionHelpFormatter, + description=self.print_help(terminal.subhelp(self.mapper)), + ) + parser.parse_args(self.argv) + if len(self.argv) <= 1: + return parser.print_help() diff --git a/src/ceph-volume/ceph_volume/devices/lvm/prepare.py b/src/ceph-volume/ceph_volume/devices/lvm/prepare.py new file mode 100644 index 00000000..f0c3959a --- /dev/null +++ b/src/ceph-volume/ceph_volume/devices/lvm/prepare.py @@ -0,0 +1,441 @@ +from __future__ import print_function +import json +import logging +from textwrap import dedent +from ceph_volume.util import prepare as prepare_utils +from ceph_volume.util import encryption as encryption_utils +from ceph_volume.util import system, disk +from ceph_volume.util.arg_validators import exclude_group_options +from ceph_volume import conf, decorators, terminal +from ceph_volume.api import lvm as api +from .common import prepare_parser, rollback_osd + + +logger = logging.getLogger(__name__) + + +def prepare_dmcrypt(key, device, device_type, tags): + """ + Helper for devices that are encrypted. 
The operations needed for + block, db, wal, or data/journal devices are all the same + """ + if not device: + return '' + tag_name = 'ceph.%s_uuid' % device_type + uuid = tags[tag_name] + # format data device + encryption_utils.luks_format( + key, + device + ) + encryption_utils.luks_open( + key, + device, + uuid + ) + + return '/dev/mapper/%s' % uuid + + +def prepare_filestore(device, journal, secrets, tags, osd_id, fsid): + """ + :param device: The name of the logical volume to work with + :param journal: similar to device but can also be a regular/plain disk + :param secrets: A dict with the secrets needed to create the osd (e.g. cephx) + :param id_: The OSD id + :param fsid: The OSD fsid, also known as the OSD UUID + """ + cephx_secret = secrets.get('cephx_secret', prepare_utils.create_key()) + + # encryption-only operations + if secrets.get('dmcrypt_key'): + # format and open ('decrypt' devices) and re-assign the device and journal + # variables so that the rest of the process can use the mapper paths + key = secrets['dmcrypt_key'] + device = prepare_dmcrypt(key, device, 'data', tags) + journal = prepare_dmcrypt(key, journal, 'journal', tags) + + # vdo detection + is_vdo = api.is_vdo(device) + # create the directory + prepare_utils.create_osd_path(osd_id) + # format the device + prepare_utils.format_device(device) + # mount the data device + prepare_utils.mount_osd(device, osd_id, is_vdo=is_vdo) + # symlink the journal + prepare_utils.link_journal(journal, osd_id) + # get the latest monmap + prepare_utils.get_monmap(osd_id) + # prepare the osd filesystem + prepare_utils.osd_mkfs_filestore(osd_id, fsid, cephx_secret) + # write the OSD keyring if it doesn't exist already + prepare_utils.write_keyring(osd_id, cephx_secret) + if secrets.get('dmcrypt_key'): + # if the device is going to get activated right away, this can be done + # here, otherwise it will be recreated + encryption_utils.write_lockbox_keyring( + osd_id, + fsid, + tags['ceph.cephx_lockbox_secret'] + ) + + +def prepare_bluestore(block, wal, db, secrets, tags, osd_id, fsid): + """ + :param block: The name of the logical volume for the bluestore data + :param wal: a regular/plain disk or logical volume, to be used for block.wal + :param db: a regular/plain disk or logical volume, to be used for block.db + :param secrets: A dict with the secrets needed to create the osd (e.g. cephx) + :param id_: The OSD id + :param fsid: The OSD fsid, also known as the OSD UUID + """ + cephx_secret = secrets.get('cephx_secret', prepare_utils.create_key()) + # encryption-only operations + if secrets.get('dmcrypt_key'): + # If encrypted, there is no need to create the lockbox keyring file because + # bluestore re-creates the files and does not have support for other files + # like the custom lockbox one. This will need to be done on activation. 
+ # format and open ('decrypt' devices) and re-assign the device and journal + # variables so that the rest of the process can use the mapper paths + key = secrets['dmcrypt_key'] + block = prepare_dmcrypt(key, block, 'block', tags) + wal = prepare_dmcrypt(key, wal, 'wal', tags) + db = prepare_dmcrypt(key, db, 'db', tags) + + # create the directory + prepare_utils.create_osd_path(osd_id, tmpfs=True) + # symlink the block + prepare_utils.link_block(block, osd_id) + # get the latest monmap + prepare_utils.get_monmap(osd_id) + # write the OSD keyring if it doesn't exist already + prepare_utils.write_keyring(osd_id, cephx_secret) + # prepare the osd filesystem + prepare_utils.osd_mkfs_bluestore( + osd_id, fsid, + keyring=cephx_secret, + wal=wal, + db=db + ) + + +class Prepare(object): + + help = 'Format an LVM device and associate it with an OSD' + + def __init__(self, argv): + self.argv = argv + self.osd_id = None + + def get_ptuuid(self, argument): + uuid = disk.get_partuuid(argument) + if not uuid: + terminal.error('blkid could not detect a PARTUUID for device: %s' % argument) + raise RuntimeError('unable to use device') + return uuid + + def setup_device(self, device_type, device_name, tags, size, slots): + """ + Check if ``device`` is an lv, if so, set the tags, making sure to + update the tags with the lv_uuid and lv_path which the incoming tags + will not have. + + If the device is not a logical volume, then retrieve the partition UUID + by querying ``blkid`` + """ + if device_name is None: + return '', '', tags + tags['ceph.type'] = device_type + tags['ceph.vdo'] = api.is_vdo(device_name) + + try: + vg_name, lv_name = device_name.split('/') + lv = api.get_first_lv(filters={'lv_name': lv_name, + 'vg_name': vg_name}) + except ValueError: + lv = None + + if lv: + lv_uuid = lv.lv_uuid + path = lv.lv_path + tags['ceph.%s_uuid' % device_type] = lv_uuid + tags['ceph.%s_device' % device_type] = path + lv.set_tags(tags) + elif disk.is_device(device_name): + # We got a disk, create an lv + lv_type = "osd-{}".format(device_type) + name_uuid = system.generate_uuid() + kwargs = { + 'device': device_name, + 'tags': tags, + 'slots': slots + } + #TODO use get_block_db_size and co here to get configured size in + #conf file + if size != 0: + kwargs['size'] = size + lv = api.create_lv( + lv_type, + name_uuid, + **kwargs) + path = lv.lv_path + tags['ceph.{}_device'.format(device_type)] = path + tags['ceph.{}_uuid'.format(device_type)] = lv.lv_uuid + lv_uuid = lv.lv_uuid + lv.set_tags(tags) + else: + # otherwise assume this is a regular disk partition + name_uuid = self.get_ptuuid(device_name) + path = device_name + tags['ceph.%s_uuid' % device_type] = name_uuid + tags['ceph.%s_device' % device_type] = path + lv_uuid = name_uuid + return path, lv_uuid, tags + + def prepare_data_device(self, device_type, osd_uuid): + """ + Check if ``arg`` is a device or partition to create an LV out of it + with a distinct volume group name, assigning LV tags on it and + ultimately, returning the logical volume object. Failing to detect + a device or partition will result in error. + + :param arg: The value of ``--data`` when parsing args + :param device_type: Usually, either ``data`` or ``block`` (filestore vs. 
bluestore) + :param osd_uuid: The OSD uuid + """ + device = self.args.data + if disk.is_partition(device) or disk.is_device(device): + # we must create a vg, and then a single lv + lv_name_prefix = "osd-{}".format(device_type) + kwargs = {'device': device, + 'tags': {'ceph.type': device_type}, + 'slots': self.args.data_slots, + } + logger.debug('data device size: {}'.format(self.args.data_size)) + if self.args.data_size != 0: + kwargs['size'] = self.args.data_size + return api.create_lv( + lv_name_prefix, + osd_uuid, + **kwargs) + else: + error = [ + 'Cannot use device ({}).'.format(device), + 'A vg/lv path or an existing device is needed'] + raise RuntimeError(' '.join(error)) + + raise RuntimeError('no data logical volume found with: {}'.format(device)) + + def safe_prepare(self, args=None): + """ + An intermediate step between `main()` and `prepare()` so that we can + capture the `self.osd_id` in case we need to rollback + + :param args: Injected args, usually from `lvm create` which compounds + both `prepare` and `create` + """ + if args is not None: + self.args = args + + try: + vgname, lvname = self.args.data.split('/') + lv = api.get_first_lv(filters={'lv_name': lvname, + 'vg_name': vgname}) + except ValueError: + lv = None + + if api.is_ceph_device(lv): + logger.info("device {} is already used".format(self.args.data)) + raise RuntimeError("skipping {}, it is already prepared".format(self.args.data)) + try: + self.prepare() + except Exception: + logger.exception('lvm prepare was unable to complete') + logger.info('will rollback OSD ID creation') + rollback_osd(self.args, self.osd_id) + raise + terminal.success("ceph-volume lvm prepare successful for: %s" % self.args.data) + + def get_cluster_fsid(self): + """ + Allows using --cluster-fsid as an argument, but can fallback to reading + from ceph.conf if that is unset (the default behavior). + """ + if self.args.cluster_fsid: + return self.args.cluster_fsid + else: + return conf.ceph.get('global', 'fsid') + + @decorators.needs_root + def prepare(self): + # FIXME we don't allow re-using a keyring, we always generate one for the + # OSD, this needs to be fixed. This could either be a file (!) or a string + # (!!) or some flags that we would need to compound into a dict so that we + # can convert to JSON (!!!) 
+ secrets = {'cephx_secret': prepare_utils.create_key()} + encrypted = 1 if self.args.dmcrypt else 0 + cephx_lockbox_secret = '' if not encrypted else prepare_utils.create_key() + + if encrypted: + secrets['dmcrypt_key'] = encryption_utils.create_dmcrypt_key() + secrets['cephx_lockbox_secret'] = cephx_lockbox_secret + + cluster_fsid = self.get_cluster_fsid() + + osd_fsid = self.args.osd_fsid or system.generate_uuid() + crush_device_class = self.args.crush_device_class + if crush_device_class: + secrets['crush_device_class'] = crush_device_class + # reuse a given ID if it exists, otherwise create a new ID + self.osd_id = prepare_utils.create_id(osd_fsid, json.dumps(secrets), osd_id=self.args.osd_id) + tags = { + 'ceph.osd_fsid': osd_fsid, + 'ceph.osd_id': self.osd_id, + 'ceph.cluster_fsid': cluster_fsid, + 'ceph.cluster_name': conf.cluster, + 'ceph.crush_device_class': crush_device_class, + 'ceph.osdspec_affinity': prepare_utils.get_osdspec_affinity() + } + if self.args.filestore: + if not self.args.journal: + logger.info(('no journal was specified, creating journal lv ' + 'on {}').format(self.args.data)) + self.args.journal = self.args.data + self.args.journal_size = disk.Size(g=5) + # need to adjust data size/slots for colocated journal + if self.args.data_size: + self.args.data_size -= self.args.journal_size + if self.args.data_slots == 1: + self.args.data_slots = 0 + else: + raise RuntimeError('Can\'t handle multiple filestore OSDs ' + 'with colocated journals yet. Please ' + 'create journal LVs manually') + tags['ceph.cephx_lockbox_secret'] = cephx_lockbox_secret + tags['ceph.encrypted'] = encrypted + + journal_device, journal_uuid, tags = self.setup_device( + 'journal', + self.args.journal, + tags, + self.args.journal_size, + self.args.journal_slots) + + try: + vg_name, lv_name = self.args.data.split('/') + data_lv = api.get_first_lv(filters={'lv_name': lv_name, + 'vg_name': vg_name}) + except ValueError: + data_lv = None + + if not data_lv: + data_lv = self.prepare_data_device('data', osd_fsid) + + tags['ceph.data_device'] = data_lv.lv_path + tags['ceph.data_uuid'] = data_lv.lv_uuid + tags['ceph.vdo'] = api.is_vdo(data_lv.lv_path) + tags['ceph.type'] = 'data' + data_lv.set_tags(tags) + if not journal_device.startswith('/'): + # we got a journal lv, set rest of the tags + api.get_first_lv(filters={'lv_name': lv_name, + 'vg_name': vg_name}).set_tags(tags) + + prepare_filestore( + data_lv.lv_path, + journal_device, + secrets, + tags, + self.osd_id, + osd_fsid, + ) + elif self.args.bluestore: + try: + vg_name, lv_name = self.args.data.split('/') + block_lv = api.get_first_lv(filters={'lv_name': lv_name, + 'vg_name': vg_name}) + except ValueError: + block_lv = None + + if not block_lv: + block_lv = self.prepare_data_device('block', osd_fsid) + + tags['ceph.block_device'] = block_lv.lv_path + tags['ceph.block_uuid'] = block_lv.lv_uuid + tags['ceph.cephx_lockbox_secret'] = cephx_lockbox_secret + tags['ceph.encrypted'] = encrypted + tags['ceph.vdo'] = api.is_vdo(block_lv.lv_path) + + wal_device, wal_uuid, tags = self.setup_device( + 'wal', + self.args.block_wal, + tags, + self.args.block_wal_size, + self.args.block_wal_slots) + db_device, db_uuid, tags = self.setup_device( + 'db', + self.args.block_db, + tags, + self.args.block_db_size, + self.args.block_db_slots) + + tags['ceph.type'] = 'block' + block_lv.set_tags(tags) + + prepare_bluestore( + block_lv.lv_path, + wal_device, + db_device, + secrets, + tags, + self.osd_id, + osd_fsid, + )
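For reference, a representative tag set as written by prepare() above for a plain (unencrypted) bluestore OSD; all values are made up, but the keys are the ceph.* LVM tags that activate and list later query:

    tags = {
        'ceph.osd_id': '0',
        'ceph.osd_fsid': 'f1f58f1d-1a4f-4f5e-9a6b-2d9f0a6c3b7e',
        'ceph.cluster_name': 'ceph',
        'ceph.cluster_fsid': 'd7aa5b34-2c87-4d13-9e0b-8e6f3c1a9b42',
        'ceph.type': 'block',
        'ceph.block_device': '/dev/ceph-block-vg/osd-block-f1f58f1d',
        'ceph.encrypted': 0,
    }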
+ + def main(self): + sub_command_help = dedent(""" + Prepare an OSD by assigning an ID and FSID, registering them with the + cluster, formatting and mounting the volume, and finally by adding all + the metadata to the logical volumes using LVM tags, so that it can + later be discovered. + + Once the OSD is ready, an ad-hoc systemd unit will be enabled so that + it can later get activated and the OSD daemon can get started. + + Encryption is supported via dmcrypt and the --dmcrypt flag. + + Existing logical volume (lv): + + ceph-volume lvm prepare --data {vg/lv} + + Existing block device (a logical volume will be created): + + ceph-volume lvm prepare --data /path/to/device + + Optionally, can consume db and wal devices, partitions or logical + volumes. A device will get a logical volume; partitions and existing + logical volumes will be used as-is: + + ceph-volume lvm prepare --data {vg/lv} --block.wal {partition} --block.db {/path/to/device} + """) + parser = prepare_parser( + prog='ceph-volume lvm prepare', + description=sub_command_help, + ) + if len(self.argv) == 0: + print(sub_command_help) + return + exclude_group_options(parser, argv=self.argv, groups=['filestore', 'bluestore']) + self.args = parser.parse_args(self.argv) + # the unfortunate mix of one superset for both filestore and bluestore + # makes this validation cumbersome + if self.args.filestore: + if not self.args.journal: + raise SystemExit('--journal is required when using --filestore') + # Default to bluestore here since defaulting it in add_argument may + # cause both to be True + if not self.args.bluestore and not self.args.filestore: + self.args.bluestore = True + self.safe_prepare() diff --git a/src/ceph-volume/ceph_volume/devices/lvm/trigger.py b/src/ceph-volume/ceph_volume/devices/lvm/trigger.py new file mode 100644 index 00000000..dc57011d --- /dev/null +++ b/src/ceph-volume/ceph_volume/devices/lvm/trigger.py @@ -0,0 +1,70 @@ +from __future__ import print_function +import argparse +from textwrap import dedent +from ceph_volume.exceptions import SuffixParsingError +from ceph_volume import decorators +from .activate import Activate + + +def parse_osd_id(string): + osd_id = string.split('-', 1)[0] + if not osd_id: + raise SuffixParsingError('OSD id', string) + if osd_id.isdigit(): + return osd_id + raise SuffixParsingError('OSD id', string) + + +def parse_osd_uuid(string): + osd_id = '%s-' % parse_osd_id(string) + # remove the id first + osd_uuid = string.split(osd_id, 1)[-1] + if not osd_uuid: + raise SuffixParsingError('OSD uuid', string) + return osd_uuid
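A quick illustration of the two parsers above on a typical systemd instance string (the uuid is made up):

    data = '0-8715bebd-15c5-49de-ba6f-401086ec7b41'
    assert parse_osd_id(data) == '0'
    assert parse_osd_uuid(data) == '8715bebd-15c5-49de-ba6f-401086ec7b41'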
+ """) + parser = argparse.ArgumentParser( + prog='ceph-volume lvm trigger', + formatter_class=argparse.RawDescriptionHelpFormatter, + description=sub_command_help, + ) + + parser.add_argument( + 'systemd_data', + metavar='SYSTEMD_DATA', + nargs='?', + help='Data from a systemd unit containing ID and UUID of the OSD, like asdf-lkjh-0' + ) + if len(self.argv) == 0: + print(sub_command_help) + return + args = parser.parse_args(self.argv) + osd_id = parse_osd_id(args.systemd_data) + osd_uuid = parse_osd_uuid(args.systemd_data) + Activate(['--auto-detect-objectstore', osd_id, osd_uuid]).main() diff --git a/src/ceph-volume/ceph_volume/devices/lvm/zap.py b/src/ceph-volume/ceph_volume/devices/lvm/zap.py new file mode 100644 index 00000000..21b54b6c --- /dev/null +++ b/src/ceph-volume/ceph_volume/devices/lvm/zap.py @@ -0,0 +1,403 @@ +import argparse +import os +import logging +import time + +from textwrap import dedent + +from ceph_volume import decorators, terminal, process +from ceph_volume.api import lvm as api +from ceph_volume.util import system, encryption, disk, arg_validators, str_to_int, merge_dict +from ceph_volume.util.device import Device +from ceph_volume.systemd import systemctl + +logger = logging.getLogger(__name__) +mlogger = terminal.MultiLogger(__name__) + + +def wipefs(path): + """ + Removes the filesystem from an lv or partition. + + Environment variables supported:: + + * ``CEPH_VOLUME_WIPEFS_TRIES``: Defaults to 8 + * ``CEPH_VOLUME_WIPEFS_INTERVAL``: Defaults to 5 + + """ + tries = str_to_int( + os.environ.get('CEPH_VOLUME_WIPEFS_TRIES', 8) + ) + interval = str_to_int( + os.environ.get('CEPH_VOLUME_WIPEFS_INTERVAL', 5) + ) + + for trying in range(tries): + stdout, stderr, exit_code = process.call([ + 'wipefs', + '--all', + path + ]) + if exit_code != 0: + # this could narrow the retry by poking in the stderr of the output + # to verify that 'probing initialization failed' appears, but + # better to be broad in this retry to prevent missing on + # a different message that needs to be retried as well + terminal.warning( + 'failed to wipefs device, will try again to workaround probable race condition' + ) + time.sleep(interval) + else: + return + raise RuntimeError("could not complete wipefs on device: %s" % path) + + +def zap_data(path): + """ + Clears all data from the given path. Path should be + an absolute path to an lv or partition. + + 10M of data is written to the path to make sure that + there is no trace left of any previous Filesystem. + """ + process.run([ + 'dd', + 'if=/dev/zero', + 'of={path}'.format(path=path), + 'bs=1M', + 'count=10', + 'conv=fsync' + ]) + + +def find_associated_devices(osd_id=None, osd_fsid=None): + """ + From an ``osd_id`` and/or an ``osd_fsid``, filter out all the LVs in the + system that match those tag values, further detect if any partitions are + part of the OSD, and then return the set of LVs and partitions (if any). + """ + lv_tags = {} + if osd_id: + lv_tags['ceph.osd_id'] = osd_id + if osd_fsid: + lv_tags['ceph.osd_fsid'] = osd_fsid + + lvs = api.get_lvs(tags=lv_tags) + if not lvs: + raise RuntimeError('Unable to find any LV for zapping OSD: ' + '%s' % osd_id or osd_fsid) + + devices_to_zap = ensure_associated_lvs(lvs, lv_tags) + return [Device(path) for path in set(devices_to_zap) if path] + + +def ensure_associated_lvs(lvs, lv_tags={}): + """ + Go through each LV and ensure if backing devices (journal, wal, block) + are LVs or partitions, so that they can be accurately reported. 
+ """ + # look for many LVs for each backing type, because it is possible to + # receive a filtering for osd.1, and have multiple failed deployments + # leaving many journals with osd.1 - usually, only a single LV will be + # returned + + journal_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'journal'})) + db_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'db'})) + wal_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'wal'})) + backing_devices = [(journal_lvs, 'journal'), (db_lvs, 'db'), + (wal_lvs, 'wal')] + + verified_devices = [] + + for lv in lvs: + # go through each lv and append it, otherwise query `blkid` to find + # a physical device. Do this for each type (journal,db,wal) regardless + # if they have been processed in the previous LV, so that bad devices + # with the same ID can be caught + for ceph_lvs, _type in backing_devices: + if ceph_lvs: + verified_devices.extend([l.lv_path for l in ceph_lvs]) + continue + + # must be a disk partition, by querying blkid by the uuid we are + # ensuring that the device path is always correct + try: + device_uuid = lv.tags['ceph.%s_uuid' % _type] + except KeyError: + # Bluestore will not have ceph.journal_uuid, and Filestore + # will not not have ceph.db_uuid + continue + + osd_device = disk.get_device_from_partuuid(device_uuid) + if not osd_device: + # if the osd_device is not found by the partuuid, then it is + # not possible to ensure this device exists anymore, so skip it + continue + verified_devices.append(osd_device) + + verified_devices.append(lv.lv_path) + + # reduce the list from all the duplicates that were added + return list(set(verified_devices)) + + +class Zap(object): + + help = 'Removes all data and filesystems from a logical volume or partition.' + + def __init__(self, argv): + self.argv = argv + + def unmount_lv(self, lv): + if lv.tags.get('ceph.cluster_name') and lv.tags.get('ceph.osd_id'): + lv_path = "/var/lib/ceph/osd/{}-{}".format(lv.tags['ceph.cluster_name'], lv.tags['ceph.osd_id']) + else: + lv_path = lv.lv_path + dmcrypt_uuid = lv.lv_uuid + dmcrypt = lv.encrypted + if system.path_is_mounted(lv_path): + mlogger.info("Unmounting %s", lv_path) + system.unmount(lv_path) + if dmcrypt and dmcrypt_uuid: + self.dmcrypt_close(dmcrypt_uuid) + + def zap_lv(self, device): + """ + Device examples: vg-name/lv-name, /dev/vg-name/lv-name + Requirements: Must be a logical volume (LV) + """ + lv = api.get_first_lv(filters={'lv_name': device.lv_name, 'vg_name': + device.vg_name}) + self.unmount_lv(lv) + + wipefs(device.abspath) + zap_data(device.abspath) + + if self.args.destroy: + lvs = api.get_lvs(filters={'vg_name': device.vg_name}) + if lvs == []: + mlogger.info('No LVs left, exiting', device.vg_name) + return + elif len(lvs) <= 1: + mlogger.info('Only 1 LV left in VG, will proceed to destroy ' + 'volume group %s', device.vg_name) + api.remove_vg(device.vg_name) + else: + mlogger.info('More than 1 LV left in VG, will proceed to ' + 'destroy LV only') + mlogger.info('Removing LV because --destroy was given: %s', + device.abspath) + api.remove_lv(device.abspath) + elif lv: + # just remove all lvm metadata, leaving the LV around + lv.clear_tags() + + def zap_partition(self, device): + """ + Device example: /dev/sda1 + Requirements: Must be a partition + """ + if device.is_encrypted: + # find the holder + holders = [ + '/dev/%s' % holder for holder in device.sys_api.get('holders', []) + ] + for mapper_uuid in os.listdir('/dev/mapper'): + mapper_path = os.path.join('/dev/mapper', mapper_uuid) + if 
os.path.realpath(mapper_path) in holders: + self.dmcrypt_close(mapper_uuid) + + if system.device_is_mounted(device.abspath): + mlogger.info("Unmounting %s", device.abspath) + system.unmount(device.abspath) + + wipefs(device.abspath) + zap_data(device.abspath) + + if self.args.destroy: + mlogger.info("Destroying partition since --destroy was used: %s" % device.abspath) + disk.remove_partition(device) + + def zap_lvm_member(self, device): + """ + An LVM member may have more than one LV and or VG, for example if it is + a raw device with multiple partitions each belonging to a different LV + + Device example: /dev/sda + Requirements: An LV or VG present in the device, making it an LVM member + """ + for lv in device.lvs: + if lv.lv_name: + mlogger.info('Zapping lvm member {}. lv_path is {}'.format(device.abspath, lv.lv_path)) + self.zap_lv(Device(lv.lv_path)) + else: + vg = api.get_first_vg(filters={'vg_name': lv.vg_name}) + if vg: + mlogger.info('Found empty VG {}, removing'.format(vg.vg_name)) + api.remove_vg(vg.vg_name) + + + + def zap_raw_device(self, device): + """ + Any whole (raw) device passed in as input will be processed here, + checking for LVM membership and partitions (if any). + + Device example: /dev/sda + Requirements: None + """ + if not self.args.destroy: + # the use of dd on a raw device causes the partition table to be + # destroyed + mlogger.warning( + '--destroy was not specified, but zapping a whole device will remove the partition table' + ) + + # look for partitions and zap those + for part_name in device.sys_api.get('partitions', {}).keys(): + self.zap_partition(Device('/dev/%s' % part_name)) + + wipefs(device.abspath) + zap_data(device.abspath) + + @decorators.needs_root + def zap(self, devices=None): + devices = devices or self.args.devices + + for device in devices: + mlogger.info("Zapping: %s", device.abspath) + if device.is_mapper: + terminal.error("Refusing to zap the mapper device: {}".format(device)) + raise SystemExit(1) + if device.is_lvm_member: + self.zap_lvm_member(device) + if device.is_lv: + self.zap_lv(device) + if device.is_partition: + self.zap_partition(device) + if device.is_device: + self.zap_raw_device(device) + + if self.args.devices: + terminal.success( + "Zapping successful for: %s" % ", ".join([str(d) for d in self.args.devices]) + ) + else: + identifier = self.args.osd_id or self.args.osd_fsid + terminal.success( + "Zapping successful for OSD: %s" % identifier + ) + + @decorators.needs_root + def zap_osd(self): + if self.args.osd_id and not self.args.no_systemd: + osd_is_running = systemctl.osd_is_active(self.args.osd_id) + if osd_is_running: + mlogger.error("OSD ID %s is running, stop it with:" % self.args.osd_id) + mlogger.error("systemctl stop ceph-osd@%s" % self.args.osd_id) + raise SystemExit("Unable to zap devices associated with OSD ID: %s" % self.args.osd_id) + devices = find_associated_devices(self.args.osd_id, self.args.osd_fsid) + self.zap(devices) + + def dmcrypt_close(self, dmcrypt_uuid): + dmcrypt_path = "/dev/mapper/{}".format(dmcrypt_uuid) + mlogger.info("Closing encrypted path %s", dmcrypt_path) + encryption.dmcrypt_close(dmcrypt_path) + + def main(self): + sub_command_help = dedent(""" + Zaps the given logical volume(s), raw device(s) or partition(s) for reuse by ceph-volume. + If given a path to a logical volume it must be in the format of vg/lv. Any + filesystems present on the given device, vg/lv, or partition will be removed and + all data will be purged. 
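+
+        Note that mapper devices (e.g. /dev/mapper/*) are refused outright;
+        pass the underlying vg/lv or partition instead.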
+ + If the logical volume, raw device or partition is being used for any ceph related + mount points they will be unmounted. + + However, the lv or partition will be kept intact. + + Example calls for supported scenarios: + + Zapping a logical volume: + + ceph-volume lvm zap {vg name/lv name} + + Zapping a partition: + + ceph-volume lvm zap /dev/sdc1 + + Zapping many raw devices: + + ceph-volume lvm zap /dev/sda /dev/sdb /db/sdc + + Zapping devices associated with an OSD ID: + + ceph-volume lvm zap --osd-id 1 + + Optionally include the OSD FSID + + ceph-volume lvm zap --osd-id 1 --osd-fsid 55BD4219-16A7-4037-BC20-0F158EFCC83D + + If the --destroy flag is given and you are zapping a raw device or partition + then all vgs and lvs that exist on that raw device or partition will be destroyed. + + This is especially useful if a raw device or partition was used by ceph-volume lvm create + or ceph-volume lvm prepare commands previously and now you want to reuse that device. + + For example: + + ceph-volume lvm zap /dev/sda --destroy + + If the --destroy flag is given and you are zapping an lv then the lv is still + kept intact for reuse. + + """) + parser = argparse.ArgumentParser( + prog='ceph-volume lvm zap', + formatter_class=argparse.RawDescriptionHelpFormatter, + description=sub_command_help, + ) + + parser.add_argument( + 'devices', + metavar='DEVICES', + nargs='*', + type=arg_validators.ValidDevice(gpt_ok=True), + default=[], + help='Path to one or many lv (as vg/lv), partition (as /dev/sda1) or device (as /dev/sda)' + ) + + parser.add_argument( + '--destroy', + action='store_true', + default=False, + help='Destroy all volume groups and logical volumes if you are zapping a raw device or partition', + ) + + parser.add_argument( + '--osd-id', + help='Specify an OSD ID to detect associated devices for zapping', + ) + + parser.add_argument( + '--osd-fsid', + help='Specify an OSD FSID to detect associated devices for zapping', + ) + + parser.add_argument( + '--no-systemd', + dest='no_systemd', + action='store_true', + help='Skip systemd unit checks', + ) + + if len(self.argv) == 0: + print(sub_command_help) + return + + self.args = parser.parse_args(self.argv) + + if self.args.osd_id or self.args.osd_fsid: + self.zap_osd() + else: + self.zap() diff --git a/src/ceph-volume/ceph_volume/devices/raw/__init__.py b/src/ceph-volume/ceph_volume/devices/raw/__init__.py new file mode 100644 index 00000000..dd0a6534 --- /dev/null +++ b/src/ceph-volume/ceph_volume/devices/raw/__init__.py @@ -0,0 +1 @@ +from .main import Raw # noqa diff --git a/src/ceph-volume/ceph_volume/devices/raw/activate.py b/src/ceph-volume/ceph_volume/devices/raw/activate.py new file mode 100644 index 00000000..94f54f1a --- /dev/null +++ b/src/ceph-volume/ceph_volume/devices/raw/activate.py @@ -0,0 +1,137 @@ +from __future__ import print_function +import argparse +import logging +import os +from textwrap import dedent +from ceph_volume import process, conf, decorators, terminal +from ceph_volume.util import system +from ceph_volume.util import prepare as prepare_utils +from .list import direct_report + + +logger = logging.getLogger(__name__) + +def activate_bluestore(meta, tmpfs, systemd, block_wal=None, block_db=None): + # find the osd + osd_id = meta['osd_id'] + osd_uuid = meta['osd_uuid'] + + # mount on tmpfs the osd directory + osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id) + if not system.path_is_mounted(osd_path): + # mkdir -p and mount as tmpfs + prepare_utils.create_osd_path(osd_id, tmpfs=tmpfs) + + # XXX This 
needs to be removed once ceph-bluestore-tool can deal with + # symlinks that exist in the osd dir + for link_name in ['block', 'block.db', 'block.wal']: + link_path = os.path.join(osd_path, link_name) + if os.path.exists(link_path): + os.unlink(os.path.join(osd_path, link_name)) + + # Once symlinks are removed, the osd dir can be 'primed again. chown first, + # regardless of what currently exists so that ``prime-osd-dir`` can succeed + # even if permissions are somehow messed up + system.chown(osd_path) + prime_command = [ + 'ceph-bluestore-tool', + 'prime-osd-dir', '--dev', meta['device'], + '--path', osd_path, + '--no-mon-config'] + process.run(prime_command) + + # always re-do the symlink regardless if it exists, so that the block, + # block.wal, and block.db devices that may have changed can be mapped + # correctly every time + prepare_utils.link_block( meta['device'], osd_id) + + if block_wal: + prepare_utils.link_wal(block_wal, osd_id, osd_uuid) + + if block_db: + prepare_utils.link_db(block_db, osd_id, osd_uuid) + + system.chown(osd_path) + terminal.success("ceph-volume raw activate successful for osd ID: %s" % osd_id) + + +class Activate(object): + + help = 'Discover and prepare a data directory for a (BlueStore) OSD on a raw device' + + def __init__(self, argv): + self.argv = argv + self.args = None + + @decorators.needs_root + def activate(self, devices, tmpfs, systemd, block_wal, block_db): + """ + :param args: The parsed arguments coming from the CLI + """ + assert devices + found = direct_report(devices) + + for osd_id, meta in found.items(): + logger.info('Activating osd.%s uuid %s cluster %s' % ( + osd_id, meta['osd_uuid'], meta['ceph_fsid'])) + activate_bluestore(meta, + tmpfs=tmpfs, + systemd=systemd, + block_wal=block_wal, + block_db=block_db) + + def main(self): + sub_command_help = dedent(""" + Activate (BlueStore) OSD on a raw block device based on the + device label (normally the first block of the device). + + ceph-volume raw activate --device /dev/sdb + + The device(s) associated with the OSD needs to have been prepared + previously, so that all needed tags and metadata exist. 
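+
+        The db and wal devices may also be passed explicitly; a sketch with
+        illustrative paths (all flags are defined below)::
+
+            ceph-volume raw activate --device /dev/sdb --block.db /dev/sdc1 --no-systemd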
+ """) + parser = argparse.ArgumentParser( + prog='ceph-volume raw activate', + formatter_class=argparse.RawDescriptionHelpFormatter, + description=sub_command_help, + ) + parser.add_argument( + '--device', + nargs='+', + help='The device for the OSD to start' + ) + parser.add_argument( + '--no-systemd', + dest='no_systemd', + action='store_true', + help='Skip creating and enabling systemd units and starting OSD services' + ) + parser.add_argument( + '--block.db', + dest='block_db', + help='Path to bluestore block.db block device' + ) + parser.add_argument( + '--block.wal', + dest='block_wal', + help='Path to bluestore block.wal block device' + ) + parser.add_argument( + '--no-tmpfs', + action='store_true', + help='Do not use a tmpfs mount for OSD data dir' + ) + + if not self.argv: + print(sub_command_help) + return + args = parser.parse_args(self.argv) + self.args = args + if not args.no_systemd: + terminal.error('systemd support not yet implemented') + raise SystemExit(1) + self.activate(args.device, + tmpfs=not args.no_tmpfs, + systemd=not self.args.no_systemd, + block_wal=self.args.block_wal, + block_db=self.args.block_db) diff --git a/src/ceph-volume/ceph_volume/devices/raw/common.py b/src/ceph-volume/ceph_volume/devices/raw/common.py new file mode 100644 index 00000000..08cfd028 --- /dev/null +++ b/src/ceph-volume/ceph_volume/devices/raw/common.py @@ -0,0 +1,49 @@ +import argparse +from ceph_volume.util import arg_validators + +def create_parser(prog, description): + """ + Both prepare and create share the same parser, those are defined here to + avoid duplication + """ + parser = argparse.ArgumentParser( + prog=prog, + formatter_class=argparse.RawDescriptionHelpFormatter, + description=description, + ) + parser.add_argument( + '--data', + required=True, + type=arg_validators.ValidDevice(as_string=True), + help='a raw device to use for the OSD', + ) + parser.add_argument( + '--bluestore', + action='store_true', + help='Use BlueStore backend') + parser.add_argument( + '--crush-device-class', + dest='crush_device_class', + help='Crush device class to assign this OSD to', + ) + parser.add_argument( + '--no-tmpfs', + action='store_true', + help='Do not use a tmpfs mount for OSD data dir' + ) + parser.add_argument( + '--block.db', + dest='block_db', + help='Path to bluestore block.db block device' + ) + parser.add_argument( + '--block.wal', + dest='block_wal', + help='Path to bluestore block.wal block device' + ) + parser.add_argument( + '--dmcrypt', + action='store_true', + help='Enable device encryption via dm-crypt', + ) + return parser diff --git a/src/ceph-volume/ceph_volume/devices/raw/list.py b/src/ceph-volume/ceph_volume/devices/raw/list.py new file mode 100644 index 00000000..bb15bf19 --- /dev/null +++ b/src/ceph-volume/ceph_volume/devices/raw/list.py @@ -0,0 +1,136 @@ +from __future__ import print_function +import argparse +import json +import logging +from textwrap import dedent +from ceph_volume import decorators, process + + +logger = logging.getLogger(__name__) + +def direct_report(devices): + """ + Other non-cli consumers of listing information will want to consume the + report without the need to parse arguments or other flags. This helper + bypasses the need to deal with the class interface which is meant for cli + handling. 
+ """ + _list = List([]) + return _list.generate(devices) + + +class List(object): + + help = 'list BlueStore OSDs on raw devices' + + def __init__(self, argv): + self.argv = argv + + def generate(self, devs=None): + if not devs: + logger.debug('Listing block devices via lsblk...') + devs = [] + # adding '--inverse' allows us to get the mapper devices list in that command output. + # not listing root devices containing partitions shouldn't have side effect since we are + # in `ceph-volume raw` context. + # + # example: + # running `lsblk --paths --nodeps --output=NAME --noheadings` doesn't allow to get the mapper list + # because the output is like following : + # + # $ lsblk --paths --nodeps --output=NAME --noheadings + # /dev/sda + # /dev/sdb + # /dev/sdc + # /dev/sdd + # + # the dmcrypt mappers are hidden because of the `--nodeps` given they are displayed as a dependency. + # + # $ lsblk --paths --output=NAME --noheadings + # /dev/sda + # |-/dev/mapper/ceph-3b52c90d-6548-407d-bde1-efd31809702f-sda-block-dmcrypt + # `-/dev/mapper/ceph-3b52c90d-6548-407d-bde1-efd31809702f-sda-db-dmcrypt + # /dev/sdb + # /dev/sdc + # /dev/sdd + # + # adding `--inverse` is a trick to get around this issue, the counterpart is that we can't list root devices if they contain + # at least one partition but this shouldn't be an issue in `ceph-volume raw` context given we only deal with raw devices. + out, err, ret = process.call([ + 'lsblk', '--paths', '--nodeps', '--output=NAME', '--noheadings', '--inverse' + ]) + assert not ret + devs = out + result = {} + for dev in devs: + logger.debug('Examining %s' % dev) + # bluestore? + out, err, ret = process.call([ + 'ceph-bluestore-tool', 'show-label', + '--dev', dev], verbose_on_failure=False) + if ret: + logger.debug('No label on %s' % dev) + continue + oj = json.loads(''.join(out)) + if dev not in oj: + continue + if oj[dev]['description'] != 'main': + # ignore non-main devices, for now + continue + whoami = oj[dev]['whoami'] + result[whoami] = { + 'type': 'bluestore', + 'osd_id': int(whoami), + } + for f in ['osd_uuid', 'ceph_fsid']: + result[whoami][f] = oj[dev][f] + result[whoami]['device'] = dev + return result + + @decorators.needs_root + def list(self, args): + report = self.generate(args.device) + if args.format == 'json': + print(json.dumps(report, indent=4, sort_keys=True)) + else: + if not report: + raise SystemExit('No valid Ceph devices found') + raise RuntimeError('not implemented yet') + + def main(self): + sub_command_help = dedent(""" + List OSDs on raw devices with raw device labels (usually the first + block of the device). 
+ + Full listing of all identifiable (currently, BlueStore) OSDs + on raw devices: + + ceph-volume raw list + + List a particular device, reporting all metadata about it:: + + ceph-volume raw list /dev/sda1 + + """) + parser = argparse.ArgumentParser( + prog='ceph-volume raw list', + formatter_class=argparse.RawDescriptionHelpFormatter, + description=sub_command_help, + ) + + parser.add_argument( + 'device', + metavar='DEVICE', + nargs='*', + help='Path to a device like /dev/sda1' + ) + + parser.add_argument( + '--format', + help='output format, defaults to "pretty"', + default='json', + choices=['json', 'pretty'], + ) + + args = parser.parse_args(self.argv) + self.list(args) diff --git a/src/ceph-volume/ceph_volume/devices/raw/main.py b/src/ceph-volume/ceph_volume/devices/raw/main.py new file mode 100644 index 00000000..efa25109 --- /dev/null +++ b/src/ceph-volume/ceph_volume/devices/raw/main.py @@ -0,0 +1,40 @@ +import argparse +from textwrap import dedent +from ceph_volume import terminal +from . import list +from . import prepare +from . import activate + +class Raw(object): + + help = 'Manage single-device OSDs on raw block devices' + + _help = dedent(""" + Manage a single-device OSD on a raw block device. Rely on + the existing device labels to store any needed metadata. + + {sub_help} + """) + + mapper = { + 'list': list.List, + 'prepare': prepare.Prepare, + 'activate': activate.Activate, + } + + def __init__(self, argv): + self.argv = argv + + def print_help(self, sub_help): + return self._help.format(sub_help=sub_help) + + def main(self): + terminal.dispatch(self.mapper, self.argv) + parser = argparse.ArgumentParser( + prog='ceph-volume raw', + formatter_class=argparse.RawDescriptionHelpFormatter, + description=self.print_help(terminal.subhelp(self.mapper)), + ) + parser.parse_args(self.argv) + if len(self.argv) <= 1: + return parser.print_help() diff --git a/src/ceph-volume/ceph_volume/devices/raw/prepare.py b/src/ceph-volume/ceph_volume/devices/raw/prepare.py new file mode 100644 index 00000000..3c96eeda --- /dev/null +++ b/src/ceph-volume/ceph_volume/devices/raw/prepare.py @@ -0,0 +1,169 @@ +from __future__ import print_function +import json +import logging +import os +from textwrap import dedent +from ceph_volume.util import prepare as prepare_utils +from ceph_volume.util import encryption as encryption_utils +from ceph_volume.util import disk +from ceph_volume.util import system +from ceph_volume import decorators, terminal +from ceph_volume.devices.lvm.common import rollback_osd +from .common import create_parser + +logger = logging.getLogger(__name__) + +def prepare_dmcrypt(key, device, device_type, fsid): + """ + Helper for devices that are encrypted. The operations needed for + block, db, wal, or data/journal devices are all the same + """ + if not device: + return '' + kname = disk.lsblk(device)['KNAME'] + mapping = 'ceph-{}-{}-{}-dmcrypt'.format(fsid, kname, device_type) + # format data device + encryption_utils.luks_format( + key, + device + ) + encryption_utils.luks_open( + key, + device, + mapping + ) + + return '/dev/mapper/{}'.format(mapping) + +def prepare_bluestore(block, wal, db, secrets, osd_id, fsid, tmpfs): + """ + :param block: The name of the logical volume for the bluestore data + :param wal: a regular/plain disk or logical volume, to be used for block.wal + :param db: a regular/plain disk or logical volume, to be used for block.db + :param secrets: A dict with the secrets needed to create the osd (e.g. 
cephx)
+    :param osd_id: The OSD id
+    :param fsid: The OSD fsid, also known as the OSD UUID
+    :param tmpfs: Whether to mount the OSD data directory on tmpfs
+    """
+    cephx_secret = secrets.get('cephx_secret', prepare_utils.create_key())
+
+    if secrets.get('dmcrypt_key'):
+        key = secrets['dmcrypt_key']
+        block = prepare_dmcrypt(key, block, 'block', fsid)
+        wal = prepare_dmcrypt(key, wal, 'wal', fsid)
+        db = prepare_dmcrypt(key, db, 'db', fsid)
+
+    # create the directory
+    prepare_utils.create_osd_path(osd_id, tmpfs=tmpfs)
+    # symlink the block
+    prepare_utils.link_block(block, osd_id)
+    # get the latest monmap
+    prepare_utils.get_monmap(osd_id)
+    # write the OSD keyring if it doesn't exist already
+    prepare_utils.write_keyring(osd_id, cephx_secret)
+    # prepare the osd filesystem
+    prepare_utils.osd_mkfs_bluestore(
+        osd_id, fsid,
+        keyring=cephx_secret,
+        wal=wal,
+        db=db
+    )
+
+
+class Prepare(object):
+
+    help = 'Format a raw device and associate it with a (BlueStore) OSD'
+
+    def __init__(self, argv):
+        self.argv = argv
+        self.osd_id = None
+
+    def safe_prepare(self, args=None):
+        """
+        An intermediate step between `main()` and `prepare()` so that we can
+        capture the `self.osd_id` in case we need to rollback
+
+        :param args: Injected args, usually from `raw create` which compounds
+                     both `prepare` and `create`
+        """
+        if args is not None:
+            self.args = args
+        try:
+            self.prepare()
+        except Exception:
+            logger.exception('raw prepare was unable to complete')
+            logger.info('will rollback OSD ID creation')
+            rollback_osd(self.args, self.osd_id)
+            raise
+        dmcrypt_log = 'dmcrypt' if self.args.dmcrypt else 'clear'
+        terminal.success("ceph-volume raw {} prepare successful for: {}".format(dmcrypt_log, self.args.data))
+
+
+    @decorators.needs_root
+    def prepare(self):
+        secrets = {'cephx_secret': prepare_utils.create_key()}
+        encrypted = 1 if self.args.dmcrypt else 0
+        cephx_lockbox_secret = '' if not encrypted else prepare_utils.create_key()
+
+        if encrypted:
+            secrets['dmcrypt_key'] = os.getenv('CEPH_VOLUME_DMCRYPT_SECRET')
+            secrets['cephx_lockbox_secret'] = cephx_lockbox_secret  # dummy value so that `ceph osd new` does not complain
+
+        osd_fsid = system.generate_uuid()
+        crush_device_class = self.args.crush_device_class
+        if crush_device_class:
+            secrets['crush_device_class'] = crush_device_class
+        tmpfs = not self.args.no_tmpfs
+        wal = ""
+        db = ""
+        if self.args.block_wal:
+            wal = self.args.block_wal
+        if self.args.block_db:
+            db = self.args.block_db
+
+        # reuse a given ID if it exists, otherwise create a new ID
+        self.osd_id = prepare_utils.create_id(
+            osd_fsid, json.dumps(secrets))
+
+        prepare_bluestore(
+            self.args.data,
+            wal,
+            db,
+            secrets,
+            self.osd_id,
+            osd_fsid,
+            tmpfs,
+        )
+
+    def main(self):
+        sub_command_help = dedent("""
+        Prepare an OSD by assigning an ID and FSID, registering them with the
+        cluster, and formatting the volume.
+
+        Once the OSD is ready, an ad-hoc systemd unit will be enabled so that
+        it can later get activated and the OSD daemon can get started.
+
+            ceph-volume raw prepare --bluestore --data {device}
+
+        DB and WAL devices are supported.
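+
+        Encryption via dm-crypt can be requested with --dmcrypt; a sketch,
+        assuming the secret is provided through the CEPH_VOLUME_DMCRYPT_SECRET
+        environment variable (enforced below)::
+
+            CEPH_VOLUME_DMCRYPT_SECRET=<key> ceph-volume raw prepare --bluestore --data {device} --dmcrypt
+
+        With both a block.db and a block.wal device::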
+ + ceph-volume raw prepare --bluestore --data {device} --block.db {device} --block.wal {device} + + """) + parser = create_parser( + prog='ceph-volume raw prepare', + description=sub_command_help, + ) + if not self.argv: + print(sub_command_help) + return + self.args = parser.parse_args(self.argv) + if not self.args.bluestore: + terminal.error('must specify --bluestore (currently the only supported backend)') + raise SystemExit(1) + if self.args.dmcrypt and not os.getenv('CEPH_VOLUME_DMCRYPT_SECRET'): + terminal.error('encryption was requested (--dmcrypt) but environment variable ' \ + 'CEPH_VOLUME_DMCRYPT_SECRET is not set, you must set ' \ + 'this variable to provide a dmcrypt secret.') + raise SystemExit(1) + + self.safe_prepare(self.args) diff --git a/src/ceph-volume/ceph_volume/devices/simple/__init__.py b/src/ceph-volume/ceph_volume/devices/simple/__init__.py new file mode 100644 index 00000000..280e130e --- /dev/null +++ b/src/ceph-volume/ceph_volume/devices/simple/__init__.py @@ -0,0 +1 @@ +from .main import Simple # noqa diff --git a/src/ceph-volume/ceph_volume/devices/simple/activate.py b/src/ceph-volume/ceph_volume/devices/simple/activate.py new file mode 100644 index 00000000..7439141c --- /dev/null +++ b/src/ceph-volume/ceph_volume/devices/simple/activate.py @@ -0,0 +1,302 @@ +from __future__ import print_function +import argparse +import base64 +import glob +import json +import logging +import os +from textwrap import dedent +from ceph_volume import process, decorators, terminal, conf +from ceph_volume.util import system, disk +from ceph_volume.util import encryption as encryption_utils +from ceph_volume.util import prepare as prepare_utils +from ceph_volume.systemd import systemctl + + +logger = logging.getLogger(__name__) +mlogger = terminal.MultiLogger(__name__) + + +class Activate(object): + + help = 'Enable systemd units to mount configured devices and start a Ceph OSD' + + def __init__(self, argv, from_trigger=False): + self.argv = argv + self.from_trigger = from_trigger + self.skip_systemd = False + + def validate_devices(self, json_config): + """ + ``json_config`` is the loaded dictionary coming from the JSON file. It is usually mixed with + other non-device items, but for sakes of comparison it doesn't really matter. This method is + just making sure that the keys needed exist + """ + devices = json_config.keys() + try: + objectstore = json_config['type'] + except KeyError: + if {'data', 'journal'}.issubset(set(devices)): + logger.warning( + '"type" key not found, assuming "filestore" since journal key is present' + ) + objectstore = 'filestore' + else: + logger.warning( + '"type" key not found, assuming "bluestore" since journal key is not present' + ) + objectstore = 'bluestore' + + # Go through all the device combinations that are absolutely required, + # raise an error describing what was expected and what was found + # otherwise. + if objectstore == 'filestore': + if {'data', 'journal'}.issubset(set(devices)): + return True + else: + found = [i for i in devices if i in ['data', 'journal']] + mlogger.error("Required devices (data, and journal) not present for filestore") + mlogger.error('filestore devices found: %s', found) + raise RuntimeError('Unable to activate filestore OSD due to missing devices') + else: + # This is a bit tricky, with newer bluestore we don't need data, older implementations + # do (e.g. with ceph-disk). ceph-volume just uses a tmpfs that doesn't require data. 
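+            # The check below expects both 'block' and 'data' keys, which is
+            # what a ceph-disk deployment (the only kind `simple` scans)
+            # persists in its JSON file.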
+ if {'block', 'data'}.issubset(set(devices)): + return True + else: + bluestore_devices = ['block.db', 'block.wal', 'block', 'data'] + found = [i for i in devices if i in bluestore_devices] + mlogger.error("Required devices (block and data) not present for bluestore") + mlogger.error('bluestore devices found: %s', found) + raise RuntimeError('Unable to activate bluestore OSD due to missing devices') + + def get_device(self, uuid): + """ + If a device is encrypted, it will decrypt/open and return the mapper + path, if it isn't encrypted it will just return the device found that + is mapped to the uuid. This will make it easier for the caller to + avoid if/else to check if devices need decrypting + + :param uuid: The partition uuid of the device (PARTUUID) + """ + device = disk.get_device_from_partuuid(uuid) + + # If device is not found, it is fine to return an empty string from the + # helper that finds `device`. If it finds anything and it is not + # encrypted, just return what was found + if not self.is_encrypted or not device: + return device + + if self.encryption_type == 'luks': + encryption_utils.luks_open(self.dmcrypt_secret, device, uuid) + else: + encryption_utils.plain_open(self.dmcrypt_secret, device, uuid) + + return '/dev/mapper/%s' % uuid + + def enable_systemd_units(self, osd_id, osd_fsid): + """ + * disables the ceph-disk systemd units to prevent them from running when + a UDEV event matches Ceph rules + * creates the ``simple`` systemd units to handle the activation and + startup of the OSD with ``osd_id`` and ``osd_fsid`` + * enables the OSD systemd unit and finally starts the OSD. + """ + if not self.from_trigger and not self.skip_systemd: + # means it was scanned and now activated directly, so ensure that + # ceph-disk units are disabled, and that the `simple` systemd unit + # is created and enabled + + # enable the ceph-volume unit for this OSD + systemctl.enable_volume(osd_id, osd_fsid, 'simple') + + # disable any/all ceph-disk units + systemctl.mask_ceph_disk() + terminal.warning( + ('All ceph-disk systemd units have been disabled to ' + 'prevent OSDs getting triggered by UDEV events') + ) + else: + terminal.info('Skipping enabling of `simple` systemd unit') + terminal.info('Skipping masking of ceph-disk systemd units') + + if not self.skip_systemd: + # enable the OSD + systemctl.enable_osd(osd_id) + + # start the OSD + systemctl.start_osd(osd_id) + else: + terminal.info( + 'Skipping enabling and starting OSD simple systemd unit because --no-systemd was used' + ) + + @decorators.needs_root + def activate(self, args): + with open(args.json_config, 'r') as fp: + osd_metadata = json.load(fp) + + # Make sure that required devices are configured + self.validate_devices(osd_metadata) + + osd_id = osd_metadata.get('whoami', args.osd_id) + osd_fsid = osd_metadata.get('fsid', args.osd_fsid) + data_uuid = osd_metadata.get('data', {}).get('uuid') + conf.cluster = osd_metadata.get('cluster_name', 'ceph') + if not data_uuid: + raise RuntimeError( + 'Unable to activate OSD %s - no "uuid" key found for data' % args.osd_id + ) + + # Encryption detection, and capturing of the keys to decrypt + self.is_encrypted = osd_metadata.get('encrypted', False) + self.encryption_type = osd_metadata.get('encryption_type') + if self.is_encrypted: + lockbox_secret = osd_metadata.get('lockbox.keyring') + # write the keyring always so that we can unlock + encryption_utils.write_lockbox_keyring(osd_id, osd_fsid, lockbox_secret) + # Store the secret around so that the decrypt method can reuse + 
raw_dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid) + # Note how both these calls need b64decode. For some reason, the + # way ceph-disk creates these keys, it stores them in the monitor + # *undecoded*, requiring this decode call again. The lvm side of + # encryption doesn't need it, so we are assuming here that anything + # that `simple` scans, will come from ceph-disk and will need this + # extra decode call here + self.dmcrypt_secret = base64.b64decode(raw_dmcrypt_secret) + + cluster_name = osd_metadata.get('cluster_name', 'ceph') + osd_dir = '/var/lib/ceph/osd/%s-%s' % (cluster_name, osd_id) + + # XXX there is no support for LVM here + data_device = self.get_device(data_uuid) + + if not data_device: + raise RuntimeError("osd fsid {} doesn't exist, this file will " + "be skipped, consider cleaning legacy " + "json file {}".format(osd_metadata['fsid'], args.json_config)) + + journal_device = self.get_device(osd_metadata.get('journal', {}).get('uuid')) + block_device = self.get_device(osd_metadata.get('block', {}).get('uuid')) + block_db_device = self.get_device(osd_metadata.get('block.db', {}).get('uuid')) + block_wal_device = self.get_device(osd_metadata.get('block.wal', {}).get('uuid')) + + if not system.device_is_mounted(data_device, destination=osd_dir): + if osd_metadata.get('type') == 'filestore': + prepare_utils.mount_osd(data_device, osd_id) + else: + process.run(['mount', '-v', data_device, osd_dir]) + + device_map = { + 'journal': journal_device, + 'block': block_device, + 'block.db': block_db_device, + 'block.wal': block_wal_device + } + + for name, device in device_map.items(): + if not device: + continue + # always re-do the symlink regardless if it exists, so that the journal + # device path that may have changed can be mapped correctly every time + destination = os.path.join(osd_dir, name) + process.run(['ln', '-snf', device, destination]) + + # make sure that the journal has proper permissions + system.chown(device) + + self.enable_systemd_units(osd_id, osd_fsid) + + terminal.success('Successfully activated OSD %s with FSID %s' % (osd_id, osd_fsid)) + + def main(self): + sub_command_help = dedent(""" + Activate OSDs by mounting devices previously configured to their + appropriate destination:: + + ceph-volume simple activate {ID} {FSID} + + Or using a JSON file directly:: + + ceph-volume simple activate --file /etc/ceph/osd/{ID}-{FSID}.json + + The OSD must have been "scanned" previously (see ``ceph-volume simple + scan``), so that all needed OSD device information and metadata exist. 
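+
+        All previously scanned OSDs can be activated at once with the --all
+        flag (defined below)::
+
+            ceph-volume simple activate --all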
+
+        A previously scanned OSD would exist like::
+
+            /etc/ceph/osd/{ID}-{FSID}.json
+
+
+        Environment variables supported:
+
+        CEPH_VOLUME_SIMPLE_JSON_DIR: Directory location for scanned OSD JSON configs
+        """)
+        parser = argparse.ArgumentParser(
+            prog='ceph-volume simple activate',
+            formatter_class=argparse.RawDescriptionHelpFormatter,
+            description=sub_command_help,
+        )
+        parser.add_argument(
+            'osd_id',
+            metavar='ID',
+            nargs='?',
+            help='The ID of the OSD, usually an integer, like 0'
+        )
+        parser.add_argument(
+            'osd_fsid',
+            metavar='FSID',
+            nargs='?',
+            help='The FSID of the OSD, similar to a SHA1'
+        )
+        parser.add_argument(
+            '--all',
+            help='Activate all OSDs with an OSD JSON config',
+            action='store_true',
+            default=False,
+        )
+        parser.add_argument(
+            '--file',
+            help='The path to a JSON file, from a scanned OSD'
+        )
+        parser.add_argument(
+            '--no-systemd',
+            dest='skip_systemd',
+            action='store_true',
+            help='Skip creating and enabling systemd units and starting OSD services',
+        )
+        if len(self.argv) == 0:
+            print(sub_command_help)
+            return
+        args = parser.parse_args(self.argv)
+        if not args.file and not args.all:
+            if not args.osd_id and not args.osd_fsid:
+                terminal.error('ID and FSID are required to find the right OSD to activate')
+                terminal.error('from a scanned OSD location in /etc/ceph/osd/')
+                raise RuntimeError('Unable to activate without both ID and FSID')
+        # don't allow a CLI flag to specify the JSON dir, because that might
+        # implicitly indicate that it would be possible to activate a json file
+        # at a non-default location which would not work at boot time if the
+        # custom location is not exposed through an ENV var
+        self.skip_systemd = args.skip_systemd
+        json_dir = os.environ.get('CEPH_VOLUME_SIMPLE_JSON_DIR', '/etc/ceph/osd/')
+        if args.all:
+            if args.file or args.osd_id:
+                mlogger.warn('--all was passed, ignoring --file and ID/FSID arguments')
+            json_configs = glob.glob('{}/*.json'.format(json_dir))
+            for json_config in json_configs:
+                mlogger.info('activating OSD specified in {}'.format(json_config))
+                args.json_config = json_config
+                try:
+                    self.activate(args)
+                except RuntimeError as e:
+                    # str(e), because Exception.message does not exist on Python 3
+                    terminal.warning(str(e))
+        else:
+            if args.file:
+                json_config = args.file
+            else:
+                json_config = os.path.join(json_dir, '%s-%s.json' % (args.osd_id, args.osd_fsid))
+            if not os.path.exists(json_config):
+                raise RuntimeError('Expected JSON config path not found: %s' % json_config)
+            args.json_config = json_config
+            self.activate(args)
diff --git a/src/ceph-volume/ceph_volume/devices/simple/main.py b/src/ceph-volume/ceph_volume/devices/simple/main.py
new file mode 100644
index 00000000..2119963f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/devices/simple/main.py
@@ -0,0 +1,41 @@
+import argparse
+from textwrap import dedent
+from ceph_volume import terminal
+from . import scan
+from . import activate
+from . import trigger
+
+
+class Simple(object):
+
+    help = 'Manage already deployed OSDs with ceph-volume'
+
+    _help = dedent("""
+    Take over a deployed OSD, persisting its metadata in /etc/ceph/osd/ so that it can be managed
+    with ceph-volume directly. Avoids UDEV and ceph-disk handling.
+ + {sub_help} + """) + + mapper = { + 'scan': scan.Scan, + 'activate': activate.Activate, + 'trigger': trigger.Trigger, + } + + def __init__(self, argv): + self.argv = argv + + def print_help(self, sub_help): + return self._help.format(sub_help=sub_help) + + def main(self): + terminal.dispatch(self.mapper, self.argv) + parser = argparse.ArgumentParser( + prog='ceph-volume simple', + formatter_class=argparse.RawDescriptionHelpFormatter, + description=self.print_help(terminal.subhelp(self.mapper)), + ) + parser.parse_args(self.argv) + if len(self.argv) <= 1: + return parser.print_help() diff --git a/src/ceph-volume/ceph_volume/devices/simple/scan.py b/src/ceph-volume/ceph_volume/devices/simple/scan.py new file mode 100644 index 00000000..34da0962 --- /dev/null +++ b/src/ceph-volume/ceph_volume/devices/simple/scan.py @@ -0,0 +1,385 @@ +from __future__ import print_function +import argparse +import base64 +import json +import logging +import os +from textwrap import dedent +from ceph_volume import decorators, terminal, conf +from ceph_volume.api import lvm +from ceph_volume.systemd import systemctl +from ceph_volume.util import arg_validators, system, disk, encryption +from ceph_volume.util.device import Device + + +logger = logging.getLogger(__name__) + + +def parse_keyring(file_contents): + """ + Extract the actual key from a string. Usually from a keyring file, where + the keyring will be in a client section. In the case of a lockbox, it is + something like:: + + [client.osd-lockbox.8d7a8ab2-5db0-4f83-a785-2809aba403d5]\n\tkey = AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA==\n + + From the above case, it would return:: + + AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA== + """ + # remove newlines that might be trailing + keyring = file_contents.strip('\n') + + # Now split on spaces + keyring = keyring.split(' ')[-1] + + # Split on newlines + keyring = keyring.split('\n')[-1] + + return keyring.strip() + + +class Scan(object): + + help = 'Capture metadata from all running ceph-disk OSDs, OSD data partition or directory' + + def __init__(self, argv): + self.argv = argv + self._etc_path = '/etc/ceph/osd/' + + @property + def etc_path(self): + if os.path.isdir(self._etc_path): + return self._etc_path + + if not os.path.exists(self._etc_path): + os.mkdir(self._etc_path) + return self._etc_path + + error = "OSD Configuration path (%s) needs to be a directory" % self._etc_path + raise RuntimeError(error) + + def get_contents(self, path): + with open(path, 'r') as fp: + contents = fp.readlines() + if len(contents) > 1: + return ''.join(contents) + return ''.join(contents).strip().strip('\n') + + def scan_device(self, path): + device_metadata = {'path': None, 'uuid': None} + if not path: + return device_metadata + if self.is_encrypted: + encryption_metadata = encryption.legacy_encrypted(path) + device_metadata['path'] = encryption_metadata['device'] + device_metadata['uuid'] = disk.get_partuuid(encryption_metadata['device']) + return device_metadata + # cannot read the symlink if this is tmpfs + if os.path.islink(path): + device = os.readlink(path) + else: + device = path + lvm_device = lvm.get_first_lv(filters={'lv_path': device}) + if lvm_device: + device_uuid = lvm_device.lv_uuid + else: + device_uuid = disk.get_partuuid(device) + + device_metadata['uuid'] = device_uuid + device_metadata['path'] = device + + return device_metadata + + def scan_directory(self, path): + osd_metadata = {'cluster_name': conf.cluster} + directory_files = os.listdir(path) + if 'keyring' not in directory_files: + raise RuntimeError( 
+ 'OSD files not found, required "keyring" file is not present at: %s' % path + ) + for file_ in os.listdir(path): + file_path = os.path.join(path, file_) + file_json_key = file_ + if file_.endswith('_dmcrypt'): + file_json_key = file_.rstrip('_dmcrypt') + logger.info( + 'reading file {}, stripping _dmcrypt suffix'.format(file_) + ) + if os.path.islink(file_path): + if os.path.exists(file_path): + osd_metadata[file_json_key] = self.scan_device(file_path) + else: + msg = 'broken symlink found %s -> %s' % (file_path, os.path.realpath(file_path)) + terminal.warning(msg) + logger.warning(msg) + + if os.path.isdir(file_path): + continue + + # the check for binary needs to go before the file, to avoid + # capturing data from binary files but still be able to capture + # contents from actual files later + try: + if system.is_binary(file_path): + logger.info('skipping binary file: %s' % file_path) + continue + except IOError: + logger.exception('skipping due to IOError on file: %s' % file_path) + continue + if os.path.isfile(file_path): + content = self.get_contents(file_path) + if 'keyring' in file_path: + content = parse_keyring(content) + try: + osd_metadata[file_json_key] = int(content) + except ValueError: + osd_metadata[file_json_key] = content + + # we must scan the paths again because this might be a temporary mount + path_mounts = system.get_mounts(paths=True) + device = path_mounts.get(path) + + # it is possible to have more than one device, pick the first one, and + # warn that it is possible that more than one device is 'data' + if not device: + terminal.error('Unable to detect device mounted for path: %s' % path) + raise RuntimeError('Cannot activate OSD') + osd_metadata['data'] = self.scan_device(device[0] if len(device) else None) + + return osd_metadata + + def scan_encrypted(self, directory=None): + device = self.encryption_metadata['device'] + lockbox = self.encryption_metadata['lockbox'] + encryption_type = self.encryption_metadata['type'] + osd_metadata = {} + # Get the PARTUUID of the device to make sure have the right one and + # that maps to the data device + device_uuid = disk.get_partuuid(device) + dm_path = '/dev/mapper/%s' % device_uuid + # check if this partition is already mapped + device_status = encryption.status(device_uuid) + + # capture all the information from the lockbox first, reusing the + # directory scan method + if self.device_mounts.get(lockbox): + lockbox_path = self.device_mounts.get(lockbox)[0] + lockbox_metadata = self.scan_directory(lockbox_path) + # ceph-disk stores the fsid as osd-uuid in the lockbox, thanks ceph-disk + dmcrypt_secret = encryption.get_dmcrypt_key( + None, # There is no ID stored in the lockbox + lockbox_metadata['osd-uuid'], + os.path.join(lockbox_path, 'keyring') + ) + else: + with system.tmp_mount(lockbox) as lockbox_path: + lockbox_metadata = self.scan_directory(lockbox_path) + # ceph-disk stores the fsid as osd-uuid in the lockbox, thanks ceph-disk + dmcrypt_secret = encryption.get_dmcrypt_key( + None, # There is no ID stored in the lockbox + lockbox_metadata['osd-uuid'], + os.path.join(lockbox_path, 'keyring') + ) + + if not device_status: + # Note how both these calls need b64decode. For some reason, the + # way ceph-disk creates these keys, it stores them in the monitor + # *undecoded*, requiring this decode call again. 
The lvm side of + # encryption doesn't need it, so we are assuming here that anything + # that `simple` scans, will come from ceph-disk and will need this + # extra decode call here + dmcrypt_secret = base64.b64decode(dmcrypt_secret) + if encryption_type == 'luks': + encryption.luks_open(dmcrypt_secret, device, device_uuid) + else: + encryption.plain_open(dmcrypt_secret, device, device_uuid) + + # If we have a directory, use that instead of checking for mounts + if directory: + osd_metadata = self.scan_directory(directory) + else: + # Now check if that mapper is mounted already, to avoid remounting and + # decrypting the device + dm_path_mount = self.device_mounts.get(dm_path) + if dm_path_mount: + osd_metadata = self.scan_directory(dm_path_mount[0]) + else: + with system.tmp_mount(dm_path, encrypted=True) as device_path: + osd_metadata = self.scan_directory(device_path) + + osd_metadata['encrypted'] = True + osd_metadata['encryption_type'] = encryption_type + osd_metadata['lockbox.keyring'] = parse_keyring(lockbox_metadata['keyring']) + return osd_metadata + + @decorators.needs_root + def scan(self, args): + osd_metadata = {'cluster_name': conf.cluster} + osd_path = None + logger.info('detecting if argument is a device or a directory: %s', args.osd_path) + if os.path.isdir(args.osd_path): + logger.info('will scan directly, path is a directory') + osd_path = args.osd_path + else: + # assume this is a device, check if it is mounted and use that path + logger.info('path is not a directory, will check if mounted') + if system.device_is_mounted(args.osd_path): + logger.info('argument is a device, which is mounted') + mounted_osd_paths = self.device_mounts.get(args.osd_path) + osd_path = mounted_osd_paths[0] if len(mounted_osd_paths) else None + + # argument is not a directory, and it is not a device that is mounted + # somewhere so temporarily mount it to poke inside, otherwise, scan + # directly + if not osd_path: + # check if we have an encrypted device first, so that we can poke at + # the lockbox instead + if self.is_encrypted: + if not self.encryption_metadata.get('lockbox'): + raise RuntimeError( + 'Lockbox partition was not found for device: %s' % args.osd_path + ) + osd_metadata = self.scan_encrypted() + else: + logger.info('device is not mounted, will mount it temporarily to scan') + with system.tmp_mount(args.osd_path) as osd_path: + osd_metadata = self.scan_directory(osd_path) + else: + if self.is_encrypted: + logger.info('will scan encrypted OSD directory at path: %s', osd_path) + osd_metadata = self.scan_encrypted(osd_path) + else: + logger.info('will scan OSD directory at path: %s', osd_path) + osd_metadata = self.scan_directory(osd_path) + + osd_id = osd_metadata['whoami'] + osd_fsid = osd_metadata['fsid'] + filename = '%s-%s.json' % (osd_id, osd_fsid) + json_path = os.path.join(self.etc_path, filename) + + if os.path.exists(json_path) and not args.stdout: + if not args.force: + raise RuntimeError( + '--force was not used and OSD metadata file exists: %s' % json_path + ) + + if args.stdout: + print(json.dumps(osd_metadata, indent=4, sort_keys=True, ensure_ascii=False)) + else: + with open(json_path, 'w') as fp: + json.dump(osd_metadata, fp, indent=4, sort_keys=True, ensure_ascii=False) + fp.write(os.linesep) + terminal.success( + 'OSD %s got scanned and metadata persisted to file: %s' % ( + osd_id, + json_path + ) + ) + terminal.success( + 'To take over management of this scanned OSD, and disable ceph-disk and udev, run:' + ) + terminal.success(' ceph-volume simple activate %s 
%s' % (osd_id, osd_fsid)) + + if not osd_metadata.get('data'): + msg = 'Unable to determine device mounted on %s' % args.osd_path + logger.warning(msg) + terminal.warning(msg) + terminal.warning('OSD will not be able to start without this information:') + terminal.warning(' "data": "/path/to/device",') + logger.warning('Unable to determine device mounted on %s' % args.osd_path) + + def main(self): + sub_command_help = dedent(""" + Scan running OSDs, an OSD directory (or data device) for files and configurations + that will allow to take over the management of the OSD. + + Scanned OSDs will get their configurations stored in + /etc/ceph/osd/<id>-<fsid>.json + + For an OSD ID of 0 with fsid of ``a9d50838-e823-43d6-b01f-2f8d0a77afc2`` + that could mean a scan command that looks like:: + + ceph-volume simple scan /var/lib/ceph/osd/ceph-0 + + Which would store the metadata in a JSON file at:: + + /etc/ceph/osd/0-a9d50838-e823-43d6-b01f-2f8d0a77afc2.json + + To scan all running OSDs: + + ceph-volume simple scan + + To a scan a specific running OSD: + + ceph-volume simple scan /var/lib/ceph/osd/{cluster}-{osd id} + + And to scan a device (mounted or unmounted) that has OSD data in it, for example /dev/sda1 + + ceph-volume simple scan /dev/sda1 + + Scanning a device or directory that belongs to an OSD not created by ceph-disk will be ingored. + """) + parser = argparse.ArgumentParser( + prog='ceph-volume simple scan', + formatter_class=argparse.RawDescriptionHelpFormatter, + description=sub_command_help, + ) + + parser.add_argument( + '-f', '--force', + action='store_true', + help='If OSD has already been scanned, the JSON file will be overwritten' + ) + + parser.add_argument( + '--stdout', + action='store_true', + help='Do not save to a file, output metadata to stdout' + ) + + parser.add_argument( + 'osd_path', + metavar='OSD_PATH', + type=arg_validators.OSDPath(), + nargs='?', + default=None, + help='Path to an existing OSD directory or OSD data partition' + ) + + args = parser.parse_args(self.argv) + paths = [] + if args.osd_path: + paths.append(args.osd_path) + else: + osd_ids = systemctl.get_running_osd_ids() + for osd_id in osd_ids: + paths.append("/var/lib/ceph/osd/{}-{}".format( + conf.cluster, + osd_id, + )) + + # Capture some environment status, so that it can be reused all over + self.device_mounts = system.get_mounts(devices=True) + self.path_mounts = system.get_mounts(paths=True) + + for path in paths: + args.osd_path = path + device = Device(args.osd_path) + if device.is_partition: + if device.ceph_disk.type != 'data': + label = device.ceph_disk.partlabel + msg = 'Device must be the ceph data partition, but PARTLABEL reported: "%s"' % label + raise RuntimeError(msg) + + self.encryption_metadata = encryption.legacy_encrypted(args.osd_path) + self.is_encrypted = self.encryption_metadata['encrypted'] + + if self.encryption_metadata['device'] != "tmpfs": + device = Device(self.encryption_metadata['device']) + if not device.is_ceph_disk_member: + terminal.warning("Ignoring %s because it's not a ceph-disk created osd." % path) + else: + self.scan(args) + else: + terminal.warning("Ignoring %s because it's not a ceph-disk created osd." 
% path) diff --git a/src/ceph-volume/ceph_volume/devices/simple/trigger.py b/src/ceph-volume/ceph_volume/devices/simple/trigger.py new file mode 100644 index 00000000..c01d9ae2 --- /dev/null +++ b/src/ceph-volume/ceph_volume/devices/simple/trigger.py @@ -0,0 +1,70 @@ +from __future__ import print_function +import argparse +from textwrap import dedent +from ceph_volume.exceptions import SuffixParsingError +from ceph_volume import decorators +from .activate import Activate + + +def parse_osd_id(string): + osd_id = string.split('-', 1)[0] + if not osd_id: + raise SuffixParsingError('OSD id', string) + if osd_id.isdigit(): + return osd_id + raise SuffixParsingError('OSD id', string) + + +def parse_osd_uuid(string): + osd_id = '%s-' % parse_osd_id(string) + # remove the id first + osd_uuid = string.split(osd_id, 1)[-1] + if not osd_uuid: + raise SuffixParsingError('OSD uuid', string) + return osd_uuid + + +class Trigger(object): + + help = 'systemd helper to activate an OSD' + + def __init__(self, argv): + self.argv = argv + + @decorators.needs_root + def main(self): + sub_command_help = dedent(""" + ** DO NOT USE DIRECTLY ** + This tool is meant to help the systemd unit that knows about OSDs. + + Proxy OSD activation to ``ceph-volume simple activate`` by parsing the + input from systemd, detecting the UUID and ID associated with an OSD:: + + ceph-volume simple trigger {SYSTEMD-DATA} + + The systemd "data" is expected to be in the format of:: + + {OSD ID}-{OSD UUID} + + The devices associated with the OSD need to have been scanned previously, + so that all needed metadata can be used for starting the OSD process. + """) + parser = argparse.ArgumentParser( + prog='ceph-volume simple trigger', + formatter_class=argparse.RawDescriptionHelpFormatter, + description=sub_command_help, + ) + + parser.add_argument( + 'systemd_data', + metavar='SYSTEMD_DATA', + nargs='?', + help='Data from a systemd unit containing ID and UUID of the OSD, like 0-asdf-lkjh' + ) + if len(self.argv) == 0: + print(sub_command_help) + return + args = parser.parse_args(self.argv) + osd_id = parse_osd_id(args.systemd_data) + osd_uuid = parse_osd_uuid(args.systemd_data) + Activate([osd_id, osd_uuid], from_trigger=True).main() diff --git a/src/ceph-volume/ceph_volume/exceptions.py b/src/ceph-volume/ceph_volume/exceptions.py new file mode 100644 index 00000000..5c642948 --- /dev/null +++ b/src/ceph-volume/ceph_volume/exceptions.py @@ -0,0 +1,63 @@ +import os + + +class ConfigurationError(Exception): + + def __init__(self, cluster_name='ceph', path='/etc/ceph', abspath=None): + self.cluster_name = cluster_name + self.path = path + self.abspath = abspath or "%s.conf" % os.path.join(self.path, self.cluster_name) + + def __str__(self): + return 'Unable to load expected Ceph config at: %s' % self.abspath + + +class ConfigurationSectionError(Exception): + + def __init__(self, section): + self.section = section + + def __str__(self): + return 'Unable to find expected configuration section: "%s"' % self.section + + +class ConfigurationKeyError(Exception): + + def __init__(self, section, key): + self.section = section + self.key = key + + def __str__(self): + return 'Unable to find expected configuration key: "%s" from section "%s"' % ( + self.key, + self.section + ) + + +class SuffixParsingError(Exception): + + def __init__(self, suffix, part=None): + self.suffix = suffix + self.part = part + + def __str__(self): + return 'Unable to parse the %s from systemd suffix: %s' % (self.part, self.suffix) + + +class SuperUserError(Exception): + + 
def __str__(self): + return 'This command needs to be executed with sudo or as root' + + +class SizeAllocationError(Exception): + + def __init__(self, requested, available): + self.requested = requested + self.available = available + + def __str__(self): + msg = 'Unable to allocate size (%s), not enough free space (%s)' % ( + self.requested, self.available + ) + return msg diff --git a/src/ceph-volume/ceph_volume/inventory/__init__.py b/src/ceph-volume/ceph_volume/inventory/__init__.py new file mode 100644 index 00000000..c9e0c0cc --- /dev/null +++ b/src/ceph-volume/ceph_volume/inventory/__init__.py @@ -0,0 +1 @@ +from .main import Inventory # noqa diff --git a/src/ceph-volume/ceph_volume/inventory/main.py b/src/ceph-volume/ceph_volume/inventory/main.py new file mode 100644 index 00000000..7053a3eb --- /dev/null +++ b/src/ceph-volume/ceph_volume/inventory/main.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- + +import argparse +import json + +from ceph_volume.util.device import Devices, Device + + +class Inventory(object): + + help = "Get this nodes available disk inventory" + + def __init__(self, argv): + self.argv = argv + + def main(self): + parser = argparse.ArgumentParser( + prog='ceph-volume inventory', + formatter_class=argparse.RawDescriptionHelpFormatter, + description=self.help, + ) + parser.add_argument( + 'path', + nargs='?', + default=None, + help=('Report on specific disk'), + ) + parser.add_argument( + '--format', + choices=['plain', 'json', 'json-pretty'], + default='plain', + help='Output format', + ) + parser.add_argument( + '--filter-for-batch', + action='store_true', + help=('Filter devices unsuitable to pass to an OSD service spec, ' + 'no effect when <path> is passed'), + default=False, + ) + self.args = parser.parse_args(self.argv) + if self.args.path: + self.format_report(Device(self.args.path)) + else: + self.format_report(Devices(filter_for_batch=self.args.filter_for_batch)) + + def get_report(self): + if self.args.path: + return Device(self.args.path).json_report() + else: + return Devices(filter_for_batch=self.args.filter_for_batch).json_report() + + def format_report(self, inventory): + if self.args.format == 'json': + print(json.dumps(inventory.json_report())) + elif self.args.format == 'json-pretty': + print(json.dumps(inventory.json_report(), indent=4, sort_keys=True)) + else: + print(inventory.pretty_report()) diff --git a/src/ceph-volume/ceph_volume/log.py b/src/ceph-volume/ceph_volume/log.py new file mode 100644 index 00000000..b283bedb --- /dev/null +++ b/src/ceph-volume/ceph_volume/log.py @@ -0,0 +1,49 @@ +import logging +import os +from ceph_volume import terminal +from ceph_volume import conf + +BASE_FORMAT = "[%(name)s][%(levelname)-6s] %(message)s" +FILE_FORMAT = "[%(asctime)s]" + BASE_FORMAT + + +def setup(name='ceph-volume.log', log_path=None, log_level=None): + log_path = log_path or conf.log_path + # if a non-root user calls help or other no-sudo-required command the + # logger will fail to write to /var/lib/ceph/ so this /tmp/ path is used as + # a fallback + tmp_log_file = os.path.join('/tmp/', name) + root_logger = logging.getLogger() + # The default path is where all ceph log files are, and will get rotated by + # Ceph's logrotate rules. + log_level = log_level or "DEBUG" + log_level = getattr(logging, log_level.upper()) + root_logger.setLevel(log_level) + + try: + fh = logging.FileHandler(log_path) + except (OSError, IOError) as err: + terminal.warning("Falling back to /tmp/ for logging. 
Can't use %s" % log_path) + terminal.warning(str(err)) + conf.log_path = tmp_log_file + fh = logging.FileHandler(tmp_log_file) + + fh.setLevel(log_level) + fh.setFormatter(logging.Formatter(FILE_FORMAT)) + + root_logger.addHandler(fh) + + +def setup_console(): + # TODO: At some point ceph-volume should stop using the custom logger + # interface that exists in terminal.py and use the logging module to + # produce output for the terminal + # Console Logger + sh = logging.StreamHandler() + sh.setFormatter(logging.Formatter('[terminal] %(message)s')) + sh.setLevel(logging.DEBUG) + + terminal_logger = logging.getLogger('terminal') + + # allow all levels at root_logger, handlers control individual levels + terminal_logger.addHandler(sh) diff --git a/src/ceph-volume/ceph_volume/main.py b/src/ceph-volume/ceph_volume/main.py new file mode 100644 index 00000000..728d5008 --- /dev/null +++ b/src/ceph-volume/ceph_volume/main.py @@ -0,0 +1,181 @@ +from __future__ import print_function +import argparse +import os +import pkg_resources +import sys +import logging + +from ceph_volume.decorators import catches +from ceph_volume import log, devices, configuration, conf, exceptions, terminal, inventory + + +class Volume(object): + _help = """ +ceph-volume: Deploy Ceph OSDs using different device technologies like lvm or +physical disks. + +Log Path: {log_path} +Ceph Conf: {ceph_path} + +{sub_help} +{plugins} +{environ_vars} +{warning} + """ + + def __init__(self, argv=None, parse=True): + self.mapper = { + 'lvm': devices.lvm.LVM, + 'simple': devices.simple.Simple, + 'raw': devices.raw.Raw, + 'inventory': inventory.Inventory, + } + self.plugin_help = "No plugins found/loaded" + if argv is None: + self.argv = sys.argv + else: + self.argv = argv + if parse: + self.main(self.argv) + + def help(self, warning=False): + warning = 'See "ceph-volume --help" for full list of options.' 
if warning else '' + return self._help.format( + warning=warning, + log_path=conf.log_path, + ceph_path=self.stat_ceph_conf(), + plugins=self.plugin_help, + sub_help=terminal.subhelp(self.mapper), + environ_vars=self.get_environ_vars() + ) + + def get_environ_vars(self): + environ_vars = [] + for key, value in os.environ.items(): + if key.startswith('CEPH_'): + environ_vars.append("%s=%s" % (key, value)) + if not environ_vars: + return '' + else: + environ_vars.insert(0, '\nEnviron Variables:') + return '\n'.join(environ_vars) + + def enable_plugins(self): + """ + Load all plugins available, add them to the mapper and extend the help + string with the information from each one + """ + plugins = _load_library_extensions() + for plugin in plugins: + self.mapper[plugin._ceph_volume_name_] = plugin + self.plugin_help = '\n'.join(['%-19s %s\n' % ( + plugin.name, getattr(plugin, 'help_menu', '')) + for plugin in plugins]) + if self.plugin_help: + self.plugin_help = '\nPlugins:\n' + self.plugin_help + + def load_log_path(self): + conf.log_path = os.getenv('CEPH_VOLUME_LOG_PATH', '/var/log/ceph') + + def stat_ceph_conf(self): + try: + configuration.load(conf.path) + return terminal.green(conf.path) + except exceptions.ConfigurationError as error: + return terminal.red(error) + + def _get_split_args(self): + subcommands = self.mapper.keys() + slice_on_index = len(self.argv) + 1 + pruned_args = self.argv[1:] + for count, arg in enumerate(pruned_args): + if arg in subcommands: + slice_on_index = count + break + return pruned_args[:slice_on_index], pruned_args[slice_on_index:] + + @catches() + def main(self, argv): + # these need to be available for the help, which gets parsed super + # early + configuration.load_ceph_conf_path() + self.load_log_path() + self.enable_plugins() + main_args, subcommand_args = self._get_split_args() + # no flags where passed in, return the help menu instead of waiting for + # argparse which will end up complaning that there are no args + if len(argv) <= 1: + print(self.help(warning=True)) + raise SystemExit(0) + parser = argparse.ArgumentParser( + prog='ceph-volume', + formatter_class=argparse.RawDescriptionHelpFormatter, + description=self.help(), + ) + parser.add_argument( + '--cluster', + default='ceph', + help='Cluster name (defaults to "ceph")', + ) + parser.add_argument( + '--log-level', + default='debug', + choices=['debug', 'info', 'warning', 'error', 'critical'], + help='Change the file log level (defaults to debug)', + ) + parser.add_argument( + '--log-path', + default='/var/log/ceph/', + help='Change the log path (defaults to /var/log/ceph)', + ) + args = parser.parse_args(main_args) + conf.log_path = args.log_path + if os.path.isdir(conf.log_path): + conf.log_path = os.path.join(args.log_path, 'ceph-volume.log') + log.setup(log_level=args.log_level) + log.setup_console() + logger = logging.getLogger(__name__) + logger.info("Running command: ceph-volume %s %s", " ".join(main_args), " ".join(subcommand_args)) + # set all variables from args and load everything needed according to + # them + configuration.load_ceph_conf_path(cluster_name=args.cluster) + try: + conf.ceph = configuration.load(conf.path) + except exceptions.ConfigurationError as error: + # we warn only here, because it is possible that the configuration + # file is not needed, or that it will be loaded by some other means + # (like reading from lvm tags) + logger.exception('ignoring inability to load ceph.conf') + terminal.red(error) + # dispatch to sub-commands + terminal.dispatch(self.mapper, 
subcommand_args) + + +def _load_library_extensions(): + """ + Locate all setuptools entry points by the name 'ceph_volume_handlers' + and initialize them. + Any third-party library may register an entry point by adding the + following to their setup.py:: + + entry_points = { + 'ceph_volume_handlers': [ + 'plugin_name = mylib.mymodule:Handler_Class', + ], + }, + + `plugin_name` will be used to load it as a sub command. + """ + logger = logging.getLogger('ceph_volume.plugins') + group = 'ceph_volume_handlers' + entry_points = pkg_resources.iter_entry_points(group=group) + plugins = [] + for ep in entry_points: + try: + logger.debug('loading %s' % ep.name) + plugin = ep.load() + plugin._ceph_volume_name_ = ep.name + plugins.append(plugin) + except Exception as error: + logger.exception("Error initializing plugin %s: %s" % (ep, error)) + return plugins diff --git a/src/ceph-volume/ceph_volume/process.py b/src/ceph-volume/ceph_volume/process.py new file mode 100644 index 00000000..e7098689 --- /dev/null +++ b/src/ceph-volume/ceph_volume/process.py @@ -0,0 +1,233 @@ +from fcntl import fcntl, F_GETFL, F_SETFL +from os import O_NONBLOCK, read +import subprocess +from select import select +from ceph_volume import terminal +from ceph_volume.util import as_bytes + +import logging + +logger = logging.getLogger(__name__) + + +def which(executable): + """ + Proxy function to ceph_volume.util.system.which because the ``system`` + module does import ``process`` + """ + from ceph_volume.util import system + return system.which(executable) + + +def log_output(descriptor, message, terminal_logging, logfile_logging): + """ + log output to both the logger and the terminal if terminal_logging is + enabled + """ + if not message: + return + message = message.strip() + line = '%s %s' % (descriptor, message) + if terminal_logging: + getattr(terminal, descriptor)(message) + if logfile_logging: + logger.info(line) + + +def log_descriptors(reads, process, terminal_logging): + """ + Helper to send output to the terminal while polling the subprocess + """ + # these fcntl are set to O_NONBLOCK for the filedescriptors coming from + # subprocess so that the logging does not block. Without these a prompt in + # a subprocess output would hang and nothing would get printed. Note how + # these are just set when logging subprocess, not globally. + stdout_flags = fcntl(process.stdout, F_GETFL) # get current p.stdout flags + stderr_flags = fcntl(process.stderr, F_GETFL) # get current p.stderr flags + fcntl(process.stdout, F_SETFL, stdout_flags | O_NONBLOCK) + fcntl(process.stderr, F_SETFL, stderr_flags | O_NONBLOCK) + descriptor_names = { + process.stdout.fileno(): 'stdout', + process.stderr.fileno(): 'stderr' + } + for descriptor in reads: + descriptor_name = descriptor_names[descriptor] + try: + message = read(descriptor, 1024) + if not isinstance(message, str): + message = message.decode('utf-8') + log_output(descriptor_name, message, terminal_logging, True) + except (IOError, OSError): + # nothing else to log + pass + + +def obfuscate(command_, on=None): + """ + Certain commands that are useful to log might contain information that + should be replaced by '*' like when creating OSDs and the keyrings are + being passed, which should not be logged. + + :param on: A string (will match a flag) or an integer (will match an index) + + If matching on a flag (when ``on`` is a string) it will obfuscate on the + value for that flag. 
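When ``on`` is an integer the index is used as-is; a minimal sketch (the command and secret value here are made up):

    >>> obfuscate(['ceph-authtool', '--name', 'client.admin', 'SECRET'], on=3)
    'Running command: ceph-authtool --name client.admin ******'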
For the flag form, a command like ['ls', '-l', '/'] that calls
+    `obfuscate(command, on='-l')` will obfuscate '/' which is the value for
+    `-l`.
+
+    The reason ``on`` allows either a string or an integer, altering
+    behavior for both, is that it is easier for ``run`` and ``call`` to just
+    pop a value to obfuscate (vs. allowing an index or a flag)
+    """
+    command = command_[:]
+    msg = "Running command: %s" % ' '.join(command)
+    if on in [None, False]:
+        return msg
+
+    if isinstance(on, int):
+        index = on
+
+    else:
+        try:
+            index = command.index(on) + 1
+        except ValueError:
+            # if the flag just doesn't exist then it doesn't matter just return
+            # the base msg
+            return msg
+
+    try:
+        command[index] = '*' * len(command[index])
+    except IndexError:  # the index was completely out of range
+        return msg
+
+    return "Running command: %s" % ' '.join(command)
+
+
+def run(command, **kw):
+    """
+    A real-time-logging implementation of a remote subprocess.Popen call where
+    a command is just executed on the remote end and no other handling is done.
+
+    :param command: The command to pass in to the remote subprocess.Popen as a list
+    :param stop_on_error: If a nonzero exit status is returned, it raises a ``RuntimeError``
+    :param fail_msg: If a nonzero exit status is returned this message will be included in the log
+    """
+    executable = which(command.pop(0))
+    command.insert(0, executable)
+    stop_on_error = kw.pop('stop_on_error', True)
+    command_msg = obfuscate(command, kw.pop('obfuscate', None))
+    fail_msg = kw.pop('fail_msg', None)
+    logger.info(command_msg)
+    terminal.write(command_msg)
+    terminal_logging = kw.pop('terminal_logging', True)
+
+    process = subprocess.Popen(
+        command,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        close_fds=True,
+        **kw
+    )
+
+    while True:
+        reads, _, _ = select(
+            [process.stdout.fileno(), process.stderr.fileno()],
+            [], []
+        )
+        log_descriptors(reads, process, terminal_logging)
+
+        if process.poll() is not None:
+            # ensure we do not have anything pending in stdout or stderr
+            log_descriptors(reads, process, terminal_logging)
+
+            break
+
+    returncode = process.wait()
+    if returncode != 0:
+        msg = "command returned non-zero exit status: %s" % returncode
+        if fail_msg:
+            logger.warning(fail_msg)
+            if terminal_logging:
+                terminal.warning(fail_msg)
+        if stop_on_error:
+            raise RuntimeError(msg)
+        else:
+            if terminal_logging:
+                terminal.warning(msg)
+            logger.warning(msg)
+
+
+def call(command, **kw):
+    """
+    Similar to ``subprocess.Popen`` with the following changes:
+
+    * returns stdout, stderr, and exit code (vs. just the exit code)
+    * logs the full contents of stderr and stdout (separately) to the file log
+
+    By default, no terminal output is given, not even the command that is going
+    to run.
+
+    Useful when system calls are needed to act on output, and that same output
+    shouldn't get displayed on the terminal.
+
+    Optionally, the command can be displayed on the terminal and the log file,
+    and log file output can be turned off. This is useful to prevent sensitive
+    output going to stderr/stdout and being captured on a log file.
+
+    :param terminal_verbose: Log command output to terminal, defaults to False, and
+                             it is forcefully set to True if a return code is non-zero
+    :param logfile_verbose: Log stderr/stdout output to log file. Defaults to True
+    :param verbose_on_failure: On a non-zero exit status, it will forcefully set logging ON for
+                               the terminal.
Defaults to True + """ + executable = which(command.pop(0)) + command.insert(0, executable) + terminal_verbose = kw.pop('terminal_verbose', False) + logfile_verbose = kw.pop('logfile_verbose', True) + verbose_on_failure = kw.pop('verbose_on_failure', True) + show_command = kw.pop('show_command', False) + command_msg = "Running command: %s" % ' '.join(command) + stdin = kw.pop('stdin', None) + logger.info(command_msg) + if show_command: + terminal.write(command_msg) + + process = subprocess.Popen( + command, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE, + close_fds=True, + **kw + ) + + if stdin: + stdout_stream, stderr_stream = process.communicate(as_bytes(stdin)) + else: + stdout_stream = process.stdout.read() + stderr_stream = process.stderr.read() + returncode = process.wait() + if not isinstance(stdout_stream, str): + stdout_stream = stdout_stream.decode('utf-8') + if not isinstance(stderr_stream, str): + stderr_stream = stderr_stream.decode('utf-8') + stdout = stdout_stream.splitlines() + stderr = stderr_stream.splitlines() + + if returncode != 0: + # set to true so that we can log the stderr/stdout that callers would + # do anyway as long as verbose_on_failure is set (defaults to True) + if verbose_on_failure: + terminal_verbose = True + # logfiles aren't disruptive visually, unlike the terminal, so this + # should always be on when there is a failure + logfile_verbose = True + + # the following can get a messed up order in the log if the system call + # returns output with both stderr and stdout intermingled. This separates + # that. + for line in stdout: + log_output('stdout', line, terminal_verbose, logfile_verbose) + for line in stderr: + log_output('stderr', line, terminal_verbose, logfile_verbose) + return stdout, stderr, returncode diff --git a/src/ceph-volume/ceph_volume/systemd/__init__.py b/src/ceph-volume/ceph_volume/systemd/__init__.py new file mode 100644 index 00000000..493b8814 --- /dev/null +++ b/src/ceph-volume/ceph_volume/systemd/__init__.py @@ -0,0 +1 @@ +from .main import main # noqa diff --git a/src/ceph-volume/ceph_volume/systemd/main.py b/src/ceph-volume/ceph_volume/systemd/main.py new file mode 100644 index 00000000..2cb1d1b8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/systemd/main.py @@ -0,0 +1,108 @@ +""" +This file is used only by systemd units that are passing their instance suffix +as arguments to this script so that it can parse the suffix into arguments that +``ceph-volume <sub command>`` can consume +""" +import os +import sys +import time +import logging +from ceph_volume import log, process +from ceph_volume.exceptions import SuffixParsingError + + +def parse_subcommand(string): + subcommand = string.split('-', 1)[0] + if not subcommand: + raise SuffixParsingError('subcommand', string) + return subcommand + + +def parse_extra_data(string): + # get the subcommand to split on that + sub_command = parse_subcommand(string) + + # the split will leave data with a dash, so remove that + data = string.split(sub_command)[-1] + if not data: + raise SuffixParsingError('data', string) + return data.lstrip('-') + + +def parse_osd_id(string): + osd_id = string.split('-', 1)[0] + if not osd_id: + raise SuffixParsingError('OSD id', string) + if osd_id.isdigit(): + return osd_id + raise SuffixParsingError('OSD id', string) + + +def parse_osd_uuid(string): + osd_id = '%s-' % parse_osd_id(string) + osd_subcommand = '-%s' % parse_subcommand(string) + # remove the id first + trimmed_suffix = string.split(osd_id)[-1] + # now remove the 
sub command
+    osd_uuid = trimmed_suffix.split(osd_subcommand)[0]
+    if not osd_uuid:
+        raise SuffixParsingError('OSD uuid', string)
+    return osd_uuid
+
+
+def main(args=None):
+    """
+    Main entry point for the ``ceph-volume-systemd`` executable. ``args`` are
+    optional for easier testing of arguments.
+
+    Expected input is similar to::
+
+        ['/path/to/ceph-volume-systemd', '<type>-<extra metadata>']
+
+    For example::
+
+        [
+            '/usr/bin/ceph-volume-systemd',
+            'lvm-0-8715BEB4-15C5-49DE-BA6F-401086EC7B41'
+        ]
+
+    The first part of the argument is the only interesting bit, which contains
+    the metadata needed to proxy the call to ``ceph-volume`` itself.
+
+    Reusing the example, the proxy call to ``ceph-volume`` would look like::
+
+        ceph-volume lvm trigger 0-8715BEB4-15C5-49DE-BA6F-401086EC7B41
+
+    That means that ``lvm`` is used as the subcommand and it is **expected**
+    that a ``trigger`` sub-command will be present to make sense of the extra
+    piece of the string.
+
+    """
+    log.setup(name='ceph-volume-systemd.log', log_path='/var/log/ceph/ceph-volume-systemd.log')
+    logger = logging.getLogger('systemd')
+
+    args = args if args is not None else sys.argv
+    try:
+        suffix = args[-1]
+    except IndexError:
+        raise RuntimeError('no arguments supplied')
+    sub_command = parse_subcommand(suffix)
+    extra_data = parse_extra_data(suffix)
+    logger.info('raw systemd input received: %s', suffix)
+    logger.info('parsed sub-command: %s, extra data: %s', sub_command, extra_data)
+    command = ['ceph-volume', sub_command, 'trigger', extra_data]
+
+    tries = int(os.environ.get('CEPH_VOLUME_SYSTEMD_TRIES', 30))
+    interval = int(os.environ.get('CEPH_VOLUME_SYSTEMD_INTERVAL', 5))
+    while tries > 0:
+        try:
+            # don't log any output to the terminal, just rely on stderr/stdout
+            # going to logging
+            process.run(command, terminal_logging=False)
+            logger.info('successfully triggered activation for: %s', extra_data)
+            break
+        except RuntimeError as error:
+            logger.warning(error)
+            logger.warning('failed activating OSD, retries left: %s', tries)
+            tries -= 1
+            time.sleep(interval)
diff --git a/src/ceph-volume/ceph_volume/systemd/systemctl.py b/src/ceph-volume/ceph_volume/systemd/systemctl.py
new file mode 100644
index 00000000..778ad147
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/systemd/systemctl.py
@@ -0,0 +1,101 @@
+"""
+Utilities to control systemd units
+"""
+import logging
+
+from ceph_volume import process
+
+logger = logging.getLogger(__name__)
+
+def start(unit):
+    process.run(['systemctl', 'start', unit])
+
+
+def stop(unit):
+    process.run(['systemctl', 'stop', unit])
+
+
+def enable(unit, runtime=False):
+    if runtime:
+        process.run(['systemctl', 'enable', '--runtime', unit])
+    else:
+        process.run(['systemctl', 'enable', unit])
+
+
+def disable(unit):
+    process.run(['systemctl', 'disable', unit])
+
+
+def mask(unit):
+    process.run(['systemctl', 'mask', unit])
+
+
+def is_active(unit):
+    out, err, rc = process.call(
+        ['systemctl', 'is-active', unit],
+        verbose_on_failure=False
+    )
+    return rc == 0
+
+def get_running_osd_ids():
+    out, err, rc = process.call([
+        'systemctl',
+        'show',
+        '--no-pager',
+        '--property=Id',
+        '--state=running',
+        'ceph-osd@*',
+    ])
+    osd_ids = []
+    if rc == 0:
+        for line in out:
+            if line:
+                # example line looks like: Id=ceph-osd@1.service
+                try:
+                    osd_id = line.split("@")[1].split(".service")[0]
+                    osd_ids.append(osd_id)
+                except (IndexError, TypeError):
+                    logger.warning("Failed to parse output from systemctl: %s", line)
+    return osd_ids
+
+def start_osd(id_):
+    return start(osd_unit %
id_) + + +def stop_osd(id_): + return stop(osd_unit % id_) + + +def enable_osd(id_): + return enable(osd_unit % id_, runtime=True) + + +def disable_osd(id_): + return disable(osd_unit % id_) + + +def osd_is_active(id_): + return is_active(osd_unit % id_) + + +def enable_volume(id_, fsid, device_type='lvm'): + return enable(volume_unit % (device_type, id_, fsid)) + + +def mask_ceph_disk(): + # systemctl allows using a glob like '*' for masking, but there was a bug + # in that it wouldn't allow this for service templates. This means that + # masking ceph-disk@* will not work, so we must link the service directly. + # /etc/systemd takes precedence regardless of the location of the unit + process.run( + ['ln', '-sf', '/dev/null', '/etc/systemd/system/ceph-disk@.service'] + ) + + +# +# templates +# + +osd_unit = "ceph-osd@%s" +ceph_disk_unit = "ceph-disk@%s" +volume_unit = "ceph-volume@%s-%s-%s" diff --git a/src/ceph-volume/ceph_volume/terminal.py b/src/ceph-volume/ceph_volume/terminal.py new file mode 100644 index 00000000..a34946f9 --- /dev/null +++ b/src/ceph-volume/ceph_volume/terminal.py @@ -0,0 +1,214 @@ +import logging +import sys + + +terminal_logger = logging.getLogger('terminal') + + +class colorize(str): + """ + Pretty simple to use:: + + colorize.make('foo').bold + colorize.make('foo').green + colorize.make('foo').yellow + colorize.make('foo').red + colorize.make('foo').blue + + Otherwise you could go the long way (for example if you are + testing this class):: + + string = colorize('foo') + string._set_attributes() + string.red + + """ + + def __init__(self, string): + self.appends = '' + self.prepends = '' + self.isatty = sys.__stderr__.isatty() + + def _set_attributes(self): + """ + Sets the attributes here because the str class does not + allow to pass in anything other than a string to the constructor + so we can't really mess with the other attributes. + """ + for k, v in self.__colors__.items(): + setattr(self, k, self.make_color(v)) + + def make_color(self, color): + if not self.isatty: + return self + return color + self + '\033[0m' + self.appends + + @property + def __colors__(self): + return dict( + blue='\033[34m', + green='\033[92m', + yellow='\033[33m', + red='\033[91m', + bold='\033[1m', + ends='\033[0m' + ) + + @classmethod + def make(cls, string): + """ + A helper method to return itself and workaround the fact that + the str object doesn't allow extra arguments passed in to the + constructor + """ + obj = cls(string) + obj._set_attributes() + return obj + +# +# Common string manipulations +# +yellow = lambda x: colorize.make(x).yellow # noqa +blue = lambda x: colorize.make(x).blue # noqa +green = lambda x: colorize.make(x).green # noqa +red = lambda x: colorize.make(x).red # noqa +bold = lambda x: colorize.make(x).bold # noqa +red_arrow = red('--> ') +blue_arrow = blue('--> ') +green_arrow = green('--> ') +yellow_arrow = yellow('--> ') + + +class _Write(object): + + def __init__(self, _writer=None, prefix='', suffix='', flush=False): + # we can't set sys.stderr as the default for _writer. 
otherwise + # pytest's capturing gets confused + self._writer = _writer or sys.stderr + self.suffix = suffix + self.prefix = prefix + self.flush = flush + + def bold(self, string): + self.write(bold(string)) + + def raw(self, string): + if not string.endswith('\n'): + string = '%s\n' % string + self.write(string) + + def write(self, line): + entry = self.prefix + line + self.suffix + + try: + self._writer.write(entry) + if self.flush: + self._writer.flush() + except (UnicodeDecodeError, UnicodeEncodeError): + try: + terminal_logger.info(entry.strip('\n')) + except (AttributeError, TypeError): + terminal_logger.info(entry) + + +def stdout(msg): + return _Write(prefix=blue(' stdout: ')).raw(msg) + + +def stderr(msg): + return _Write(prefix=yellow(' stderr: ')).raw(msg) + + +def write(msg): + return _Write().raw(msg) + + +def error(msg): + return _Write(prefix=red_arrow).raw(msg) + + +def info(msg): + return _Write(prefix=blue_arrow).raw(msg) + + +def debug(msg): + return _Write(prefix=blue_arrow).raw(msg) + + +def warning(msg): + return _Write(prefix=yellow_arrow).raw(msg) + + +def success(msg): + return _Write(prefix=green_arrow).raw(msg) + + +class MultiLogger(object): + """ + Proxy class to be able to report on both logger instances and terminal + messages avoiding the issue of having to call them both separately + + Initialize it in the same way a logger object:: + + logger = terminal.MultiLogger(__name__) + """ + + def __init__(self, name): + self.logger = logging.getLogger(name) + + def _make_record(self, msg, *args): + if len(str(args)): + try: + return msg % args + except TypeError: + self.logger.exception('unable to produce log record: %s' % msg) + return msg + + def warning(self, msg, *args): + record = self._make_record(msg, *args) + warning(record) + self.logger.warning(record) + + def debug(self, msg, *args): + record = self._make_record(msg, *args) + debug(record) + self.logger.debug(record) + + def info(self, msg, *args): + record = self._make_record(msg, *args) + info(record) + self.logger.info(record) + + def error(self, msg, *args): + record = self._make_record(msg, *args) + error(record) + self.logger.error(record) + + +def dispatch(mapper, argv=None): + argv = argv or sys.argv + for count, arg in enumerate(argv, 1): + if arg in mapper.keys(): + instance = mapper.get(arg)(argv[count:]) + if hasattr(instance, 'main'): + instance.main() + raise SystemExit(0) + + +def subhelp(mapper): + """ + Look at every value of every key in the mapper and will output any + ``class.help`` possible to return it as a string that will be sent to + stderr. 
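For instance, with the mapper assembled in main.py the rendered text would look roughly like this (a sketch; only the inventory help string is present in this patch, and the padding comes from the "%-24s" format below):

    Available subcommands:

    inventory                Get this node's available disk inventory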
+ """ + help_text_lines = [] + for key, value in mapper.items(): + try: + help_text = value.help + except AttributeError: + continue + help_text_lines.append("%-24s %s" % (key, help_text)) + + if help_text_lines: + return "Available subcommands:\n\n%s" % '\n'.join(help_text_lines) + return '' diff --git a/src/ceph-volume/ceph_volume/tests/__init__.py b/src/ceph-volume/ceph_volume/tests/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/__init__.py diff --git a/src/ceph-volume/ceph_volume/tests/api/test_lvm.py b/src/ceph-volume/ceph_volume/tests/api/test_lvm.py new file mode 100644 index 00000000..f01ceb4f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/api/test_lvm.py @@ -0,0 +1,870 @@ +import os +import pytest +from mock.mock import patch +from ceph_volume import process, exceptions +from ceph_volume.api import lvm as api + + +class TestParseTags(object): + + def test_no_tags_means_empty_dict(self): + result = api.parse_tags('') + assert result == {} + + def test_single_tag_gets_parsed(self): + result = api.parse_tags('ceph.osd_something=1') + assert result == {'ceph.osd_something': '1'} + + def test_non_ceph_tags_are_skipped(self): + result = api.parse_tags('foo') + assert result == {} + + def test_mixed_non_ceph_tags(self): + result = api.parse_tags('foo,ceph.bar=1') + assert result == {'ceph.bar': '1'} + + def test_multiple_csv_expands_in_dict(self): + result = api.parse_tags('ceph.osd_something=1,ceph.foo=2,ceph.fsid=0000') + # assert them piecemeal to avoid the un-ordered dict nature + assert result['ceph.osd_something'] == '1' + assert result['ceph.foo'] == '2' + assert result['ceph.fsid'] == '0000' + + +class TestVolume(object): + + def test_is_ceph_device(self): + lv_tags = "ceph.type=data,ceph.osd_id=0" + osd = api.Volume(lv_name='osd/volume', lv_tags=lv_tags) + assert api.is_ceph_device(osd) + + @pytest.mark.parametrize('dev',[ + '/dev/sdb', + api.VolumeGroup(vg_name='foo'), + api.Volume(lv_name='vg/no_osd', lv_tags='', lv_path='lv/path'), + api.Volume(lv_name='vg/no_osd', lv_tags='ceph.osd_id=null', lv_path='lv/path'), + None, + ]) + def test_is_not_ceph_device(self, dev): + assert not api.is_ceph_device(dev) + + def test_no_empty_lv_name(self): + with pytest.raises(ValueError): + api.Volume(lv_name='', lv_tags='') + + +class TestVolumeGroup(object): + + def test_volume_group_no_empty_name(self): + with pytest.raises(ValueError): + api.VolumeGroup(vg_name='') + + +class TestVolumeGroupFree(object): + + def test_integer_gets_produced(self): + vg = api.VolumeGroup(vg_name='nosize', vg_free_count=100, vg_extent_size=4194304) + assert vg.free == 100 * 4194304 + + +class TestCreateLVs(object): + + def setup(self): + self.vg = api.VolumeGroup(vg_name='ceph', + vg_extent_size=1073741824, + vg_extent_count=99999999, + vg_free_count=999) + + def test_creates_correct_lv_number_from_parts(self, monkeypatch): + monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw)) + lvs = api.create_lvs(self.vg, parts=4) + assert len(lvs) == 4 + + def test_suffixes_the_size_arg(self, monkeypatch): + monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw)) + lvs = api.create_lvs(self.vg, parts=4) + assert lvs[0][1]['extents'] == 249 + + def test_only_uses_free_size(self, monkeypatch): + monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw)) + vg = api.VolumeGroup(vg_name='ceph', + vg_extent_size=1073741824, + vg_extent_count=99999999, + vg_free_count=1000) + lvs = 
api.create_lvs(vg, parts=4) + assert lvs[0][1]['extents'] == 250 + + def test_null_tags_are_set_by_default(self, monkeypatch): + monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw)) + kwargs = api.create_lvs(self.vg, parts=4)[0][1] + assert list(kwargs['tags'].values()) == ['null', 'null', 'null', 'null'] + + def test_fallback_to_one_part(self, monkeypatch): + monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw)) + lvs = api.create_lvs(self.vg) + assert len(lvs) == 1 + + +class TestVolumeGroupSizing(object): + + def setup(self): + self.vg = api.VolumeGroup(vg_name='ceph', + vg_extent_size=1073741824, + vg_free_count=1024) + + def test_parts_and_size_errors(self): + with pytest.raises(ValueError) as error: + self.vg.sizing(parts=4, size=10) + assert "Cannot process sizing" in str(error.value) + + def test_zero_parts_produces_100_percent(self): + result = self.vg.sizing(parts=0) + assert result['percentages'] == 100 + + def test_two_parts_produces_50_percent(self): + result = self.vg.sizing(parts=2) + assert result['percentages'] == 50 + + def test_two_parts_produces_half_size(self): + result = self.vg.sizing(parts=2) + assert result['sizes'] == 512 + + def test_half_size_produces_round_sizes(self): + result = self.vg.sizing(size=512) + assert result['sizes'] == 512 + assert result['percentages'] == 50 + assert result['parts'] == 2 + + def test_bit_more_than_half_size_allocates_full_size(self): + # 513 can't allocate more than 1, so it just fallsback to using the + # whole device + result = self.vg.sizing(size=513) + assert result['sizes'] == 1024 + assert result['percentages'] == 100 + assert result['parts'] == 1 + + def test_extents_are_halfed_rounded_down(self): + result = self.vg.sizing(size=512) + assert result['extents'] == 512 + + def test_bit_less_size_rounds_down(self): + result = self.vg.sizing(size=129) + assert result['sizes'] == 146 + assert result['percentages'] == 14 + assert result['parts'] == 7 + + def test_unable_to_allocate_past_free_size(self): + with pytest.raises(exceptions.SizeAllocationError): + self.vg.sizing(size=2048) + + +class TestRemoveLV(object): + + def test_removes_lv(self, monkeypatch): + def mock_call(cmd, **kw): + return ('', '', 0) + monkeypatch.setattr(process, 'call', mock_call) + assert api.remove_lv("vg/lv") + + def test_removes_lv_object(self, fake_call): + foo_volume = api.Volume(lv_name='foo', lv_path='/path', vg_name='foo_group', lv_tags='') + api.remove_lv(foo_volume) + # last argument from the list passed to process.call + assert fake_call.calls[0]['args'][0][-1] == '/path' + + def test_fails_to_remove_lv(self, monkeypatch): + def mock_call(cmd, **kw): + return ('', '', 1) + monkeypatch.setattr(process, 'call', mock_call) + with pytest.raises(RuntimeError): + api.remove_lv("vg/lv") + + +class TestCreateLV(object): + + def setup(self): + self.foo_volume = api.Volume(lv_name='foo', lv_path='/path', vg_name='foo_group', lv_tags='') + self.foo_group = api.VolumeGroup(vg_name='foo_group', + vg_extent_size="4194304", + vg_extent_count="100", + vg_free_count="100") + + @patch('ceph_volume.api.lvm.process.run') + @patch('ceph_volume.api.lvm.process.call') + @patch('ceph_volume.api.lvm.get_first_lv') + def test_uses_size(self, m_get_first_lv, m_call, m_run, monkeypatch): + m_get_first_lv.return_value = self.foo_volume + api.create_lv('foo', 0, vg=self.foo_group, size=419430400, tags={'ceph.type': 'data'}) + expected = ['lvcreate', '--yes', '-l', '100', '-n', 'foo-0', 'foo_group'] + 
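# (illustrative arithmetic, not part of the patch) the '-l 100' above follows
# from the fixture's geometry: 419430400 requested bytes / 4194304-byte
# extents = 100 extents, which is exactly vg_free_count, so the whole group
# gets allocated.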
m_run.assert_called_with(expected) + + @patch('ceph_volume.api.lvm.process.run') + @patch('ceph_volume.api.lvm.process.call') + @patch('ceph_volume.api.lvm.get_first_lv') + def test_uses_size_adjust_if_1percent_over(self, m_get_first_lv, m_call, m_run, monkeypatch): + foo_volume = api.Volume(lv_name='foo', lv_path='/path', vg_name='foo_group', lv_tags='') + foo_group = api.VolumeGroup(vg_name='foo_group', + vg_extent_size="4194304", + vg_extent_count="1000", + vg_free_count="1000") + m_get_first_lv.return_value = foo_volume + # 423624704 should be just under 1% off of the available size 419430400 + api.create_lv('foo', 0, vg=foo_group, size=4232052736, tags={'ceph.type': 'data'}) + expected = ['lvcreate', '--yes', '-l', '1000', '-n', 'foo-0', 'foo_group'] + m_run.assert_called_with(expected) + + @patch('ceph_volume.api.lvm.process.run') + @patch('ceph_volume.api.lvm.process.call') + @patch('ceph_volume.api.lvm.get_first_lv') + def test_uses_size_too_large(self, m_get_first_lv, m_call, m_run, monkeypatch): + m_get_first_lv.return_value = self.foo_volume + with pytest.raises(RuntimeError): + api.create_lv('foo', 0, vg=self.foo_group, size=5368709120, tags={'ceph.type': 'data'}) + + @patch('ceph_volume.api.lvm.process.run') + @patch('ceph_volume.api.lvm.process.call') + @patch('ceph_volume.api.lvm.get_first_lv') + def test_uses_extents(self, m_get_first_lv, m_call, m_run, monkeypatch): + m_get_first_lv.return_value = self.foo_volume + api.create_lv('foo', 0, vg=self.foo_group, extents='50', tags={'ceph.type': 'data'}) + expected = ['lvcreate', '--yes', '-l', '50', '-n', 'foo-0', 'foo_group'] + m_run.assert_called_with(expected) + + @pytest.mark.parametrize("test_input,expected", + [(2, 50), + (3, 33),]) + @patch('ceph_volume.api.lvm.process.run') + @patch('ceph_volume.api.lvm.process.call') + @patch('ceph_volume.api.lvm.get_first_lv') + def test_uses_slots(self, m_get_first_lv, m_call, m_run, monkeypatch, test_input, expected): + m_get_first_lv.return_value = self.foo_volume + api.create_lv('foo', 0, vg=self.foo_group, slots=test_input, tags={'ceph.type': 'data'}) + expected = ['lvcreate', '--yes', '-l', str(expected), '-n', 'foo-0', 'foo_group'] + m_run.assert_called_with(expected) + + @patch('ceph_volume.api.lvm.process.run') + @patch('ceph_volume.api.lvm.process.call') + @patch('ceph_volume.api.lvm.get_first_lv') + def test_uses_all(self, m_get_first_lv, m_call, m_run, monkeypatch): + m_get_first_lv.return_value = self.foo_volume + api.create_lv('foo', 0, vg=self.foo_group, tags={'ceph.type': 'data'}) + expected = ['lvcreate', '--yes', '-l', '100%FREE', '-n', 'foo-0', 'foo_group'] + m_run.assert_called_with(expected) + + @patch('ceph_volume.api.lvm.process.run') + @patch('ceph_volume.api.lvm.process.call') + @patch('ceph_volume.api.lvm.Volume.set_tags') + @patch('ceph_volume.api.lvm.get_first_lv') + def test_calls_to_set_tags_default(self, m_get_first_lv, m_set_tags, m_call, m_run, monkeypatch): + m_get_first_lv.return_value = self.foo_volume + api.create_lv('foo', 0, vg=self.foo_group) + tags = { + "ceph.osd_id": "null", + "ceph.type": "null", + "ceph.cluster_fsid": "null", + "ceph.osd_fsid": "null", + } + m_set_tags.assert_called_with(tags) + + @patch('ceph_volume.api.lvm.process.run') + @patch('ceph_volume.api.lvm.process.call') + @patch('ceph_volume.api.lvm.Volume.set_tags') + @patch('ceph_volume.api.lvm.get_first_lv') + def test_calls_to_set_tags_arg(self, m_get_first_lv, m_set_tags, m_call, m_run, monkeypatch): + m_get_first_lv.return_value = self.foo_volume + api.create_lv('foo', 0, 
vg=self.foo_group, tags={'ceph.type': 'data'}) + tags = { + "ceph.type": "data", + "ceph.data_device": "/path" + } + m_set_tags.assert_called_with(tags) + + @patch('ceph_volume.api.lvm.process.run') + @patch('ceph_volume.api.lvm.process.call') + @patch('ceph_volume.api.lvm.get_device_vgs') + @patch('ceph_volume.api.lvm.create_vg') + @patch('ceph_volume.api.lvm.get_first_lv') + def test_create_vg(self, m_get_first_lv, m_create_vg, m_get_device_vgs, m_call, + m_run, monkeypatch): + m_get_first_lv.return_value = self.foo_volume + m_get_device_vgs.return_value = [] + api.create_lv('foo', 0, device='dev/foo', size='5G', tags={'ceph.type': 'data'}) + m_create_vg.assert_called_with('dev/foo', name_prefix='ceph') + + +class TestTags(object): + + def setup(self): + self.foo_volume_clean = api.Volume(lv_name='foo_clean', lv_path='/pathclean', + vg_name='foo_group', + lv_tags='') + self.foo_volume = api.Volume(lv_name='foo', lv_path='/path', + vg_name='foo_group', + lv_tags='ceph.foo0=bar0,ceph.foo1=bar1,ceph.foo2=bar2') + + def test_set_tag(self, monkeypatch, capture): + monkeypatch.setattr(process, 'run', capture) + monkeypatch.setattr(process, 'call', capture) + self.foo_volume_clean.set_tag('foo', 'bar') + expected = ['lvchange', '--addtag', 'foo=bar', '/pathclean'] + assert capture.calls[0]['args'][0] == expected + assert self.foo_volume_clean.tags == {'foo': 'bar'} + + def test_set_clear_tag(self, monkeypatch, capture): + monkeypatch.setattr(process, 'run', capture) + monkeypatch.setattr(process, 'call', capture) + self.foo_volume_clean.set_tag('foo', 'bar') + assert self.foo_volume_clean.tags == {'foo': 'bar'} + self.foo_volume_clean.clear_tag('foo') + expected = ['lvchange', '--deltag', 'foo=bar', '/pathclean'] + assert self.foo_volume_clean.tags == {} + assert capture.calls[1]['args'][0] == expected + + def test_set_tags(self, monkeypatch, capture): + monkeypatch.setattr(process, 'run', capture) + monkeypatch.setattr(process, 'call', capture) + tags = {'ceph.foo0': 'bar0', 'ceph.foo1': 'bar1', 'ceph.foo2': 'bar2'} + assert self.foo_volume.tags == tags + + tags = {'ceph.foo0': 'bar0', 'ceph.foo1': 'baz1', 'ceph.foo2': 'baz2'} + self.foo_volume.set_tags(tags) + assert self.foo_volume.tags == tags + + self.foo_volume.set_tag('ceph.foo1', 'other1') + tags['ceph.foo1'] = 'other1' + assert self.foo_volume.tags == tags + + expected = [ + sorted(['lvchange', '--deltag', 'ceph.foo0=bar0', '--deltag', + 'ceph.foo1=bar1', '--deltag', 'ceph.foo2=bar2', '/path']), + sorted(['lvchange', '--deltag', 'ceph.foo1=baz1', '/path']), + sorted(['lvchange', '--addtag', 'ceph.foo0=bar0', '--addtag', + 'ceph.foo1=baz1', '--addtag', 'ceph.foo2=baz2', '/path']), + sorted(['lvchange', '--addtag', 'ceph.foo1=other1', '/path']), + ] + # The order isn't guaranted + for call in capture.calls: + assert sorted(call['args'][0]) in expected + assert len(capture.calls) == len(expected) + + def test_clear_tags(self, monkeypatch, capture): + monkeypatch.setattr(process, 'run', capture) + monkeypatch.setattr(process, 'call', capture) + tags = {'ceph.foo0': 'bar0', 'ceph.foo1': 'bar1', 'ceph.foo2': 'bar2'} + + self.foo_volume_clean.set_tags(tags) + assert self.foo_volume_clean.tags == tags + self.foo_volume_clean.clear_tags() + assert self.foo_volume_clean.tags == {} + + expected = [ + sorted(['lvchange', '--addtag', 'ceph.foo0=bar0', '--addtag', + 'ceph.foo1=bar1', '--addtag', 'ceph.foo2=bar2', + '/pathclean']), + sorted(['lvchange', '--deltag', 'ceph.foo0=bar0', '--deltag', + 'ceph.foo1=bar1', '--deltag', 'ceph.foo2=bar2', + 
'/pathclean']), + ] + # The order isn't guaranted + for call in capture.calls: + assert sorted(call['args'][0]) in expected + assert len(capture.calls) == len(expected) + + +class TestExtendVG(object): + + def setup(self): + self.foo_volume = api.VolumeGroup(vg_name='foo', lv_tags='') + + def test_uses_single_device_in_list(self, monkeypatch, fake_run): + monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True) + api.extend_vg(self.foo_volume, ['/dev/sda']) + expected = ['vgextend', '--force', '--yes', 'foo', '/dev/sda'] + assert fake_run.calls[0]['args'][0] == expected + + def test_uses_single_device(self, monkeypatch, fake_run): + monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True) + api.extend_vg(self.foo_volume, '/dev/sda') + expected = ['vgextend', '--force', '--yes', 'foo', '/dev/sda'] + assert fake_run.calls[0]['args'][0] == expected + + def test_uses_multiple_devices(self, monkeypatch, fake_run): + monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True) + api.extend_vg(self.foo_volume, ['/dev/sda', '/dev/sdb']) + expected = ['vgextend', '--force', '--yes', 'foo', '/dev/sda', '/dev/sdb'] + assert fake_run.calls[0]['args'][0] == expected + + +class TestReduceVG(object): + + def setup(self): + self.foo_volume = api.VolumeGroup(vg_name='foo', lv_tags='') + + def test_uses_single_device_in_list(self, monkeypatch, fake_run): + monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True) + api.reduce_vg(self.foo_volume, ['/dev/sda']) + expected = ['vgreduce', '--force', '--yes', 'foo', '/dev/sda'] + assert fake_run.calls[0]['args'][0] == expected + + def test_uses_single_device(self, monkeypatch, fake_run): + monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True) + api.reduce_vg(self.foo_volume, '/dev/sda') + expected = ['vgreduce', '--force', '--yes', 'foo', '/dev/sda'] + assert fake_run.calls[0]['args'][0] == expected + + def test_uses_multiple_devices(self, monkeypatch, fake_run): + monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True) + api.reduce_vg(self.foo_volume, ['/dev/sda', '/dev/sdb']) + expected = ['vgreduce', '--force', '--yes', 'foo', '/dev/sda', '/dev/sdb'] + assert fake_run.calls[0]['args'][0] == expected + + +class TestCreateVG(object): + + def setup(self): + self.foo_volume = api.VolumeGroup(vg_name='foo', lv_tags='') + + def test_no_name(self, monkeypatch, fake_run): + monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True) + api.create_vg('/dev/sda') + result = fake_run.calls[0]['args'][0] + assert '/dev/sda' in result + assert result[-2].startswith('ceph-') + + def test_devices_list(self, monkeypatch, fake_run): + monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True) + api.create_vg(['/dev/sda', '/dev/sdb'], name='ceph') + result = fake_run.calls[0]['args'][0] + expected = ['vgcreate', '--force', '--yes', 'ceph', '/dev/sda', '/dev/sdb'] + assert result == expected + + def test_name_prefix(self, monkeypatch, fake_run): + monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True) + api.create_vg('/dev/sda', name_prefix='master') + result = fake_run.calls[0]['args'][0] + assert '/dev/sda' in result + assert result[-2].startswith('master-') + + def test_specific_name(self, monkeypatch, fake_run): + monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True) + api.create_vg('/dev/sda', name='master') + result = fake_run.calls[0]['args'][0] + assert '/dev/sda' in result + assert result[-2] == 'master' + +# +# The following tests are pretty gnarly. 
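# (illustrative, not part of the patch) the sysfs layout the stubs below
# mimic looks roughly like:
#
#   /sys/kvdo                     -> the kvdo module is loaded
#   /sys/block/vdo0/slaves/dm-4   -> vdo0 is mapped on top of dm-4
#   /sys/block/dm-4/slaves/dm-3   -> dm-4 is mapped on top of dm-3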
VDO detection is very convoluted and +# involves correlating information from device mappers, realpaths, slaves of +# those mappers, and parents or related mappers. This makes it very hard to +# patch nicely or keep tests short and readable. These tests are trying to +# ensure correctness, the better approach will be to do some functional testing +# with VDO. +# + + +@pytest.fixture +def disable_kvdo_path(monkeypatch): + monkeypatch.setattr('os.path.isdir', lambda x, **kw: False) + + +@pytest.fixture +def enable_kvdo_path(monkeypatch): + monkeypatch.setattr('os.path.isdir', lambda x, **kw: True) + + +# Stub for os.listdir + + +class ListDir(object): + + def __init__(self, paths): + self.paths = paths + self._normalize_paths() + self.listdir = os.listdir + + def _normalize_paths(self): + for k, v in self.paths.items(): + self.paths[k.rstrip('/')] = v.rstrip('/') + + def add(self, original, fake): + self.paths[original.rstrip('/')] = fake.rstrip('/') + + def __call__(self, path): + return self.listdir(self.paths[path.rstrip('/')]) + + +@pytest.fixture(scope='function') +def listdir(monkeypatch): + def apply(paths=None, stub=None): + if not stub: + stub = ListDir(paths) + if paths: + for original, fake in paths.items(): + stub.add(original, fake) + + monkeypatch.setattr('os.listdir', stub) + return apply + + +@pytest.fixture(scope='function') +def makedirs(tmpdir): + def create(directory): + path = os.path.join(str(tmpdir), directory) + os.makedirs(path) + return path + create.base = str(tmpdir) + return create + + +class TestIsVdo(object): + + def test_no_vdo_dir(self, disable_kvdo_path): + assert api._is_vdo('/path') is False + + def test_exceptions_return_false(self, monkeypatch): + def throw(): + raise Exception() + monkeypatch.setattr('ceph_volume.api.lvm._is_vdo', throw) + assert api.is_vdo('/path') == '0' + + def test_is_vdo_returns_a_string(self, monkeypatch): + monkeypatch.setattr('ceph_volume.api.lvm._is_vdo', lambda x, **kw: True) + assert api.is_vdo('/path') == '1' + + def test_kvdo_dir_no_devices(self, makedirs, enable_kvdo_path, listdir, monkeypatch): + kvdo_path = makedirs('sys/kvdo') + listdir(paths={'/sys/kvdo': kvdo_path}) + monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x, **kw: []) + monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x, **kw: []) + assert api._is_vdo('/dev/mapper/vdo0') is False + + def test_vdo_slaves_found_and_matched(self, makedirs, enable_kvdo_path, listdir, monkeypatch): + kvdo_path = makedirs('sys/kvdo') + listdir(paths={'/sys/kvdo': kvdo_path}) + monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x, **kw: ['/dev/dm-3']) + monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x, **kw: []) + assert api._is_vdo('/dev/dm-3') is True + + def test_vdo_parents_found_and_matched(self, makedirs, enable_kvdo_path, listdir, monkeypatch): + kvdo_path = makedirs('sys/kvdo') + listdir(paths={'/sys/kvdo': kvdo_path}) + monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x, **kw: []) + monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x, **kw: ['/dev/dm-4']) + assert api._is_vdo('/dev/dm-4') is True + + +class TestVdoSlaves(object): + + def test_slaves_are_not_found(self, makedirs, listdir, monkeypatch): + slaves_path = makedirs('sys/block/vdo0/slaves') + listdir(paths={'/sys/block/vdo0/slaves': slaves_path}) + monkeypatch.setattr('ceph_volume.api.lvm.os.path.exists', lambda x, **kw: True) + result = sorted(api._vdo_slaves(['vdo0'])) + assert '/dev/mapper/vdo0' in result + assert 'vdo0' in result 
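The makedirs/listdir fixtures above combine like this in practice (a sketch with made-up paths, mirroring the tests in this class):

    def test_sketch(makedirs, listdir):
        fake = makedirs('sys/block/vdo0/slaves')  # a real dir under tmpdir
        listdir(paths={'/sys/block/vdo0/slaves': fake})
        # os.listdir('/sys/block/vdo0/slaves') now reads the tmpdir instead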
+ + def test_slaves_are_found(self, makedirs, listdir, monkeypatch): + slaves_path = makedirs('sys/block/vdo0/slaves') + makedirs('sys/block/vdo0/slaves/dm-4') + makedirs('dev/mapper/vdo0') + listdir(paths={'/sys/block/vdo0/slaves': slaves_path}) + monkeypatch.setattr('ceph_volume.api.lvm.os.path.exists', lambda x, **kw: True) + result = sorted(api._vdo_slaves(['vdo0'])) + assert '/dev/dm-4' in result + assert 'dm-4' in result + + +class TestVDOParents(object): + + def test_parents_are_found(self, makedirs, listdir): + block_path = makedirs('sys/block') + slaves_path = makedirs('sys/block/dm-4/slaves') + makedirs('sys/block/dm-4/slaves/dm-3') + listdir(paths={ + '/sys/block/dm-4/slaves': slaves_path, + '/sys/block': block_path}) + result = api._vdo_parents(['dm-3']) + assert '/dev/dm-4' in result + assert 'dm-4' in result + + def test_parents_are_not_found(self, makedirs, listdir): + block_path = makedirs('sys/block') + slaves_path = makedirs('sys/block/dm-4/slaves') + makedirs('sys/block/dm-4/slaves/dm-5') + listdir(paths={ + '/sys/block/dm-4/slaves': slaves_path, + '/sys/block': block_path}) + result = api._vdo_parents(['dm-3']) + assert result == [] + + +class TestSplitNameParser(object): + + def test_keys_are_parsed_without_prefix(self): + line = ["DM_VG_NAME='/dev/mapper/vg';DM_LV_NAME='lv';DM_LV_LAYER=''"] + result = api._splitname_parser(line) + assert result['VG_NAME'] == 'vg' + assert result['LV_NAME'] == 'lv' + assert result['LV_LAYER'] == '' + + def test_vg_name_sans_mapper(self): + line = ["DM_VG_NAME='/dev/mapper/vg';DM_LV_NAME='lv';DM_LV_LAYER=''"] + result = api._splitname_parser(line) + assert '/dev/mapper' not in result['VG_NAME'] + + +class TestGetDeviceVgs(object): + + @patch('ceph_volume.process.call') + @patch('ceph_volume.api.lvm._output_parser') + def test_get_device_vgs_with_empty_pv(self, patched_output_parser, pcall): + patched_output_parser.return_value = [{'vg_name': ''}] + pcall.return_value = ('', '', '') + vgs = api.get_device_vgs('/dev/foo') + assert vgs == [] + +class TestGetDeviceLvs(object): + + @patch('ceph_volume.process.call') + @patch('ceph_volume.api.lvm._output_parser') + def test_get_device_lvs_with_empty_vg(self, patched_output_parser, pcall): + patched_output_parser.return_value = [{'lv_name': ''}] + pcall.return_value = ('', '', '') + vgs = api.get_device_lvs('/dev/foo') + assert vgs == [] + + +# NOTE: api.convert_filters_to_str() and api.convert_tags_to_str() should get +# tested automatically while testing api.make_filters_lvmcmd_ready() +class TestMakeFiltersLVMCMDReady(object): + + def test_with_no_filters_and_no_tags(self): + retval = api.make_filters_lvmcmd_ready(None, None) + + assert isinstance(retval, str) + assert retval == '' + + def test_with_filters_and_no_tags(self): + filters = {'lv_name': 'lv1', 'lv_path': '/dev/sda'} + + retval = api.make_filters_lvmcmd_ready(filters, None) + + assert isinstance(retval, str) + for k, v in filters.items(): + assert k in retval + assert v in retval + + def test_with_no_filters_and_with_tags(self): + tags = {'ceph.type': 'data', 'ceph.osd_id': '0'} + + retval = api.make_filters_lvmcmd_ready(None, tags) + + assert isinstance(retval, str) + assert 'tags' in retval + for k, v in tags.items(): + assert k in retval + assert v in retval + assert retval.find('tags') < retval.find(k) < retval.find(v) + + def test_with_filters_and_tags(self): + filters = {'lv_name': 'lv1', 'lv_path': '/dev/sda'} + tags = {'ceph.type': 'data', 'ceph.osd_id': '0'} + + retval = api.make_filters_lvmcmd_ready(filters, tags) + + 
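# (illustrative, not part of the patch) for these inputs retval should come
# out as:
#   lv_name=lv1,lv_path=/dev/sda,tags={ceph.type=data,ceph.osd_id=0}
# which is what the ordering assertions below verify piece by piece.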
assert isinstance(retval, str) + for f, t in zip(filters.items(), tags.items()): + assert f[0] in retval + assert f[1] in retval + assert t[0] in retval + assert t[1] in retval + assert retval.find(f[0]) < retval.find(f[1]) < \ + retval.find('tags') < retval.find(t[0]) < retval.find(t[1]) + + +class TestGetPVs(object): + + def test_get_pvs(self, monkeypatch): + pv1 = api.PVolume(pv_name='/dev/sda', pv_uuid='0000', pv_tags={}, + vg_name='vg1') + pv2 = api.PVolume(pv_name='/dev/sdb', pv_uuid='0001', pv_tags={}, + vg_name='vg2') + pvs = [pv1, pv2] + stdout = ['{};{};{};{};;'.format(pv1.pv_name, pv1.pv_tags, pv1.pv_uuid, pv1.vg_name), + '{};{};{};{};;'.format(pv2.pv_name, pv2.pv_tags, pv2.pv_uuid, pv2.vg_name)] + monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0)) + + pvs_ = api.get_pvs() + assert len(pvs_) == len(pvs) + for pv, pv_ in zip(pvs, pvs_): + assert pv_.pv_name == pv.pv_name + + def test_get_pvs_single_pv(self, monkeypatch): + pv1 = api.PVolume(pv_name='/dev/sda', pv_uuid='0000', pv_tags={}, + vg_name='vg1') + pvs = [pv1] + stdout = ['{};;;;;;'.format(pv1.pv_name)] + monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0)) + + pvs_ = api.get_pvs() + assert len(pvs_) == 1 + assert pvs_[0].pv_name == pvs[0].pv_name + + def test_get_pvs_empty(self, monkeypatch): + monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0)) + assert api.get_pvs() == [] + + +class TestGetVGs(object): + + def test_get_vgs(self, monkeypatch): + vg1 = api.VolumeGroup(vg_name='vg1') + vg2 = api.VolumeGroup(vg_name='vg2') + vgs = [vg1, vg2] + stdout = ['{};;;;;;'.format(vg1.vg_name), + '{};;;;;;'.format(vg2.vg_name)] + monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0)) + + vgs_ = api.get_vgs() + assert len(vgs_) == len(vgs) + for vg, vg_ in zip(vgs, vgs_): + assert vg_.vg_name == vg.vg_name + + def test_get_vgs_single_vg(self, monkeypatch): + vg1 = api.VolumeGroup(vg_name='vg'); vgs = [vg1] + stdout = ['{};;;;;;'.format(vg1.vg_name)] + monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0)) + + vgs_ = api.get_vgs() + assert len(vgs_) == 1 + assert vgs_[0].vg_name == vgs[0].vg_name + + def test_get_vgs_empty(self, monkeypatch): + monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0)) + assert api.get_vgs() == [] + + +class TestGetLVs(object): + + def test_get_lvs(self, monkeypatch): + lv1 = api.Volume(lv_tags='ceph.type=data', lv_path='/dev/vg1/lv1', + lv_name='lv1', vg_name='vg1') + lv2 = api.Volume(lv_tags='ceph.type=data', lv_path='/dev/vg2/lv2', + lv_name='lv2', vg_name='vg2') + lvs = [lv1, lv2] + stdout = ['{};{};{};{}'.format(lv1.lv_tags, lv1.lv_path, lv1.lv_name, + lv1.vg_name), + '{};{};{};{}'.format(lv2.lv_tags, lv2.lv_path, lv2.lv_name, + lv2.vg_name)] + monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0)) + + lvs_ = api.get_lvs() + assert len(lvs_) == len(lvs) + for lv, lv_ in zip(lvs, lvs_): + assert lv.__dict__ == lv_.__dict__ + + def test_get_lvs_single_lv(self, monkeypatch): + stdout = ['ceph.type=data;/dev/vg/lv;lv;vg'] + monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0)) + lvs = [] + lvs.append((api.Volume(lv_tags='ceph.type=data', + lv_path='/dev/vg/lv', + lv_name='lv', vg_name='vg'))) + + lvs_ = api.get_lvs() + assert len(lvs_) == len(lvs) + assert lvs[0].__dict__ == lvs_[0].__dict__ + + def test_get_lvs_empty(self, monkeypatch): + monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0)) + assert api.get_lvs() == [] + + +class 
TestGetFirstPV(object): + + def test_get_first_pv(self, monkeypatch): + pv1 = api.PVolume(pv_name='/dev/sda', pv_uuid='0000', pv_tags={}, + vg_name='vg1') + pv2 = api.PVolume(pv_name='/dev/sdb', pv_uuid='0001', pv_tags={}, + vg_name='vg2') + stdout = ['{};{};{};{};;'.format(pv1.pv_name, pv1.pv_tags, pv1.pv_uuid, pv1.vg_name), + '{};{};{};{};;'.format(pv2.pv_name, pv2.pv_tags, pv2.pv_uuid, pv2.vg_name)] + monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0)) + + pv_ = api.get_first_pv() + assert isinstance(pv_, api.PVolume) + assert pv_.pv_name == pv1.pv_name + + def test_get_first_pv_single_pv(self, monkeypatch): + pv = api.PVolume(pv_name='/dev/sda', pv_uuid='0000', pv_tags={}, + vg_name='vg1') + stdout = ['{};;;;;;'.format(pv.pv_name)] + monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0)) + + pv_ = api.get_first_pv() + assert isinstance(pv_, api.PVolume) + assert pv_.pv_name == pv.pv_name + + def test_get_first_pv_empty(self, monkeypatch): + monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0)) + assert api.get_first_pv() == [] + + +class TestGetFirstVG(object): + + def test_get_first_vg(self, monkeypatch): + vg1 = api.VolumeGroup(vg_name='vg1') + vg2 = api.VolumeGroup(vg_name='vg2') + stdout = ['{};;;;;;'.format(vg1.vg_name), '{};;;;;;'.format(vg2.vg_name)] + monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0)) + + vg_ = api.get_first_vg() + assert isinstance(vg_, api.VolumeGroup) + assert vg_.vg_name == vg1.vg_name + + def test_get_first_vg_single_vg(self, monkeypatch): + vg = api.VolumeGroup(vg_name='vg') + stdout = ['{};;;;;;'.format(vg.vg_name)] + monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0)) + + vg_ = api.get_first_vg() + assert isinstance(vg_, api.VolumeGroup) + assert vg_.vg_name == vg.vg_name + + def test_get_first_vg_empty(self, monkeypatch): + monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0)) + vg_ = api.get_first_vg() + assert vg_ == [] + + +class TestGetFirstLV(object): + + def test_get_first_lv(self, monkeypatch): + lv1 = api.Volume(lv_tags='ceph.type=data', lv_path='/dev/vg1/lv1', + lv_name='lv1', vg_name='vg1') + lv2 = api.Volume(lv_tags='ceph.type=data', lv_path='/dev/vg2/lv2', + lv_name='lv2', vg_name='vg2') + stdout = ['{};{};{};{}'.format(lv1.lv_tags, lv1.lv_path, lv1.lv_name, + lv1.vg_name), + '{};{};{};{}'.format(lv2.lv_tags, lv2.lv_path, lv2.lv_name, + lv2.vg_name)] + monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0)) + + lv_ = api.get_first_lv() + assert isinstance(lv_, api.Volume) + assert lv_.lv_name == lv1.lv_name + + def test_get_first_lv_single_lv(self, monkeypatch): + stdout = ['ceph.type=data;/dev/vg/lv;lv;vg'] + monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0)) + lv = api.Volume(lv_tags='ceph.type=data', + lv_path='/dev/vg/lv', + lv_name='lv', vg_name='vg') + + lv_ = api.get_first_lv() + assert isinstance(lv_, api.Volume) + assert lv_.lv_name == lv.lv_name + + def test_get_first_lv_empty(self, monkeypatch): + monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0)) + assert api.get_lvs() == [] diff --git a/src/ceph-volume/ceph_volume/tests/conftest.py b/src/ceph-volume/ceph_volume/tests/conftest.py new file mode 100644 index 00000000..2abedac3 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/conftest.py @@ -0,0 +1,295 @@ +import os +import pytest +from mock.mock import patch, PropertyMock, create_autospec +from ceph_volume.api import lvm +from ceph_volume.util import disk 
+from ceph_volume.util import device +from ceph_volume.util.constants import ceph_disk_guids +from ceph_volume import conf, configuration + + +class Capture(object): + + def __init__(self, *a, **kw): + self.a = a + self.kw = kw + self.calls = [] + self.return_values = kw.get('return_values', False) + self.always_returns = kw.get('always_returns', False) + + def __call__(self, *a, **kw): + self.calls.append({'args': a, 'kwargs': kw}) + if self.always_returns: + return self.always_returns + if self.return_values: + return self.return_values.pop() + + +class Factory(object): + + def __init__(self, **kw): + for k, v in kw.items(): + setattr(self, k, v) + + +@pytest.fixture +def factory(): + return Factory + + +@pytest.fixture +def capture(): + return Capture() + +@pytest.fixture +def mock_lv_device_generator(): + def mock_lv(): + size = 21474836480 + dev = create_autospec(device.Device) + dev.lv_name = 'lv' + dev.vg_name = 'vg' + dev.path = '{}/{}'.format(dev.vg_name, dev.lv_name) + dev.used_by_ceph = False + dev.vg_size = [size] + dev.vg_free = dev.vg_size + dev.lvs = [lvm.Volume(vg_name=dev.vg_name, lv_name=dev.lv_name, lv_size=size, lv_tags='')] + return dev + return mock_lv + + +@pytest.fixture +def mock_devices_available(): + dev = create_autospec(device.Device) + dev.path = '/dev/foo' + dev.available_lvm = True + dev.vg_size = [21474836480] + dev.vg_free = dev.vg_size + return [dev] + +@pytest.fixture +def mock_device_generator(): + def mock_device(): + dev = create_autospec(device.Device) + dev.path = '/dev/foo' + dev.available_lvm = True + dev.vg_size = [21474836480] + dev.vg_free = dev.vg_size + dev.lvs = [] + return dev + return mock_device + + +@pytest.fixture(params=range(1,11)) +def osds_per_device(request): + return request.param + + +@pytest.fixture +def fake_run(monkeypatch): + fake_run = Capture() + monkeypatch.setattr('ceph_volume.process.run', fake_run) + return fake_run + + +@pytest.fixture +def fake_call(monkeypatch): + fake_call = Capture(always_returns=([], [], 0)) + monkeypatch.setattr('ceph_volume.process.call', fake_call) + return fake_call + + +@pytest.fixture +def fakedevice(factory): + def apply(**kw): + params = dict( + path='/dev/sda', + abspath='/dev/sda', + lv_api=None, + pvs_api=[], + disk_api={}, + sys_api={}, + exists=True, + is_lvm_member=True, + ) + params.update(dict(kw)) + params['lvm_size'] = disk.Size(b=params['sys_api'].get("size", 0)) + return factory(**params) + return apply + + +@pytest.fixture +def stub_call(monkeypatch): + """ + Monkeypatches process.call, so that a caller can add behavior to the response + """ + def apply(return_values): + if isinstance(return_values, tuple): + return_values = [return_values] + stubbed_call = Capture(return_values=return_values) + monkeypatch.setattr('ceph_volume.process.call', stubbed_call) + return stubbed_call + + return apply + + +@pytest.fixture(autouse=True) +def reset_cluster_name(request, monkeypatch): + """ + The globally available ``ceph_volume.conf.cluster`` might get mangled in + tests, make sure that after evert test, it gets reset, preventing pollution + going into other tests later. + """ + def fin(): + conf.cluster = None + try: + os.environ.pop('CEPH_CONF') + except KeyError: + pass + request.addfinalizer(fin) + + +@pytest.fixture +def conf_ceph(monkeypatch): + """ + Monkeypatches ceph_volume.conf.ceph, which is meant to parse/read + a ceph.conf. The patching is naive, it allows one to set return values for + specific method calls. 
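A sketch of the intended use (the stubbed method and return value are made up):

    def test_uses_fsid(conf_ceph):
        conf_ceph(get_safe=lambda *a: '0000-aaaa')
        # conf.ceph.get_safe(...) now returns the canned value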
+ """ + def apply(**kw): + stub = Factory(**kw) + monkeypatch.setattr(conf, 'ceph', stub) + return stub + return apply + + +@pytest.fixture +def conf_ceph_stub(monkeypatch, tmpfile): + """ + Monkeypatches ceph_volume.conf.ceph with contents from a string that are + written to a temporary file and then is fed through the same ceph.conf + loading mechanisms for testing. Unlike ``conf_ceph`` which is just a fake, + we are actually loading values as seen on a ceph.conf file + + This is useful when more complex ceph.conf's are needed. In the case of + just trying to validate a key/value behavior ``conf_ceph`` is better + suited. + """ + def apply(contents): + conf_path = tmpfile(contents=contents) + parser = configuration.load(conf_path) + monkeypatch.setattr(conf, 'ceph', parser) + return parser + return apply + + +@pytest.fixture +def is_root(monkeypatch): + """ + Patch ``os.getuid()`` so that ceph-volume's decorators that ensure a user + is root (or is sudoing to superuser) can continue as-is + """ + monkeypatch.setattr('os.getuid', lambda: 0) + + +@pytest.fixture +def tmpfile(tmpdir): + """ + Create a temporary file, optionally filling it with contents, returns an + absolute path to the file when called + """ + def generate_file(name='file', contents='', directory=None): + directory = directory or str(tmpdir) + path = os.path.join(directory, name) + with open(path, 'w') as fp: + fp.write(contents) + return path + return generate_file + + +@pytest.fixture +def disable_kernel_queries(monkeypatch): + ''' + This speeds up calls to Device and Disk + ''' + monkeypatch.setattr("ceph_volume.util.device.disk.get_devices", lambda: {}) + monkeypatch.setattr("ceph_volume.util.disk.udevadm_property", lambda *a, **kw: {}) + + +@pytest.fixture(params=[ + '', 'ceph data', 'ceph journal', 'ceph block', + 'ceph block.wal', 'ceph block.db', 'ceph lockbox']) +def ceph_partlabel(request): + return request.param + + +@pytest.fixture(params=list(ceph_disk_guids.keys())) +def ceph_parttype(request): + return request.param + + +@pytest.fixture +def lsblk_ceph_disk_member(monkeypatch, request, ceph_partlabel, ceph_parttype): + monkeypatch.setattr("ceph_volume.util.device.disk.lsblk", + lambda path: {'TYPE': 'disk', 'PARTLABEL': ceph_partlabel}) + # setting blkid here too in order to be able to fall back to PARTTYPE based + # membership + monkeypatch.setattr("ceph_volume.util.device.disk.blkid", + lambda path: {'TYPE': 'disk', + 'PARTLABEL': '', + 'PARTTYPE': ceph_parttype}) + + +@pytest.fixture +def blkid_ceph_disk_member(monkeypatch, request, ceph_partlabel, ceph_parttype): + monkeypatch.setattr("ceph_volume.util.device.disk.blkid", + lambda path: {'TYPE': 'disk', + 'PARTLABEL': ceph_partlabel, + 'PARTTYPE': ceph_parttype}) + + +@pytest.fixture(params=[ + ('gluster partition', 'gluster partition'), + # falls back to blkid + ('', 'gluster partition'), + ('gluster partition', ''), +]) +def device_info_not_ceph_disk_member(monkeypatch, request): + monkeypatch.setattr("ceph_volume.util.device.disk.lsblk", + lambda path: {'TYPE': 'disk', + 'PARTLABEL': request.param[0]}) + monkeypatch.setattr("ceph_volume.util.device.disk.blkid", + lambda path: {'TYPE': 'disk', + 'PARTLABEL': request.param[1]}) + +@pytest.fixture +def patched_get_block_devs_lsblk(): + with patch('ceph_volume.util.disk.get_block_devs_lsblk') as p: + yield p + +@pytest.fixture +def patch_bluestore_label(): + with patch('ceph_volume.util.device.Device.has_bluestore_label', + new_callable=PropertyMock) as p: + p.return_value = False + yield p + 
+@pytest.fixture
+def device_info(monkeypatch, patch_bluestore_label):
+    def apply(devices=None, lsblk=None, lv=None, blkid=None, udevadm=None,
+              has_bluestore_label=False):
+        devices = devices if devices else {}
+        lsblk = lsblk if lsblk else {}
+        blkid = blkid if blkid else {}
+        udevadm = udevadm if udevadm else {}
+        lv = Factory(**lv) if lv else None
+        monkeypatch.setattr("ceph_volume.sys_info.devices", {})
+        monkeypatch.setattr("ceph_volume.util.device.disk.get_devices", lambda: devices)
+        if not devices:
+            monkeypatch.setattr("ceph_volume.util.device.lvm.get_first_lv", lambda filters: lv)
+        else:
+            monkeypatch.setattr("ceph_volume.util.device.lvm.get_device_lvs",
+                                lambda path: [lv])
+        monkeypatch.setattr("ceph_volume.util.device.disk.lsblk", lambda path: lsblk)
+        monkeypatch.setattr("ceph_volume.util.device.disk.blkid", lambda path: blkid)
+        monkeypatch.setattr("ceph_volume.util.disk.udevadm_property", lambda *a, **kw: udevadm)
+    return apply
diff --git a/src/ceph-volume/ceph_volume/tests/devices/__init__.py b/src/ceph-volume/ceph_volume/tests/devices/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/__init__.py
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/__init__.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/__init__.py
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py
new file mode 100644
index 00000000..33e0ed32
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py
@@ -0,0 +1,414 @@
+import pytest
+from copy import deepcopy
+from ceph_volume.devices.lvm import activate
+from ceph_volume.api import lvm as api
+from ceph_volume.tests.conftest import Capture
+
+
+class Args(object):
+
+    def __init__(self, **kw):
+        # default flags
+        self.bluestore = False
+        self.filestore = False
+        self.no_systemd = False
+        self.auto_detect_objectstore = None
+        for k, v in kw.items():
+            setattr(self, k, v)
+
+
+class TestActivate(object):
+
+    # These tests are intentionally functional, hence the heavy patching: the
+    # negative side effects are hard to check with an actual functional run,
+    # so we must set up a perfect scenario for each test and verify that
+    # activation really works with/without an osd_id.
+    def test_no_osd_id_matches_fsid(self, is_root, monkeypatch, capture):
+        FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo',
+                               lv_tags="ceph.osd_fsid=1234")
+        volumes = []
+        volumes.append(FooVolume)
+        monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: volumes)
+        monkeypatch.setattr(activate, 'activate_filestore', capture)
+        args = Args(osd_id=None, osd_fsid='1234', filestore=True)
+        activate.Activate([]).activate(args)
+        assert capture.calls[0]['args'][0] == [FooVolume]
+
+    def test_no_osd_id_matches_fsid_bluestore(self, is_root, monkeypatch, capture):
+        FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo',
+                               lv_tags="ceph.osd_fsid=1234")
+        volumes = []
+        volumes.append(FooVolume)
+        monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: volumes)
+        monkeypatch.setattr(activate, 'activate_bluestore', capture)
+        args = Args(osd_id=None, osd_fsid='1234', bluestore=True)
+        activate.Activate([]).activate(args)
+        assert capture.calls[0]['args'][0] == [FooVolume]
+
+    def test_no_osd_id_no_matching_fsid(self, is_root, monkeypatch, capture):
+        FooVolume = api.Volume(lv_name='foo',
lv_path='/dev/vg/foo', + lv_tags="ceph.osd_fsid=1111") + volumes = [] + volumes.append(FooVolume) + monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: []) + monkeypatch.setattr(api, 'get_first_lv', lambda **kwargs: []) + monkeypatch.setattr(activate, 'activate_filestore', capture) + + args = Args(osd_id=None, osd_fsid='2222') + with pytest.raises(RuntimeError): + activate.Activate([]).activate(args) + + def test_filestore_no_systemd(self, is_root, monkeypatch, capture): + monkeypatch.setattr('ceph_volume.configuration.load', lambda: None) + fake_enable = Capture() + fake_start_osd = Capture() + monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True) + monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True) + monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True) + monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable) + monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd) + JournalVolume = api.Volume( + lv_name='journal', + lv_path='/dev/vg/journal', + lv_uuid='000', + lv_tags=','.join([ + "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal", + "ceph.journal_uuid=000", "ceph.type=journal", + "ceph.osd_id=0", "ceph.osd_fsid=1234"]) + ) + DataVolume = api.Volume( + lv_name='data', + lv_path='/dev/vg/data', + lv_uuid='001', + lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \ + "journal,ceph.journal_uuid=000,ceph.type=data," + \ + "ceph.osd_id=0,ceph.osd_fsid=1234") + volumes = [] + volumes.append(DataVolume) + volumes.append(JournalVolume) + monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes)) + + args = Args(osd_id=None, osd_fsid='1234', no_systemd=True, filestore=True) + activate.Activate([]).activate(args) + assert fake_enable.calls == [] + assert fake_start_osd.calls == [] + + def test_filestore_no_systemd_autodetect(self, is_root, monkeypatch, capture): + monkeypatch.setattr('ceph_volume.configuration.load', lambda: None) + fake_enable = Capture() + fake_start_osd = Capture() + monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True) + monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True) + monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True) + monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable) + monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd) + JournalVolume = api.Volume( + lv_name='journal', + lv_path='/dev/vg/journal', + lv_uuid='000', + lv_tags=','.join([ + "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal", + "ceph.journal_uuid=000", "ceph.type=journal", + "ceph.osd_id=0", "ceph.osd_fsid=1234"]) + ) + DataVolume = api.Volume( + lv_name='data', + lv_path='/dev/vg/data', + lv_uuid='001', + lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \ + "journal,ceph.journal_uuid=000,ceph.type=data," + \ + "ceph.osd_id=0,ceph.osd_fsid=1234") + volumes = [] + volumes.append(DataVolume) + volumes.append(JournalVolume) + monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes)) + + args = Args(osd_id=None, osd_fsid='1234', no_systemd=True, + filestore=True, auto_detect_objectstore=True) + activate.Activate([]).activate(args) + assert fake_enable.calls == [] + assert fake_start_osd.calls == [] + + def test_filestore_systemd_autodetect(self, is_root, monkeypatch, capture): + fake_enable = Capture() + fake_start_osd = Capture() + monkeypatch.setattr('ceph_volume.configuration.load', lambda: None) + 
monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True) + monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True) + monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True) + monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable) + monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd) + JournalVolume = api.Volume( + lv_name='journal', + lv_path='/dev/vg/journal', + lv_uuid='000', + lv_tags=','.join([ + "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal", + "ceph.journal_uuid=000", "ceph.type=journal", + "ceph.osd_id=0","ceph.osd_fsid=1234"]) + ) + DataVolume = api.Volume( + lv_name='data', + lv_path='/dev/vg/data', + lv_uuid='001', + lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \ + "journal,ceph.journal_uuid=000,ceph.type=data," + \ + "ceph.osd_id=0,ceph.osd_fsid=1234") + volumes = [] + volumes.append(DataVolume) + volumes.append(JournalVolume) + monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes)) + + args = Args(osd_id=None, osd_fsid='1234', no_systemd=False, + filestore=True, auto_detect_objectstore=False) + activate.Activate([]).activate(args) + assert fake_enable.calls != [] + assert fake_start_osd.calls != [] + + def test_filestore_systemd(self, is_root, monkeypatch, capture): + fake_enable = Capture() + fake_start_osd = Capture() + monkeypatch.setattr('ceph_volume.configuration.load', lambda: None) + monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True) + monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True) + monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True) + monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable) + monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd) + JournalVolume = api.Volume( + lv_name='journal', + lv_path='/dev/vg/journal', + lv_uuid='000', + lv_tags=','.join([ + "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal", + "ceph.journal_uuid=000", "ceph.type=journal", + "ceph.osd_id=0","ceph.osd_fsid=1234"]) + ) + DataVolume = api.Volume( + lv_name='data', + lv_path='/dev/vg/data', + lv_uuid='001', + lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \ + "journal,ceph.journal_uuid=000,ceph.type=data," + \ + "ceph.osd_id=0,ceph.osd_fsid=1234") + volumes = [] + volumes.append(DataVolume) + volumes.append(JournalVolume) + monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes)) + + args = Args(osd_id=None, osd_fsid='1234', no_systemd=False, + filestore=True) + activate.Activate([]).activate(args) + assert fake_enable.calls != [] + assert fake_start_osd.calls != [] + + def test_bluestore_no_systemd(self, is_root, monkeypatch, capture): + fake_enable = Capture() + fake_start_osd = Capture() + monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True) + monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True) + monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True) + monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable) + monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd) + DataVolume = api.Volume( + lv_name='data', + lv_path='/dev/vg/data', + lv_tags="ceph.cluster_name=ceph,,ceph.journal_uuid=000," + \ + "ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234") + volumes = [] + volumes.append(DataVolume) + monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes)) + + args = 
Args(osd_id=None, osd_fsid='1234', no_systemd=True, bluestore=True) + activate.Activate([]).activate(args) + assert fake_enable.calls == [] + assert fake_start_osd.calls == [] + + def test_bluestore_systemd(self, is_root, monkeypatch, capture): + fake_enable = Capture() + fake_start_osd = Capture() + monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True) + monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True) + monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True) + monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable) + monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd) + DataVolume = api.Volume( + lv_name='data', + lv_path='/dev/vg/data', + lv_tags="ceph.cluster_name=ceph,,ceph.journal_uuid=000," + \ + "ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234") + volumes = [] + volumes.append(DataVolume) + monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes)) + + args = Args(osd_id=None, osd_fsid='1234', no_systemd=False, + bluestore=True) + activate.Activate([]).activate(args) + assert fake_enable.calls != [] + assert fake_start_osd.calls != [] + + def test_bluestore_no_systemd_autodetect(self, is_root, monkeypatch, capture): + fake_enable = Capture() + fake_start_osd = Capture() + monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True) + monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True) + monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True) + monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable) + monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd) + DataVolume = api.Volume( + lv_name='data', + lv_path='/dev/vg/data', + lv_tags="ceph.cluster_name=ceph,,ceph.block_uuid=000," + \ + "ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234") + volumes = [] + volumes.append(DataVolume) + monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes)) + + args = Args(osd_id=None, osd_fsid='1234', no_systemd=True, + bluestore=True, auto_detect_objectstore=True) + activate.Activate([]).activate(args) + assert fake_enable.calls == [] + assert fake_start_osd.calls == [] + + def test_bluestore_systemd_autodetect(self, is_root, monkeypatch, capture): + fake_enable = Capture() + fake_start_osd = Capture() + monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', + lambda *a, **kw: True) + monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: + True) + monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True) + monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable) + monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd) + DataVolume = api.Volume( + lv_name='data', + lv_path='/dev/vg/data', + lv_tags="ceph.cluster_name=ceph,,ceph.journal_uuid=000," + \ + "ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234") + volumes = [] + volumes.append(DataVolume) + monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes)) + + args = Args(osd_id=None, osd_fsid='1234', no_systemd=False, + bluestore=True, auto_detect_objectstore=False) + activate.Activate([]).activate(args) + assert fake_enable.calls != [] + assert fake_start_osd.calls != [] + +class TestActivateFlags(object): + + def test_default_objectstore(self, capture): + args = ['0', 'asdf-ljh-asdf'] + activation = activate.Activate(args) + activation.activate = capture + activation.main() + parsed_args = capture.calls[0]['args'][0] + assert parsed_args.filestore is 
False + assert parsed_args.bluestore is True + + def test_uses_filestore(self, capture): + args = ['--filestore', '0', 'asdf-ljh-asdf'] + activation = activate.Activate(args) + activation.activate = capture + activation.main() + parsed_args = capture.calls[0]['args'][0] + assert parsed_args.filestore is True + assert parsed_args.bluestore is False + + def test_uses_bluestore(self, capture): + args = ['--bluestore', '0', 'asdf-ljh-asdf'] + activation = activate.Activate(args) + activation.activate = capture + activation.main() + parsed_args = capture.calls[0]['args'][0] + assert parsed_args.filestore is False + assert parsed_args.bluestore is True + + +class TestActivateAll(object): + + def test_does_not_detect_osds(self, capsys, is_root, capture, monkeypatch): + monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: {}) + args = ['--all'] + activation = activate.Activate(args) + activation.main() + out, err = capsys.readouterr() + assert 'Was unable to find any OSDs to activate' in err + assert 'Verify OSDs are present with ' in err + + def test_detects_running_osds(self, capsys, is_root, capture, monkeypatch): + monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: direct_report) + monkeypatch.setattr('ceph_volume.devices.lvm.activate.systemctl.osd_is_active', lambda x: True) + args = ['--all'] + activation = activate.Activate(args) + activation.main() + out, err = capsys.readouterr() + assert 'a8789a96ce8b process is active. Skipping activation' in err + assert 'b8218eaa1634 process is active. Skipping activation' in err + + def test_detects_osds_to_activate(self, is_root, capture, monkeypatch): + monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: direct_report) + monkeypatch.setattr('ceph_volume.devices.lvm.activate.systemctl.osd_is_active', lambda x: False) + args = ['--all'] + activation = activate.Activate(args) + activation.activate = capture + activation.main() + calls = sorted(capture.calls, key=lambda x: x['kwargs']['osd_id']) + assert calls[0]['kwargs']['osd_id'] == '0' + assert calls[0]['kwargs']['osd_fsid'] == '957d22b7-24ce-466a-9883-b8218eaa1634' + assert calls[1]['kwargs']['osd_id'] == '1' + assert calls[1]['kwargs']['osd_fsid'] == 'd0f3e4ad-e52a-4520-afc0-a8789a96ce8b' + +# +# Activate All fixture +# + +direct_report = { + "0": [ + { + "lv_name": "osd-block-957d22b7-24ce-466a-9883-b8218eaa1634", + "lv_path": "/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634", + "lv_tags": "ceph.block_device=/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634,ceph.block_uuid=6MixOd-2Q1I-f8K3-PPOq-UJGV-L3A0-0XwUm4,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=d4962338-46ff-4cd5-8ea6-c033dbdc5b44,ceph.cluster_name=ceph,ceph.crush_device_class=None,ceph.encrypted=0,ceph.osd_fsid=957d22b7-24ce-466a-9883-b8218eaa1634,ceph.osd_id=0,ceph.type=block", + "lv_uuid": "6MixOd-2Q1I-f8K3-PPOq-UJGV-L3A0-0XwUm4", + "name": "osd-block-957d22b7-24ce-466a-9883-b8218eaa1634", + "path": "/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634", + "tags": { + "ceph.block_device": "/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634", + "ceph.block_uuid": "6MixOd-2Q1I-f8K3-PPOq-UJGV-L3A0-0XwUm4", + "ceph.cephx_lockbox_secret": "", + "ceph.cluster_fsid": "d4962338-46ff-4cd5-8ea6-c033dbdc5b44", + "ceph.cluster_name": "ceph", + "ceph.crush_device_class": "None", + 
"ceph.encrypted": "0", + "ceph.osd_fsid": "957d22b7-24ce-466a-9883-b8218eaa1634", + "ceph.osd_id": "0", + "ceph.type": "block" + }, + "type": "block", + "vg_name": "ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44" + } + ], + "1": [ + { + "lv_name": "osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b", + "lv_path": "/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b", + "lv_tags": "ceph.block_device=/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b,ceph.block_uuid=1igwLb-ZlmV-eLgp-hapx-c1Hr-M5gz-sHjnyW,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=d4962338-46ff-4cd5-8ea6-c033dbdc5b44,ceph.cluster_name=ceph,ceph.crush_device_class=None,ceph.encrypted=0,ceph.osd_fsid=d0f3e4ad-e52a-4520-afc0-a8789a96ce8b,ceph.osd_id=1,ceph.type=block", + "lv_uuid": "1igwLb-ZlmV-eLgp-hapx-c1Hr-M5gz-sHjnyW", + "name": "osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b", + "path": "/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b", + "tags": { + "ceph.block_device": "/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b", + "ceph.block_uuid": "1igwLb-ZlmV-eLgp-hapx-c1Hr-M5gz-sHjnyW", + "ceph.cephx_lockbox_secret": "", + "ceph.cluster_fsid": "d4962338-46ff-4cd5-8ea6-c033dbdc5b44", + "ceph.cluster_name": "ceph", + "ceph.crush_device_class": "None", + "ceph.encrypted": "0", + "ceph.osd_fsid": "d0f3e4ad-e52a-4520-afc0-a8789a96ce8b", + "ceph.osd_id": "1", + "ceph.type": "block" + }, + "type": "block", + "vg_name": "ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532" + } + ] +} diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py new file mode 100644 index 00000000..7c968ae8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py @@ -0,0 +1,280 @@ +import pytest +import json +import random + +from argparse import ArgumentError +from mock import MagicMock, patch + +from ceph_volume.devices.lvm import batch +from ceph_volume.util import arg_validators + + +class TestBatch(object): + + def test_batch_instance(self, is_root): + b = batch.Batch([]) + b.main() + + def test_disjoint_device_lists(self, factory): + device1 = factory(used_by_ceph=False, available=True, abspath="/dev/sda") + device2 = factory(used_by_ceph=False, available=True, abspath="/dev/sdb") + devices = [device1, device2] + db_devices = [device2] + with pytest.raises(Exception) as disjoint_ex: + batch.ensure_disjoint_device_lists(devices, db_devices) + assert 'Device lists are not disjoint' in str(disjoint_ex.value) + + @patch('ceph_volume.util.arg_validators.Device') + def test_reject_partition(self, mocked_device): + mocked_device.return_value = MagicMock( + is_partition=True, + has_gpt_headers=False, + ) + with pytest.raises(ArgumentError): + arg_validators.ValidBatchDevice()('foo') + + @pytest.mark.parametrize('format_', ['pretty', 'json', 'json-pretty']) + def test_report(self, format_, factory, conf_ceph_stub, mock_device_generator): + # just ensure reporting works + conf_ceph_stub('[global]\nfsid=asdf-lkjh') + devs = [mock_device_generator() for _ in range(5)] + args = factory(data_slots=1, + osds_per_device=1, + osd_ids=[], + report=True, + format=format_, + devices=devs, + db_devices=[], + wal_devices=[], + bluestore=True, + block_db_size="1G", + dmcrypt=True, + ) + b = batch.Batch([]) + plan = b.get_plan(args) + b.args = args + b.report(plan) + + @pytest.mark.parametrize('format_', ['json', 
'json-pretty'])
+    def test_json_report_valid_empty(self, format_, factory, conf_ceph_stub, mock_device_generator):
+        # ensure json reports are valid when empty
+        conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+        devs = []
+        args = factory(data_slots=1,
+                       osds_per_device=1,
+                       osd_ids=[],
+                       report=True,
+                       format=format_,
+                       devices=devs,
+                       db_devices=[],
+                       wal_devices=[],
+                       bluestore=True,
+                       block_db_size="1G",
+                       dmcrypt=True,
+                       )
+        b = batch.Batch([])
+        plan = b.get_plan(args)
+        b.args = args
+        report = b._create_report(plan)
+        json.loads(report)
+
+    @pytest.mark.parametrize('format_', ['json', 'json-pretty'])
+    def test_json_report_valid_empty_unavailable_fast(self, format_, factory, conf_ceph_stub, mock_device_generator):
+        # ensure json reports stay valid when the fast (db) devices are unavailable
+        conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+        devs = [mock_device_generator() for _ in range(5)]
+        fast_devs = [mock_device_generator()]
+        fast_devs[0].available_lvm = False
+        args = factory(data_slots=1,
+                       osds_per_device=1,
+                       osd_ids=[],
+                       report=True,
+                       format=format_,
+                       devices=devs,
+                       db_devices=fast_devs,
+                       wal_devices=[],
+                       bluestore=True,
+                       block_db_size="1G",
+                       dmcrypt=True,
+                       )
+        b = batch.Batch([])
+        plan = b.get_plan(args)
+        b.args = args
+        report = b._create_report(plan)
+        json.loads(report)
+
+
+    @pytest.mark.parametrize('format_', ['json', 'json-pretty'])
+    def test_json_report_valid_empty_unavailable_very_fast(self, format_, factory, conf_ceph_stub, mock_device_generator):
+        # ensure json reports stay valid when the very fast (wal) devices are unavailable
+        conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+        devs = [mock_device_generator() for _ in range(5)]
+        fast_devs = [mock_device_generator()]
+        very_fast_devs = [mock_device_generator()]
+        very_fast_devs[0].available_lvm = False
+        args = factory(data_slots=1,
+                       osds_per_device=1,
+                       osd_ids=[],
+                       report=True,
+                       format=format_,
+                       devices=devs,
+                       db_devices=fast_devs,
+                       wal_devices=very_fast_devs,
+                       bluestore=True,
+                       block_db_size="1G",
+                       dmcrypt=True,
+                       )
+        b = batch.Batch([])
+        plan = b.get_plan(args)
+        b.args = args
+        report = b._create_report(plan)
+        json.loads(report)
+
+    @pytest.mark.parametrize('rota', [0, 1])
+    def test_batch_sort_full(self, factory, rota):
+        device1 = factory(used_by_ceph=False, available=True, rotational=rota, abspath="/dev/sda")
+        device2 = factory(used_by_ceph=False, available=True, rotational=rota, abspath="/dev/sdb")
+        device3 = factory(used_by_ceph=False, available=True, rotational=rota, abspath="/dev/sdc")
+        devices = [device1, device2, device3]
+        args = factory(report=True,
+                       devices=devices,
+                       filestore=False,
+                       )
+        b = batch.Batch([])
+        b.args = args
+        b._sort_rotational_disks()
+        assert len(b.args.devices) == 3
+
+    @pytest.mark.parametrize('objectstore', ['bluestore', 'filestore'])
+    def test_batch_sort_mixed(self, factory, objectstore):
+        device1 = factory(used_by_ceph=False, available=True, rotational=1, abspath="/dev/sda")
+        device2 = factory(used_by_ceph=False, available=True, rotational=1, abspath="/dev/sdb")
+        device3 = factory(used_by_ceph=False, available=True, rotational=0, abspath="/dev/sdc")
+        devices = [device1, device2, device3]
+        args = factory(report=True,
+                       devices=devices,
+                       filestore=False if objectstore == 'bluestore' else True,
+                       )
+        b = batch.Batch([])
+        b.args = args
+        b._sort_rotational_disks()
+        assert len(b.args.devices) == 2
+        if objectstore == 'bluestore':
+            assert len(b.args.db_devices) == 1
+        else:
+            assert len(b.args.journal_devices) == 1
+
+    def test_get_physical_osds_return_len(self, factory,
+                                          mock_devices_available,
+                                          conf_ceph_stub,
osds_per_device): + conf_ceph_stub('[global]\nfsid=asdf-lkjh') + args = factory(data_slots=1, osds_per_device=osds_per_device, + osd_ids=[], dmcrypt=False) + osds = batch.get_physical_osds(mock_devices_available, args) + assert len(osds) == len(mock_devices_available) * osds_per_device + + def test_get_physical_osds_rel_size(self, factory, + mock_devices_available, + conf_ceph_stub, + osds_per_device): + args = factory(data_slots=1, osds_per_device=osds_per_device, + osd_ids=[], dmcrypt=False) + osds = batch.get_physical_osds(mock_devices_available, args) + for osd in osds: + assert osd.data[1] == 1.0 / osds_per_device + + def test_get_physical_osds_abs_size(self, factory, + mock_devices_available, + conf_ceph_stub, + osds_per_device): + conf_ceph_stub('[global]\nfsid=asdf-lkjh') + args = factory(data_slots=1, osds_per_device=osds_per_device, + osd_ids=[], dmcrypt=False) + osds = batch.get_physical_osds(mock_devices_available, args) + for osd, dev in zip(osds, mock_devices_available): + assert osd.data[2] == int(dev.vg_size[0] / osds_per_device) + + def test_get_physical_osds_osd_ids(self, factory, + mock_devices_available, + osds_per_device): + pass + + def test_get_physical_fast_allocs_length(self, factory, + conf_ceph_stub, + mock_devices_available): + conf_ceph_stub('[global]\nfsid=asdf-lkjh') + args = factory(block_db_slots=None, get_block_db_size=None) + fast = batch.get_physical_fast_allocs(mock_devices_available, + 'block_db', 2, 2, args) + assert len(fast) == 2 + + @pytest.mark.parametrize('occupied_prior', range(7)) + @pytest.mark.parametrize('slots,num_devs', + [l for sub in [list(zip([x]*x, range(1, x + 1))) for x in range(1,7)] for l in sub]) + def test_get_physical_fast_allocs_length_existing(self, + num_devs, + slots, + occupied_prior, + factory, + conf_ceph_stub, + mock_device_generator): + conf_ceph_stub('[global]\nfsid=asdf-lkjh') + occupied_prior = min(occupied_prior, slots) + devs = [mock_device_generator() for _ in range(num_devs)] + already_assigned = 0 + while already_assigned < occupied_prior: + dev_i = random.randint(0, num_devs - 1) + dev = devs[dev_i] + if len(dev.lvs) < occupied_prior: + dev.lvs.append('foo') + dev.path = '/dev/bar' + already_assigned = sum([len(d.lvs) for d in devs]) + args = factory(block_db_slots=None, get_block_db_size=None) + expected_num_osds = max(len(devs) * slots - occupied_prior, 0) + fast = batch.get_physical_fast_allocs(devs, + 'block_db', slots, + expected_num_osds, args) + assert len(fast) == expected_num_osds + expected_assignment_on_used_devices = sum([slots - len(d.lvs) for d in devs if len(d.lvs) > 0]) + assert len([f for f in fast if f[0] == '/dev/bar']) == expected_assignment_on_used_devices + assert len([f for f in fast if f[0] != '/dev/bar']) == expected_num_osds - expected_assignment_on_used_devices + + def test_get_lvm_osds_return_len(self, factory, + mock_lv_device_generator, + conf_ceph_stub, + osds_per_device): + conf_ceph_stub('[global]\nfsid=asdf-lkjh') + args = factory(data_slots=1, osds_per_device=osds_per_device, + osd_ids=[], dmcrypt=False) + mock_lvs = [mock_lv_device_generator()] + osds = batch.get_lvm_osds(mock_lvs, args) + assert len(osds) == 1 + + +class TestBatchOsd(object): + + def test_osd_class_ctor(self): + osd = batch.Batch.OSD('/dev/data', 1, '5G', 1, 1, None) + assert osd.data == batch.Batch.OSD.VolSpec('/dev/data', + 1, + '5G', + 1, + 'data') + def test_add_fast(self): + osd = batch.Batch.OSD('/dev/data', 1, '5G', 1, 1, None) + osd.add_fast_device('/dev/db', 1, '5G', 1, 'block_db') + assert osd.fast 
== batch.Batch.OSD.VolSpec('/dev/db', + 1, + '5G', + 1, + 'block_db') + + def test_add_very_fast(self): + osd = batch.Batch.OSD('/dev/data', 1, '5G', 1, 1, None) + osd.add_very_fast_device('/dev/wal', 1, '5G', 1) + assert osd.very_fast == batch.Batch.OSD.VolSpec('/dev/wal', + 1, + '5G', + 1, + 'block_wal') diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py new file mode 100644 index 00000000..fe792d5a --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py @@ -0,0 +1,8 @@ +from ceph_volume.devices.lvm import common + + +class TestCommon(object): + + def test_get_default_args_smoke(self): + default_args = common.get_default_args() + assert default_args diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py new file mode 100644 index 00000000..994038f3 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py @@ -0,0 +1,48 @@ +import pytest +from ceph_volume.devices import lvm + + +class TestCreate(object): + + def test_main_spits_help_with_no_arguments(self, capsys): + lvm.create.Create([]).main() + stdout, stderr = capsys.readouterr() + assert 'Create an OSD by assigning an ID and FSID' in stdout + + def test_main_shows_full_help(self, capsys): + with pytest.raises(SystemExit): + lvm.create.Create(argv=['--help']).main() + stdout, stderr = capsys.readouterr() + assert 'Use the filestore objectstore' in stdout + assert 'Use the bluestore objectstore' in stdout + assert 'A physical device or logical' in stdout + + def test_excludes_filestore_bluestore_flags(self, capsys, device_info): + device_info() + with pytest.raises(SystemExit): + lvm.create.Create(argv=['--data', '/dev/sdfoo', '--filestore', '--bluestore']).main() + stdout, stderr = capsys.readouterr() + expected = 'Cannot use --filestore (filestore) with --bluestore (bluestore)' + assert expected in stderr + + def test_excludes_other_filestore_bluestore_flags(self, capsys, device_info): + device_info() + with pytest.raises(SystemExit): + lvm.create.Create(argv=[ + '--bluestore', '--data', '/dev/sdfoo', + '--journal', '/dev/sf14', + ]).main() + stdout, stderr = capsys.readouterr() + expected = 'Cannot use --bluestore (bluestore) with --journal (filestore)' + assert expected in stderr + + def test_excludes_block_and_journal_flags(self, capsys, device_info): + device_info() + with pytest.raises(SystemExit): + lvm.create.Create(argv=[ + '--bluestore', '--data', '/dev/sdfoo', '--block.db', 'vg/ceph1', + '--journal', '/dev/sf14', + ]).main() + stdout, stderr = capsys.readouterr() + expected = 'Cannot use --block.db (bluestore) with --journal (filestore)' + assert expected in stderr diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py new file mode 100644 index 00000000..4b8304ce --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py @@ -0,0 +1,59 @@ +import pytest +from mock.mock import patch +from ceph_volume.api import lvm +from ceph_volume.devices.lvm import deactivate + +class TestDeactivate(object): + + @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag") + def test_no_osd(self, p_get_lvs): + p_get_lvs.return_value = [] + with pytest.raises(StopIteration): + deactivate.deactivate_osd(0) + + @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag") + 
@patch("ceph_volume.util.system.unmount_tmpfs") + def test_unmount_tmpfs_called_osd_id(self, p_u_tmpfs, p_get_lvs): + FooVolume = lvm.Volume( + lv_name='foo', lv_path='/dev/vg/foo', + lv_tags="ceph.osd_id=0,ceph.cluster_name=foo,ceph.type=data") + p_get_lvs.return_value = [FooVolume] + + deactivate.deactivate_osd(0) + p_u_tmpfs.assert_called_with( + '/var/lib/ceph/osd/{}-{}'.format('foo', 0)) + + @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag") + @patch("ceph_volume.util.system.unmount_tmpfs") + def test_unmount_tmpfs_called_osd_uuid(self, p_u_tmpfs, p_get_lvs): + FooVolume = lvm.Volume( + lv_name='foo', lv_path='/dev/vg/foo', + lv_tags="ceph.osd_fsid=0,ceph.osd_id=1,ceph.cluster_name=foo,ceph.type=data") + p_get_lvs.return_value = [FooVolume] + + deactivate.deactivate_osd(None, 0) + p_u_tmpfs.assert_called_with( + '/var/lib/ceph/osd/{}-{}'.format('foo', 1)) + + @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag") + @patch("ceph_volume.util.system.unmount_tmpfs") + @patch("ceph_volume.util.encryption.dmcrypt_close") + def test_no_crypt_no_dmclose(self, p_dm_close, p_u_tmpfs, p_get_lvs): + FooVolume = lvm.Volume( + lv_name='foo', lv_path='/dev/vg/foo', + lv_tags="ceph.osd_id=0,ceph.cluster_name=foo,ceph.type=data") + p_get_lvs.return_value = [FooVolume] + + deactivate.deactivate_osd(0) + + @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag") + @patch("ceph_volume.util.system.unmount_tmpfs") + @patch("ceph_volume.util.encryption.dmcrypt_close") + def test_crypt_dmclose(self, p_dm_close, p_u_tmpfs, p_get_lvs): + FooVolume = lvm.Volume( + lv_name='foo', lv_path='/dev/vg/foo', lv_uuid='123', + lv_tags="ceph.osd_id=0,ceph.encrypted=1,ceph.cluster_name=foo,ceph.type=data") + p_get_lvs.return_value = [FooVolume] + + deactivate.deactivate_osd(0) + p_dm_close.assert_called_with('123') diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py new file mode 100644 index 00000000..cf4b68c7 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py @@ -0,0 +1,265 @@ +import pytest +from ceph_volume.devices import lvm +from ceph_volume.api import lvm as api + +# TODO: add tests for following commands - +# ceph-volume list +# ceph-volume list <path-to-pv> +# ceph-volume list <path-to-vg> +# ceph-volume list <path-to-lv> + +class TestReadableTag(object): + + def test_dots_get_replaced(self): + result = lvm.listing.readable_tag('ceph.foo') + assert result == 'foo' + + def test_underscores_are_replaced_with_spaces(self): + result = lvm.listing.readable_tag('ceph.long_tag') + assert result == 'long tag' + + +class TestPrettyReport(object): + + def test_is_empty(self, capsys): + lvm.listing.pretty_report({}) + stdout, stderr = capsys.readouterr() + assert stdout == '\n' + + def test_type_and_path_are_reported(self, capsys): + lvm.listing.pretty_report({0: [ + {'type': 'data', 'path': '/dev/sda1', 'devices': ['/dev/sda']} + ]}) + stdout, stderr = capsys.readouterr() + assert '[data] /dev/sda1' in stdout + + def test_osd_id_header_is_reported(self, capsys): + lvm.listing.pretty_report({0: [ + {'type': 'data', 'path': '/dev/sda1', 'devices': ['/dev/sda']} + ]}) + stdout, stderr = capsys.readouterr() + assert '====== osd.0 =======' in stdout + + def test_tags_are_included(self, capsys): + lvm.listing.pretty_report( + {0: [{ + 'type': 'data', + 'path': '/dev/sda1', + 'tags': {'ceph.osd_id': '0'}, + 'devices': ['/dev/sda'], + }]} + ) + stdout, stderr = capsys.readouterr() + assert 'osd id' 
in stdout + + def test_devices_are_comma_separated(self, capsys): + lvm.listing.pretty_report({0: [ + {'type': 'data', 'path': '/dev/sda1', 'devices': ['/dev/sda', '/dev/sdb1']} + ]}) + stdout, stderr = capsys.readouterr() + assert '/dev/sda,/dev/sdb1' in stdout + + +class TestList(object): + + def test_empty_full_json_zero_exit_status(self, is_root,factory,capsys): + args = factory(format='json', device=None) + lvm.listing.List([]).list(args) + stdout, stderr = capsys.readouterr() + assert stdout == '{}\n' + + def test_empty_device_json_zero_exit_status(self, is_root,factory,capsys): + args = factory(format='json', device='/dev/sda1') + lvm.listing.List([]).list(args) + stdout, stderr = capsys.readouterr() + assert stdout == '{}\n' + + def test_empty_full_zero_exit_status(self, is_root, factory): + args = factory(format='pretty', device=None) + with pytest.raises(SystemExit): + lvm.listing.List([]).list(args) + + def test_empty_device_zero_exit_status(self, is_root, factory): + args = factory(format='pretty', device='/dev/sda1') + with pytest.raises(SystemExit): + lvm.listing.List([]).list(args) + +class TestFullReport(object): + + def test_no_ceph_lvs(self, monkeypatch): + # ceph lvs are detected by looking into its tags + osd = api.Volume(lv_name='volume1', lv_path='/dev/VolGroup/lv', + lv_tags={}) + volumes = [] + volumes.append(osd) + monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: + volumes) + + result = lvm.listing.List([]).full_report() + assert result == {} + + def test_ceph_data_lv_reported(self, monkeypatch): + tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data' + pv = api.PVolume(pv_name='/dev/sda1', pv_tags={}, pv_uuid="0000", + vg_name='VolGroup', lv_uuid="aaaa") + osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags, + lv_path='/dev/VolGroup/lv', vg_name='VolGroup') + volumes = [] + volumes.append(osd) + monkeypatch.setattr(lvm.listing.api, 'get_first_pv', lambda **kwargs: pv) + monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: + volumes) + + result = lvm.listing.List([]).full_report() + assert result['0'][0]['name'] == 'volume1' + + def test_ceph_journal_lv_reported(self, monkeypatch): + tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data' + journal_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=journal' + pv = api.PVolume(pv_name='/dev/sda1', pv_tags={}, pv_uuid="0000", + vg_name="VolGroup", lv_uuid="aaaa") + osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags, + lv_path='/dev/VolGroup/lv', vg_name='VolGroup') + journal = api.Volume( + lv_name='journal', lv_uuid='x', lv_tags=journal_tags, + lv_path='/dev/VolGroup/journal', vg_name='VolGroup') + volumes = [] + volumes.append(osd) + volumes.append(journal) + monkeypatch.setattr(lvm.listing.api,'get_first_pv',lambda **kwargs:pv) + monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: + volumes) + + result = lvm.listing.List([]).full_report() + assert result['0'][0]['name'] == 'volume1' + assert result['0'][1]['name'] == 'journal' + + def test_ceph_wal_lv_reported(self, monkeypatch): + tags = 'ceph.osd_id=0,ceph.wal_uuid=x,ceph.type=data' + wal_tags = 'ceph.osd_id=0,ceph.wal_uuid=x,ceph.type=wal' + osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags, + lv_path='/dev/VolGroup/lv', vg_name='VolGroup') + wal = api.Volume(lv_name='wal', lv_uuid='x', lv_tags=wal_tags, + lv_path='/dev/VolGroup/wal', vg_name='VolGroup') + volumes = [] + volumes.append(osd) + volumes.append(wal) + monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: + 
volumes) + + result = lvm.listing.List([]).full_report() + assert result['0'][0]['name'] == 'volume1' + assert result['0'][1]['name'] == 'wal' + + @pytest.mark.parametrize('type_', ['journal', 'db', 'wal']) + def test_physical_2nd_device_gets_reported(self, type_, monkeypatch): + tags = ('ceph.osd_id=0,ceph.{t}_uuid=x,ceph.type=data,' + 'ceph.{t}_device=/dev/sda1').format(t=type_) + osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags, + vg_name='VolGroup', lv_path='/dev/VolGroup/lv') + monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: + [osd]) + + result = lvm.listing.List([]).full_report() + assert result['0'][1]['path'] == '/dev/sda1' + assert result['0'][1]['tags'] == {'PARTUUID': 'x'} + assert result['0'][1]['type'] == type_ + + +class TestSingleReport(object): + + def test_not_a_ceph_lv(self, monkeypatch): + # ceph lvs are detected by looking into its tags + lv = api.Volume(lv_name='lv', lv_tags={}, lv_path='/dev/VolGroup/lv', + vg_name='VolGroup') + monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: + [lv]) + + result = lvm.listing.List([]).single_report('VolGroup/lv') + assert result == {} + + def test_report_a_ceph_lv(self, monkeypatch): + # ceph lvs are detected by looking into its tags + tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data' + lv = api.Volume(lv_name='lv', vg_name='VolGroup', lv_uuid='aaaa', + lv_path='/dev/VolGroup/lv', lv_tags=tags) + volumes = [] + volumes.append(lv) + monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: + volumes) + + result = lvm.listing.List([]).single_report('VolGroup/lv') + assert result['0'][0]['name'] == 'lv' + assert result['0'][0]['lv_tags'] == tags + assert result['0'][0]['path'] == '/dev/VolGroup/lv' + assert result['0'][0]['devices'] == [] + + def test_report_a_ceph_journal_device(self, monkeypatch): + # ceph lvs are detected by looking into its tags + tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,' + \ + 'ceph.journal_device=/dev/sda1' + lv = api.Volume(lv_name='lv', lv_uuid='aaa', lv_tags=tags, + lv_path='/dev/VolGroup/lv', vg_name='VolGroup') + monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: + [lv] if 'tags' in kwargs else []) + + result = lvm.listing.List([]).single_report('/dev/sda1') + assert result['0'][0]['tags'] == {'PARTUUID': 'x'} + assert result['0'][0]['type'] == 'journal' + assert result['0'][0]['path'] == '/dev/sda1' + + def test_report_a_ceph_lv_with_devices(self, monkeypatch): + pvolumes = [] + + tags = 'ceph.osd_id=0,ceph.type=data' + pv1 = api.PVolume(vg_name="VolGroup", pv_name='/dev/sda1', + pv_uuid='', pv_tags={}, lv_uuid="aaaa") + pv2 = api.PVolume(vg_name="VolGroup", pv_name='/dev/sdb1', + pv_uuid='', pv_tags={}, lv_uuid="aaaa") + pvolumes.append(pv1) + pvolumes.append(pv2) + + + volumes = [] + lv = api.Volume(lv_name='lv', vg_name='VolGroup',lv_uuid='aaaa', + lv_path='/dev/VolGroup/lv', lv_tags=tags) + volumes.append(lv) + + monkeypatch.setattr(lvm.listing.api, 'get_pvs', lambda **kwargs: + pvolumes) + monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: + volumes) + + listing = lvm.listing.List([]) + listing._pvs = [ + {'lv_uuid': 'aaaa', 'pv_name': '/dev/sda1', 'pv_tags': '', 'pv_uuid': ''}, + {'lv_uuid': 'aaaa', 'pv_name': '/dev/sdb1', 'pv_tags': '', 'pv_uuid': ''}, + ] + + result = listing.single_report('VolGroup/lv') + assert result['0'][0]['name'] == 'lv' + assert result['0'][0]['lv_tags'] == tags + assert result['0'][0]['path'] == '/dev/VolGroup/lv' + assert result['0'][0]['devices'] == ['/dev/sda1', 
'/dev/sdb1'] + + def test_report_a_ceph_lv_with_no_matching_devices(self, monkeypatch): + tags = 'ceph.osd_id=0,ceph.type=data' + lv = api.Volume(lv_name='lv', vg_name='VolGroup', lv_uuid='aaaa', + lv_path='/dev/VolGroup/lv', lv_tags=tags) + volumes = [] + volumes.append(lv) + monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs: + volumes) + + listing = lvm.listing.List([]) + listing._pvs = [ + {'lv_uuid': 'ffff', 'pv_name': '/dev/sda1', 'pv_tags': '', + 'pv_uuid': ''}, + {'lv_uuid': 'ffff', 'pv_name': '/dev/sdb1', 'pv_tags': '', + 'pv_uuid': ''}] + + result = listing.single_report('VolGroup/lv') + assert result['0'][0]['name'] == 'lv' + assert result['0'][0]['lv_tags'] == tags + assert result['0'][0]['path'] == '/dev/VolGroup/lv' + assert result['0'][0]['devices'] == [] diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py new file mode 100644 index 00000000..70915a0f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py @@ -0,0 +1,174 @@ +import pytest +from ceph_volume.devices import lvm +from ceph_volume.api import lvm as api +from mock.mock import patch, Mock + + +class TestLVM(object): + + def test_main_spits_help_with_no_arguments(self, capsys): + lvm.main.LVM([]).main() + stdout, stderr = capsys.readouterr() + assert 'Use LVM and LVM-based technologies to deploy' in stdout + + def test_main_shows_activate_subcommands(self, capsys): + lvm.main.LVM([]).main() + stdout, stderr = capsys.readouterr() + assert 'activate ' in stdout + assert 'Discover and mount' in stdout + + def test_main_shows_prepare_subcommands(self, capsys): + lvm.main.LVM([]).main() + stdout, stderr = capsys.readouterr() + assert 'prepare ' in stdout + assert 'Format an LVM device' in stdout + + +class TestPrepareDevice(object): + + def test_cannot_use_device(self, factory): + args = factory(data='/dev/var/foo') + with pytest.raises(RuntimeError) as error: + p = lvm.prepare.Prepare([]) + p.args = args + p.prepare_data_device( 'data', '0') + assert 'Cannot use device (/dev/var/foo)' in str(error.value) + assert 'A vg/lv path or an existing device is needed' in str(error.value) + + +class TestGetClusterFsid(object): + + def test_fsid_is_passed_in(self, factory): + args = factory(cluster_fsid='aaaa-1111') + prepare_obj = lvm.prepare.Prepare([]) + prepare_obj.args = args + assert prepare_obj.get_cluster_fsid() == 'aaaa-1111' + + def test_fsid_is_read_from_ceph_conf(self, factory, conf_ceph_stub): + conf_ceph_stub('[global]\nfsid = bbbb-2222') + prepare_obj = lvm.prepare.Prepare([]) + prepare_obj.args = factory(cluster_fsid=None) + assert prepare_obj.get_cluster_fsid() == 'bbbb-2222' + + +class TestPrepare(object): + + def test_main_spits_help_with_no_arguments(self, capsys): + lvm.prepare.Prepare([]).main() + stdout, stderr = capsys.readouterr() + assert 'Prepare an OSD by assigning an ID and FSID' in stdout + + def test_main_shows_full_help(self, capsys): + with pytest.raises(SystemExit): + lvm.prepare.Prepare(argv=['--help']).main() + stdout, stderr = capsys.readouterr() + assert 'Use the filestore objectstore' in stdout + assert 'Use the bluestore objectstore' in stdout + assert 'A physical device or logical' in stdout + + def test_excludes_filestore_bluestore_flags(self, capsys, device_info): + device_info() + with pytest.raises(SystemExit): + lvm.prepare.Prepare(argv=['--data', '/dev/sdfoo', '--filestore', '--bluestore']).main() + stdout, stderr = capsys.readouterr() + expected = 'Cannot use 
--filestore (filestore) with --bluestore (bluestore)' + assert expected in stderr + + def test_excludes_other_filestore_bluestore_flags(self, capsys, device_info): + device_info() + with pytest.raises(SystemExit): + lvm.prepare.Prepare(argv=[ + '--bluestore', '--data', '/dev/sdfoo', + '--journal', '/dev/sf14', + ]).main() + stdout, stderr = capsys.readouterr() + expected = 'Cannot use --bluestore (bluestore) with --journal (filestore)' + assert expected in stderr + + def test_excludes_block_and_journal_flags(self, capsys, device_info): + device_info() + with pytest.raises(SystemExit): + lvm.prepare.Prepare(argv=[ + '--bluestore', '--data', '/dev/sdfoo', '--block.db', 'vg/ceph1', + '--journal', '/dev/sf14', + ]).main() + stdout, stderr = capsys.readouterr() + expected = 'Cannot use --block.db (bluestore) with --journal (filestore)' + assert expected in stderr + + def test_journal_is_required_with_filestore(self, is_root, monkeypatch, device_info): + monkeypatch.setattr("os.path.exists", lambda path: True) + device_info() + with pytest.raises(SystemExit) as error: + lvm.prepare.Prepare(argv=['--filestore', '--data', '/dev/sdfoo']).main() + expected = '--journal is required when using --filestore' + assert expected in str(error.value) + + @patch('ceph_volume.devices.lvm.prepare.api.is_ceph_device') + def test_safe_prepare_osd_already_created(self, m_is_ceph_device): + m_is_ceph_device.return_value = True + with pytest.raises(RuntimeError) as error: + prepare = lvm.prepare.Prepare(argv=[]) + prepare.args = Mock() + prepare.args.data = '/dev/sdfoo' + prepare.get_lv = Mock() + prepare.safe_prepare() + expected = 'skipping {}, it is already prepared'.format('/dev/sdfoo') + assert expected in str(error.value) + + def test_setup_device_device_name_is_none(self): + result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name=None, tags={'ceph.type': 'data'}, size=0, slots=None) + assert result == ('', '', {'ceph.type': 'data'}) + + @patch('ceph_volume.api.lvm.Volume.set_tags') + @patch('ceph_volume.devices.lvm.prepare.api.get_first_lv') + def test_setup_device_lv_passed(self, m_get_first_lv, m_set_tags): + fake_volume = api.Volume(lv_name='lv_foo', lv_path='/fake-path', vg_name='vg_foo', lv_tags='', lv_uuid='fake-uuid') + m_get_first_lv.return_value = fake_volume + result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name='vg_foo/lv_foo', tags={'ceph.type': 'data'}, size=0, slots=None) + + assert result == ('/fake-path', 'fake-uuid', {'ceph.type': 'data', + 'ceph.vdo': '0', + 'ceph.data_uuid': 'fake-uuid', + 'ceph.data_device': '/fake-path'}) + + @patch('ceph_volume.devices.lvm.prepare.api.create_lv') + @patch('ceph_volume.api.lvm.Volume.set_tags') + @patch('ceph_volume.util.disk.is_device') + def test_setup_device_device_passed(self, m_is_device, m_set_tags, m_create_lv): + fake_volume = api.Volume(lv_name='lv_foo', lv_path='/fake-path', vg_name='vg_foo', lv_tags='', lv_uuid='fake-uuid') + m_is_device.return_value = True + m_create_lv.return_value = fake_volume + result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name='/dev/sdx', tags={'ceph.type': 'data'}, size=0, slots=None) + + assert result == ('/fake-path', 'fake-uuid', {'ceph.type': 'data', + 'ceph.vdo': '0', + 'ceph.data_uuid': 'fake-uuid', + 'ceph.data_device': '/fake-path'}) + + @patch('ceph_volume.devices.lvm.prepare.Prepare.get_ptuuid') + @patch('ceph_volume.devices.lvm.prepare.api.get_first_lv') + def test_setup_device_partition_passed(self, m_get_first_lv, m_get_ptuuid): + 
m_get_first_lv.side_effect = ValueError() + m_get_ptuuid.return_value = 'fake-uuid' + result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name='/dev/sdx', tags={'ceph.type': 'data'}, size=0, slots=None) + + assert result == ('/dev/sdx', 'fake-uuid', {'ceph.type': 'data', + 'ceph.vdo': '0', + 'ceph.data_uuid': 'fake-uuid', + 'ceph.data_device': '/dev/sdx'}) + + +class TestActivate(object): + + def test_main_spits_help_with_no_arguments(self, capsys): + lvm.activate.Activate([]).main() + stdout, stderr = capsys.readouterr() + assert 'Activate OSDs by discovering them with' in stdout + + def test_main_shows_full_help(self, capsys): + with pytest.raises(SystemExit): + lvm.activate.Activate(argv=['--help']).main() + stdout, stderr = capsys.readouterr() + assert 'optional arguments' in stdout + assert 'positional arguments' in stdout diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_trigger.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_trigger.py new file mode 100644 index 00000000..b5280f93 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_trigger.py @@ -0,0 +1,45 @@ +import pytest +from ceph_volume import exceptions +from ceph_volume.devices.lvm import trigger + + +class TestParseOSDid(object): + + def test_no_id_found_if_no_digit(self): + with pytest.raises(exceptions.SuffixParsingError): + trigger.parse_osd_id('asdlj-ljahsdfaslkjhdfa') + + def test_no_id_found(self): + with pytest.raises(exceptions.SuffixParsingError): + trigger.parse_osd_id('ljahsdfaslkjhdfa') + + def test_id_found(self): + result = trigger.parse_osd_id('1-ljahsdfaslkjhdfa') + assert result == '1' + + +class TestParseOSDUUID(object): + + def test_uuid_is_parsed(self): + result = trigger.parse_osd_uuid('1-asdf-ljkh-asdf-ljkh-asdf') + assert result == 'asdf-ljkh-asdf-ljkh-asdf' + + def test_uuid_is_parsed_longer_sha1(self): + result = trigger.parse_osd_uuid('1-foo-bar-asdf-ljkh-asdf-ljkh-asdf') + assert result == 'foo-bar-asdf-ljkh-asdf-ljkh-asdf' + + def test_uuid_is_not_found(self): + with pytest.raises(exceptions.SuffixParsingError): + trigger.parse_osd_uuid('ljahsdfaslkjhdfa') + + def test_uuid_is_not_found_missing_id(self): + with pytest.raises(exceptions.SuffixParsingError): + trigger.parse_osd_uuid('ljahs-dfa-slkjhdfa-foo') + + def test_robust_double_id_in_uuid(self): + # it is possible to have the id in the SHA1, this should + # be fine parsing that + result = trigger.parse_osd_uuid("1-abc959fd-1ec9-4864-b141-3154f9b9f8ed") + assert result == 'abc959fd-1ec9-4864-b141-3154f9b9f8ed' + + diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py new file mode 100644 index 00000000..1fa22e5b --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py @@ -0,0 +1,236 @@ +import os +import pytest +from copy import deepcopy +from mock.mock import patch, call +from ceph_volume import process +from ceph_volume.api import lvm as api +from ceph_volume.devices.lvm import zap + + +class TestFindAssociatedDevices(object): + + def test_no_lvs_found_that_match_id(self, monkeypatch, device_info): + tags = 'ceph.osd_id=9,ceph.journal_uuid=x,ceph.type=data' + osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg', + lv_tags=tags, lv_path='/dev/VolGroup/lv') + volumes = [] + volumes.append(osd) + monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {}) + + with pytest.raises(RuntimeError): + zap.find_associated_devices(osd_id=10) + + def 
test_no_lvs_found_that_match_fsid(self, monkeypatch, device_info): + tags = 'ceph.osd_id=9,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,'+\ + 'ceph.type=data' + osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags, + vg_name='vg', lv_path='/dev/VolGroup/lv') + volumes = [] + volumes.append(osd) + monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {}) + + with pytest.raises(RuntimeError): + zap.find_associated_devices(osd_fsid='aaaa-lkjh') + + def test_no_lvs_found_that_match_id_fsid(self, monkeypatch, device_info): + tags = 'ceph.osd_id=9,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,'+\ + 'ceph.type=data' + osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg', + lv_tags=tags, lv_path='/dev/VolGroup/lv') + volumes = [] + volumes.append(osd) + monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {}) + + with pytest.raises(RuntimeError): + zap.find_associated_devices(osd_id='9', osd_fsid='aaaa-lkjh') + + def test_no_ceph_lvs_found(self, monkeypatch): + osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags='', + lv_path='/dev/VolGroup/lv') + volumes = [] + volumes.append(osd) + monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {}) + + with pytest.raises(RuntimeError): + zap.find_associated_devices(osd_id=100) + + def test_lv_is_matched_id(self, monkeypatch): + tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data' + osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='', + lv_path='/dev/VolGroup/lv', lv_tags=tags) + volumes = [] + volumes.append(osd) + monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes) + monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0)) + + result = zap.find_associated_devices(osd_id='0') + assert result[0].abspath == '/dev/VolGroup/lv' + + def test_lv_is_matched_fsid(self, monkeypatch): + tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,' +\ + 'ceph.type=data' + osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='', + lv_path='/dev/VolGroup/lv', lv_tags=tags) + volumes = [] + volumes.append(osd) + monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: deepcopy(volumes)) + monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0)) + + result = zap.find_associated_devices(osd_fsid='asdf-lkjh') + assert result[0].abspath == '/dev/VolGroup/lv' + + def test_lv_is_matched_id_fsid(self, monkeypatch): + tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,' +\ + 'ceph.type=data' + osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='', + lv_path='/dev/VolGroup/lv', lv_tags=tags) + volumes = [] + volumes.append(osd) + monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes) + monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0)) + + result = zap.find_associated_devices(osd_id='0', osd_fsid='asdf-lkjh') + assert result[0].abspath == '/dev/VolGroup/lv' + + +class TestEnsureAssociatedLVs(object): + + def test_nothing_is_found(self): + volumes = [] + result = zap.ensure_associated_lvs(volumes) + assert result == [] + + def test_data_is_found(self): + tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=data' + osd = api.Volume( + lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/data', lv_tags=tags) + volumes = [] + volumes.append(osd) + result = zap.ensure_associated_lvs(volumes) + assert result == ['/dev/VolGroup/data'] + + def test_block_is_found(self): + tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=block' + osd = api.Volume( + lv_name='volume1', lv_uuid='y', vg_name='', 
lv_path='/dev/VolGroup/block', lv_tags=tags) + volumes = [] + volumes.append(osd) + result = zap.ensure_associated_lvs(volumes) + assert result == ['/dev/VolGroup/block'] + + def test_success_message_for_fsid(self, factory, is_root, capsys): + cli_zap = zap.Zap([]) + args = factory(devices=[], osd_id=None, osd_fsid='asdf-lkjh') + cli_zap.args = args + cli_zap.zap() + out, err = capsys.readouterr() + assert "Zapping successful for OSD: asdf-lkjh" in err + + def test_success_message_for_id(self, factory, is_root, capsys): + cli_zap = zap.Zap([]) + args = factory(devices=[], osd_id='1', osd_fsid=None) + cli_zap.args = args + cli_zap.zap() + out, err = capsys.readouterr() + assert "Zapping successful for OSD: 1" in err + + def test_block_and_partition_are_found(self, monkeypatch): + monkeypatch.setattr(zap.disk, 'get_device_from_partuuid', lambda x: '/dev/sdb1') + tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=block' + osd = api.Volume( + lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/block', lv_tags=tags) + volumes = [] + volumes.append(osd) + result = zap.ensure_associated_lvs(volumes) + assert '/dev/sdb1' in result + assert '/dev/VolGroup/block' in result + + def test_journal_is_found(self): + tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=journal' + osd = api.Volume( + lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv', lv_tags=tags) + volumes = [] + volumes.append(osd) + result = zap.ensure_associated_lvs(volumes) + assert result == ['/dev/VolGroup/lv'] + + def test_multiple_journals_are_found(self): + tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=journal' + volumes = [] + for i in range(3): + osd = api.Volume( + lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags) + volumes.append(osd) + result = zap.ensure_associated_lvs(volumes) + assert '/dev/VolGroup/lv0' in result + assert '/dev/VolGroup/lv1' in result + assert '/dev/VolGroup/lv2' in result + + def test_multiple_dbs_are_found(self): + tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=db' + volumes = [] + for i in range(3): + osd = api.Volume( + lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags) + volumes.append(osd) + result = zap.ensure_associated_lvs(volumes) + assert '/dev/VolGroup/lv0' in result + assert '/dev/VolGroup/lv1' in result + assert '/dev/VolGroup/lv2' in result + + def test_multiple_wals_are_found(self): + tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.wal_uuid=x,ceph.type=wal' + volumes = [] + for i in range(3): + osd = api.Volume( + lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags) + volumes.append(osd) + result = zap.ensure_associated_lvs(volumes) + assert '/dev/VolGroup/lv0' in result + assert '/dev/VolGroup/lv1' in result + assert '/dev/VolGroup/lv2' in result + + def test_multiple_backing_devs_are_found(self): + volumes = [] + for _type in ['journal', 'db', 'wal']: + tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.wal_uuid=x,ceph.type=%s' % _type + osd = api.Volume( + lv_name='volume%s' % _type, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % _type, lv_tags=tags) + volumes.append(osd) + result = zap.ensure_associated_lvs(volumes) + assert '/dev/VolGroup/lvjournal' in result + assert '/dev/VolGroup/lvwal' in result + assert '/dev/VolGroup/lvdb' in result + + @patch('ceph_volume.devices.lvm.zap.api.get_lvs') + def 
test_ensure_associated_lvs(self, m_get_lvs): + zap.ensure_associated_lvs([], lv_tags={'ceph.osd_id': '1'}) + calls = [ + call(tags={'ceph.type': 'journal', 'ceph.osd_id': '1'}), + call(tags={'ceph.type': 'db', 'ceph.osd_id': '1'}), + call(tags={'ceph.type': 'wal', 'ceph.osd_id': '1'}) + ] + m_get_lvs.assert_has_calls(calls, any_order=True) + + +class TestWipeFs(object): + + def setup(self): + os.environ['CEPH_VOLUME_WIPEFS_INTERVAL'] = '0' + + def test_works_on_second_try(self, stub_call): + os.environ['CEPH_VOLUME_WIPEFS_TRIES'] = '2' + stub_call([('wiping /dev/sda', '', 1), ('', '', 0)]) + result = zap.wipefs('/dev/sda') + assert result is None + + def test_does_not_work_after_several_tries(self, stub_call): + os.environ['CEPH_VOLUME_WIPEFS_TRIES'] = '2' + stub_call([('wiping /dev/sda', '', 1), ('', '', 1)]) + with pytest.raises(RuntimeError): + zap.wipefs('/dev/sda') + + def test_does_not_work_default_tries(self, stub_call): + stub_call([('wiping /dev/sda', '', 1)]*8) + with pytest.raises(RuntimeError): + zap.wipefs('/dev/sda') diff --git a/src/ceph-volume/ceph_volume/tests/devices/raw/__init__.py b/src/ceph-volume/ceph_volume/tests/devices/raw/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/devices/raw/__init__.py diff --git a/src/ceph-volume/ceph_volume/tests/devices/raw/test_prepare.py b/src/ceph-volume/ceph_volume/tests/devices/raw/test_prepare.py new file mode 100644 index 00000000..e4cf8ce1 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/devices/raw/test_prepare.py @@ -0,0 +1,97 @@ +import pytest +from ceph_volume.devices import raw +from mock.mock import patch + + +class TestRaw(object): + + def test_main_spits_help_with_no_arguments(self, capsys): + raw.main.Raw([]).main() + stdout, stderr = capsys.readouterr() + assert 'Manage a single-device OSD on a raw block device.' 
in stdout + + def test_main_shows_activate_subcommands(self, capsys): + raw.main.Raw([]).main() + stdout, stderr = capsys.readouterr() + assert 'activate ' in stdout + assert 'Discover and prepare' in stdout + + def test_main_shows_prepare_subcommands(self, capsys): + raw.main.Raw([]).main() + stdout, stderr = capsys.readouterr() + assert 'prepare ' in stdout + assert 'Format a raw device' in stdout + + +class TestPrepare(object): + + def test_main_spits_help_with_no_arguments(self, capsys): + raw.prepare.Prepare([]).main() + stdout, stderr = capsys.readouterr() + assert 'Prepare an OSD by assigning an ID and FSID' in stdout + + def test_main_shows_full_help(self, capsys): + with pytest.raises(SystemExit): + raw.prepare.Prepare(argv=['--help']).main() + stdout, stderr = capsys.readouterr() + assert 'a raw device to use for the OSD' in stdout + assert 'Crush device class to assign this OSD to' in stdout + assert 'Use BlueStore backend' in stdout + assert 'Path to bluestore block.db block device' in stdout + assert 'Path to bluestore block.wal block device' in stdout + assert 'Enable device encryption via dm-crypt' in stdout + + @patch('ceph_volume.util.arg_validators.ValidDevice.__call__') + def test_prepare_dmcrypt_no_secret_passed(self, m_valid_device, capsys): + m_valid_device.return_value = '/dev/foo' + with pytest.raises(SystemExit): + raw.prepare.Prepare(argv=['--bluestore', '--data', '/dev/foo', '--dmcrypt']).main() + stdout, stderr = capsys.readouterr() + assert 'CEPH_VOLUME_DMCRYPT_SECRET is not set, you must set' in stderr + + @patch('ceph_volume.util.encryption.luks_open') + @patch('ceph_volume.util.encryption.luks_format') + @patch('ceph_volume.util.disk.lsblk') + def test_prepare_dmcrypt_block(self, m_lsblk, m_luks_format, m_luks_open): + m_lsblk.return_value = {'KNAME': 'foo'} + m_luks_format.return_value = True + m_luks_open.return_value = True + result = raw.prepare.prepare_dmcrypt('foo', '/dev/foo', 'block', '123') + m_luks_open.assert_called_with('foo', '/dev/foo', 'ceph-123-foo-block-dmcrypt') + m_luks_format.assert_called_with('foo', '/dev/foo') + assert result == '/dev/mapper/ceph-123-foo-block-dmcrypt' + + @patch('ceph_volume.util.encryption.luks_open') + @patch('ceph_volume.util.encryption.luks_format') + @patch('ceph_volume.util.disk.lsblk') + def test_prepare_dmcrypt_db(self, m_lsblk, m_luks_format, m_luks_open): + m_lsblk.return_value = {'KNAME': 'foo'} + m_luks_format.return_value = True + m_luks_open.return_value = True + result = raw.prepare.prepare_dmcrypt('foo', '/dev/foo', 'db', '123') + m_luks_open.assert_called_with('foo', '/dev/foo', 'ceph-123-foo-db-dmcrypt') + m_luks_format.assert_called_with('foo', '/dev/foo') + assert result == '/dev/mapper/ceph-123-foo-db-dmcrypt' + + @patch('ceph_volume.util.encryption.luks_open') + @patch('ceph_volume.util.encryption.luks_format') + @patch('ceph_volume.util.disk.lsblk') + def test_prepare_dmcrypt_wal(self, m_lsblk, m_luks_format, m_luks_open): + m_lsblk.return_value = {'KNAME': 'foo'} + m_luks_format.return_value = True + m_luks_open.return_value = True + result = raw.prepare.prepare_dmcrypt('foo', '/dev/foo', 'wal', '123') + m_luks_open.assert_called_with('foo', '/dev/foo', 'ceph-123-foo-wal-dmcrypt') + m_luks_format.assert_called_with('foo', '/dev/foo') + assert result == '/dev/mapper/ceph-123-foo-wal-dmcrypt' + + @patch('ceph_volume.devices.raw.prepare.rollback_osd') + @patch('ceph_volume.devices.raw.prepare.Prepare.prepare') + @patch('ceph_volume.util.arg_validators.ValidDevice.__call__') + def 
test_safe_prepare_exception_raised(self, m_valid_device, m_prepare, m_rollback_osd): + m_valid_device.return_value = '/dev/foo' + m_prepare.side_effect=Exception('foo') + m_rollback_osd.return_value = 'foobar' + with pytest.raises(Exception): + raw.prepare.Prepare(argv=['--bluestore', '--data', '/dev/foo']).main() + m_rollback_osd.assert_called() diff --git a/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py b/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py new file mode 100644 index 00000000..ac2dd0e7 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py @@ -0,0 +1,200 @@ +import os +import pytest +from ceph_volume.devices.simple import activate + + +class TestActivate(object): + + def test_no_data_uuid(self, factory, tmpfile, is_root, monkeypatch, capture): + json_config = tmpfile(contents='{}') + args = factory(osd_id='0', osd_fsid='1234', json_config=json_config) + with pytest.raises(RuntimeError): + activate.Activate([]).activate(args) + + def test_invalid_json_path(self): + os.environ['CEPH_VOLUME_SIMPLE_JSON_DIR'] = '/non/existing/path' + with pytest.raises(RuntimeError) as error: + activate.Activate(['1', 'asdf']).main() + assert 'Expected JSON config path not found' in str(error.value) + + def test_main_spits_help_with_no_arguments(self, capsys): + activate.Activate([]).main() + stdout, stderr = capsys.readouterr() + assert 'Activate OSDs by mounting devices previously configured' in stdout + + def test_activate_all(self, is_root, monkeypatch): + ''' + make sure Activate calls activate for each file returned by glob + ''' + mocked_glob = [] + def mock_glob(glob): + path = os.path.dirname(glob) + mocked_glob.extend(['{}/{}.json'.format(path, file_) for file_ in + ['1', '2', '3']]) + return mocked_glob + activate_files = [] + def mock_activate(self, args): + activate_files.append(args.json_config) + monkeypatch.setattr('glob.glob', mock_glob) + monkeypatch.setattr(activate.Activate, 'activate', mock_activate) + activate.Activate(['--all']).main() + assert activate_files == mocked_glob + + + + +class TestEnableSystemdUnits(object): + + def test_nothing_is_activated(self, tmpfile, is_root, capsys): + json_config = tmpfile(contents='{}') + activation = activate.Activate(['--no-systemd', '--file', json_config, '0', '1234'], from_trigger=True) + activation.activate = lambda x: True + activation.main() + activation.enable_systemd_units('0', '1234') + stdout, stderr = capsys.readouterr() + assert 'Skipping enabling of `simple`' in stderr + assert 'Skipping masking of ceph-disk' in stderr + assert 'Skipping enabling and starting OSD simple' in stderr + + def test_no_systemd_flag_is_true(self, tmpfile, is_root): + json_config = tmpfile(contents='{}') + activation = activate.Activate(['--no-systemd', '--file', json_config, '0', '1234'], from_trigger=True) + activation.activate = lambda x: True + activation.main() + assert activation.skip_systemd is True + + def test_no_systemd_flag_is_false(self, tmpfile, is_root): + json_config = tmpfile(contents='{}') + activation = activate.Activate(['--file', json_config, '0', '1234'], from_trigger=True) + activation.activate = lambda x: True + activation.main() + assert activation.skip_systemd is False + + def test_masks_ceph_disk(self, tmpfile, is_root, monkeypatch, capture): + monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', capture) + monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', lambda *a: True) + 
monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', lambda *a: True) + monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', lambda *a: True) + + json_config = tmpfile(contents='{}') + activation = activate.Activate(['--file', json_config, '0', '1234'], from_trigger=False) + activation.activate = lambda x: True + activation.main() + activation.enable_systemd_units('0', '1234') + assert len(capture.calls) == 1 + + def test_enables_simple_unit(self, tmpfile, is_root, monkeypatch, capture): + monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', lambda *a: True) + monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', capture) + monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', lambda *a: True) + monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', lambda *a: True) + + json_config = tmpfile(contents='{}') + activation = activate.Activate(['--file', json_config, '0', '1234'], from_trigger=False) + activation.activate = lambda x: True + activation.main() + activation.enable_systemd_units('0', '1234') + assert len(capture.calls) == 1 + assert capture.calls[0]['args'] == ('0', '1234', 'simple') + + def test_enables_osd_unit(self, tmpfile, is_root, monkeypatch, capture): + monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', lambda *a: True) + monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', lambda *a: True) + monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', capture) + monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', lambda *a: True) + + json_config = tmpfile(contents='{}') + activation = activate.Activate(['--file', json_config, '0', '1234'], from_trigger=False) + activation.activate = lambda x: True + activation.main() + activation.enable_systemd_units('0', '1234') + assert len(capture.calls) == 1 + assert capture.calls[0]['args'] == ('0',) + + def test_starts_osd_unit(self, tmpfile, is_root, monkeypatch, capture): + monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', lambda *a: True) + monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', lambda *a: True) + monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', lambda *a: True) + monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', capture) + + json_config = tmpfile(contents='{}') + activation = activate.Activate(['--file', json_config, '0', '1234'], from_trigger=False) + activation.activate = lambda x: True + activation.main() + activation.enable_systemd_units('0', '1234') + assert len(capture.calls) == 1 + assert capture.calls[0]['args'] == ('0',) + + +class TestValidateDevices(object): + + def test_filestore_missing_journal(self): + activation = activate.Activate([]) + with pytest.raises(RuntimeError) as error: + activation.validate_devices({'type': 'filestore', 'data': {}}) + assert 'Unable to activate filestore OSD due to missing devices' in str(error.value) + + def test_filestore_missing_data(self): + activation = activate.Activate([]) + with pytest.raises(RuntimeError) as error: + activation.validate_devices({'type': 'filestore', 'journal': {}}) + assert 'Unable to activate filestore OSD due to missing devices' in str(error.value) + + def test_filestore_journal_device_found(self, capsys): + activation = activate.Activate([]) + with pytest.raises(RuntimeError): + activation.validate_devices({'type': 'filestore', 'journal': {}}) + stdout, stderr = capsys.readouterr() + assert "devices found: ['journal']" in stderr + + def test_filestore_data_device_found(self, 
capsys): + activation = activate.Activate([]) + with pytest.raises(RuntimeError): + activation.validate_devices({'type': 'filestore', 'data': {}}) + stdout, stderr = capsys.readouterr() + assert "devices found: ['data']" in stderr + + def test_filestore_with_all_devices(self): + activation = activate.Activate([]) + result = activation.validate_devices({'type': 'filestore', 'journal': {}, 'data': {}}) + assert result is True + + def test_filestore_without_type(self): + activation = activate.Activate([]) + result = activation.validate_devices({'journal': {}, 'data': {}}) + assert result is True + + def test_bluestore_with_all_devices(self): + activation = activate.Activate([]) + result = activation.validate_devices({'type': 'bluestore', 'data': {}, 'block': {}}) + assert result is True + + def test_bluestore_without_type(self): + activation = activate.Activate([]) + result = activation.validate_devices({'data': {}, 'block': {}}) + assert result is True + + def test_bluestore_is_default(self): + activation = activate.Activate([]) + result = activation.validate_devices({'data': {}, 'block': {}}) + assert result is True + + def test_bluestore_data_device_found(self, capsys): + activation = activate.Activate([]) + with pytest.raises(RuntimeError): + activation.validate_devices({'data': {}}) + stdout, stderr = capsys.readouterr() + assert "devices found: ['data']" in stderr + + def test_bluestore_missing_data(self): + activation = activate.Activate([]) + with pytest.raises(RuntimeError) as error: + activation.validate_devices({'type': 'bluestore', 'block': {}}) + assert 'Unable to activate bluestore OSD due to missing devices' in str(error.value) + + def test_bluestore_block_device_found(self, capsys): + activation = activate.Activate([]) + with pytest.raises(RuntimeError): + activation.validate_devices({'block': {}}) + stdout, stderr = capsys.readouterr() + assert "devices found: ['block']" in stderr diff --git a/src/ceph-volume/ceph_volume/tests/devices/simple/test_scan.py b/src/ceph-volume/ceph_volume/tests/devices/simple/test_scan.py new file mode 100644 index 00000000..11849362 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/devices/simple/test_scan.py @@ -0,0 +1,68 @@ +import os +import pytest +from ceph_volume.devices.simple import scan + + +class TestGetContents(object): + + def test_multiple_lines_are_left_as_is(self, tmpfile): + magic_file = tmpfile(contents='first\nsecond\n') + scanner = scan.Scan([]) + assert scanner.get_contents(magic_file) == 'first\nsecond\n' + + def test_extra_whitespace_gets_removed(self, tmpfile): + magic_file = tmpfile(contents='first ') + scanner = scan.Scan([]) + assert scanner.get_contents(magic_file) == 'first' + + def test_single_newline_values_are_trimmed(self, tmpfile): + magic_file = tmpfile(contents='first\n') + scanner = scan.Scan([]) + assert scanner.get_contents(magic_file) == 'first' + + +class TestEtcPath(object): + + def test_directory_is_valid(self, tmpdir): + path = str(tmpdir) + scanner = scan.Scan([]) + scanner._etc_path = path + assert scanner.etc_path == path + + def test_directory_does_not_exist_gets_created(self, tmpdir): + path = os.path.join(str(tmpdir), 'subdir') + scanner = scan.Scan([]) + scanner._etc_path = path + assert scanner.etc_path == path + assert os.path.isdir(path) + + def test_complains_when_file(self, tmpfile): + path = tmpfile() + scanner = scan.Scan([]) + scanner._etc_path = path + with pytest.raises(RuntimeError): + scanner.etc_path + + +class TestParseKeyring(object): + + def test_newlines_are_removed(self): + 
contents = [ + '[client.osd-lockbox.8d7a8ab2-5db0-4f83-a785-2809aba403d5]', + '\tkey = AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA==', ''] + assert '\n' not in scan.parse_keyring('\n'.join(contents)) + + def test_key_has_spaces_removed(self): + contents = [ + '[client.osd-lockbox.8d7a8ab2-5db0-4f83-a785-2809aba403d5]', + '\tkey = AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA==', ''] + result = scan.parse_keyring('\n'.join(contents)) + assert result.startswith(' ') is False + assert result.endswith(' ') is False + + def test_actual_key_is_extracted(self): + contents = [ + '[client.osd-lockbox.8d7a8ab2-5db0-4f83-a785-2809aba403d5]', + '\tkey = AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA==', ''] + result = scan.parse_keyring('\n'.join(contents)) + assert result == 'AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA==' diff --git a/src/ceph-volume/ceph_volume/tests/devices/simple/test_trigger.py b/src/ceph-volume/ceph_volume/tests/devices/simple/test_trigger.py new file mode 100644 index 00000000..d3220f2b --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/devices/simple/test_trigger.py @@ -0,0 +1,45 @@ +import pytest +from ceph_volume import exceptions +from ceph_volume.devices.simple import trigger + + +class TestParseOSDid(object): + + def test_no_id_found_if_no_digit(self): + with pytest.raises(exceptions.SuffixParsingError): + trigger.parse_osd_id('asdlj-ljahsdfaslkjhdfa') + + def test_no_id_found(self): + with pytest.raises(exceptions.SuffixParsingError): + trigger.parse_osd_id('ljahsdfaslkjhdfa') + + def test_id_found(self): + result = trigger.parse_osd_id('1-ljahsdfaslkjhdfa') + assert result == '1' + + +class TestParseOSDUUID(object): + + def test_uuid_is_parsed(self): + result = trigger.parse_osd_uuid('1-asdf-ljkh-asdf-ljkh-asdf') + assert result == 'asdf-ljkh-asdf-ljkh-asdf' + + def test_uuid_is_parsed_longer_sha1(self): + result = trigger.parse_osd_uuid('1-foo-bar-asdf-ljkh-asdf-ljkh-asdf') + assert result == 'foo-bar-asdf-ljkh-asdf-ljkh-asdf' + + def test_uuid_is_not_found(self): + with pytest.raises(exceptions.SuffixParsingError): + trigger.parse_osd_uuid('ljahsdfaslkjhdfa') + + def test_uuid_is_not_found_missing_id(self): + with pytest.raises(exceptions.SuffixParsingError): + trigger.parse_osd_uuid('ljahs-dfa-slkjhdfa-foo') + + def test_robust_double_id_in_uuid(self): + # it is possible to have the id in the SHA1, this should + # be fine parsing that + result = trigger.parse_osd_uuid("1-abc959fd-1ec9-4864-b141-3154f9b9f8ed") + assert result == 'abc959fd-1ec9-4864-b141-3154f9b9f8ed' + + diff --git a/src/ceph-volume/ceph_volume/tests/devices/test_zap.py b/src/ceph-volume/ceph_volume/tests/devices/test_zap.py new file mode 100644 index 00000000..42c4940f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/devices/test_zap.py @@ -0,0 +1,28 @@ +import pytest +from ceph_volume.devices import lvm + + +class TestZap(object): + + def test_main_spits_help_with_no_arguments(self, capsys): + lvm.zap.Zap([]).main() + stdout, stderr = capsys.readouterr() + assert 'Zaps the given logical volume(s), raw device(s) or partition(s)' in stdout + + def test_main_shows_full_help(self, capsys): + with pytest.raises(SystemExit): + lvm.zap.Zap(argv=['--help']).main() + stdout, stderr = capsys.readouterr() + assert 'optional arguments' in stdout + + @pytest.mark.parametrize('device_name', [ + '/dev/mapper/foo', + '/dev/dm-0', + ]) + def test_can_not_zap_mapper_device(self, monkeypatch, device_info, capsys, is_root, device_name): + monkeypatch.setattr('os.path.exists', lambda x: True) + device_info() + with 
pytest.raises(SystemExit): + lvm.zap.Zap(argv=[device_name]).main() + stdout, stderr = capsys.readouterr() + assert 'Refusing to zap' in stderr diff --git a/src/ceph-volume/ceph_volume/tests/functional/.gitignore b/src/ceph-volume/ceph_volume/tests/functional/.gitignore new file mode 100644 index 00000000..a2ee2e58 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/.gitignore @@ -0,0 +1,5 @@ +*.vdi +.vagrant/ +vagrant_ssh_config +fetch/ +global_vagrant_variables.yml diff --git a/src/ceph-volume/ceph_volume/tests/functional/README.md b/src/ceph-volume/ceph_volume/tests/functional/README.md new file mode 100644 index 00000000..b9e892ac --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/README.md @@ -0,0 +1,24 @@ +# ceph-volume functional test suite + +This test suite is based on Vagrant and is normally run via Jenkins on GitHub +PRs. With a functioning Vagrant installation these tests can also be run locally +(tested with Vagrant's libvirt provider). + +## Vagrant with libvirt +By default the tests make assumptions about the network segments to use (public and +cluster network), as well as the libvirt storage pool and URI. In an unused +Vagrant setup these defaults should be fine. +If you prefer to explicitly configure the storage pool and libvirt +URI, create a file +`$ceph_repo/src/ceph-volume/ceph_volume/tests/functional/global_vagrant_variables.yml` +with content as follows: +``` yaml +libvirt_uri: qemu:///system +libvirt_storage_pool: 'vagrant-ceph-nvme' +``` +Adjust the values as needed. + +After this, descend into a test directory (e.g. +`$ceph_repo/src/ceph-volume/ceph_volume/tests/functional/lvm`) and run `tox -vre +centos7-bluestore-create -- --provider=libvirt` to execute the tests in +`$ceph_repo/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/`. diff --git a/src/ceph-volume/ceph_volume/tests/functional/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/Vagrantfile new file mode 100644 index 00000000..9341698e --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/Vagrantfile @@ -0,0 +1,429 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +require 'yaml' +require 'time' +VAGRANTFILE_API_VERSION = '2' + +DEBUG = false + +global_settings = {} +if File.symlink?(__FILE__) + global_config = File.expand_path( + File.join( + File.dirname(File.readlink(__FILE__)), + 'global_vagrant_variables.yml') + ) + if File.exist?(global_config) + global_settings = YAML.load_file(global_config) + end +end + +LIBVIRT_URI = global_settings.fetch('libvirt_uri', '') +LIBVIRT_STORAGE_POOL = global_settings.fetch('libvirt_storage_pool', '') + +config_file=File.expand_path(File.join(File.dirname(__FILE__), 'vagrant_variables.yml')) +settings=YAML.load_file(config_file) + +LABEL_PREFIX = settings['label_prefix'] ?
settings['label_prefix'] + "-" : "" +NMONS = settings['mon_vms'] +NOSDS = settings['osd_vms'] +NMDSS = settings['mds_vms'] +NRGWS = settings['rgw_vms'] +NNFSS = settings['nfs_vms'] +RESTAPI = settings['restapi'] +NRBD_MIRRORS = settings['rbd_mirror_vms'] +CLIENTS = settings['client_vms'] +NISCSI_GWS = settings['iscsi_gw_vms'] +PUBLIC_SUBNET = settings['public_subnet'] +CLUSTER_SUBNET = settings['cluster_subnet'] +BOX = settings['vagrant_box'] +CLIENT_BOX = settings['client_vagrant_box'] +BOX_URL = settings['vagrant_box_url'] +SYNC_DIR = settings['vagrant_sync_dir'] +MEMORY = settings['memory'] +ETH = settings['eth'] +USER = settings['ssh_username'] + +ASSIGN_STATIC_IP = settings.fetch('assign_static_ip', true) +DISABLE_SYNCED_FOLDER = settings.fetch('vagrant_disable_synced_folder', false) +DISK_UUID = Time.now.utc.to_i + +def create_vmdk(name, size) + dir = Pathname.new(__FILE__).expand_path.dirname + path = File.join(dir, '.vagrant', name + '.vmdk') + `vmware-vdiskmanager -c -s #{size} -t 0 -a scsi #{path} \ + 2>&1 > /dev/null` unless File.exist?(path) +end + +Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| + config.ssh.insert_key = false # workaround for https://github.com/mitchellh/vagrant/issues/5048 + config.ssh.private_key_path = settings['ssh_private_key_path'] + config.ssh.username = USER + + config.vm.provider :libvirt do |lv| + # When using libvirt, avoid errors like: + # "CPU feature cmt not found" + lv.cpu_mode = 'host-passthrough' + # set libvirt uri if present + if not LIBVIRT_URI.empty? + lv.uri = LIBVIRT_URI + end + # set libvirt storage pool if present + if not LIBVIRT_STORAGE_POOL.empty? + lv.storage_pool_name = LIBVIRT_STORAGE_POOL + end + end + + # Faster bootup. Disables mounting the sync folder for libvirt and virtualbox + if DISABLE_SYNCED_FOLDER + config.vm.provider :virtualbox do |v,override| + override.vm.synced_folder '.', SYNC_DIR, disabled: true + end + config.vm.provider :libvirt do |v,override| + override.vm.synced_folder '.', SYNC_DIR, disabled: true + end + end + + (0..CLIENTS - 1).each do |i| + config.vm.define "#{LABEL_PREFIX}client#{i}" do |client| + client.vm.box = CLIENT_BOX + client.vm.hostname = "#{LABEL_PREFIX}client#{i}" + if ASSIGN_STATIC_IP + client.vm.network :private_network, + ip: "#{PUBLIC_SUBNET}.4#{i}" + end + # Virtualbox + client.vm.provider :virtualbox do |vb| + vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"] + end + + # VMware + client.vm.provider :vmware_fusion do |v| + v.vmx['memsize'] = "#{MEMORY}" + end + + # Libvirt + client.vm.provider :libvirt do |lv| + lv.memory = MEMORY + lv.random_hostname = true + lv.nic_model_type = "e1000" + end + + # Parallels + client.vm.provider "parallels" do |prl| + prl.name = "client#{i}" + prl.memory = "#{MEMORY}" + end + + client.vm.provider :linode do |provider| + provider.label = client.vm.hostname + end + end + end + + (0..NRGWS - 1).each do |i| + config.vm.define "#{LABEL_PREFIX}rgw#{i}" do |rgw| + rgw.vm.box = BOX + rgw.vm.box_url = BOX_URL + rgw.vm.hostname = "#{LABEL_PREFIX}rgw#{i}" + if ASSIGN_STATIC_IP + rgw.vm.network :private_network, + ip: "#{PUBLIC_SUBNET}.5#{i}" + end + + # Virtualbox + rgw.vm.provider :virtualbox do |vb| + vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"] + end + + # VMware + rgw.vm.provider :vmware_fusion do |v| + v.vmx['memsize'] = "#{MEMORY}" + end + + # Libvirt + rgw.vm.provider :libvirt do |lv| + lv.memory = MEMORY + lv.random_hostname = true + lv.nic_model_type = "e1000" + end + + # Parallels + rgw.vm.provider "parallels" do |prl| + prl.name 
= "rgw#{i}" + prl.memory = "#{MEMORY}" + end + + rgw.vm.provider :linode do |provider| + provider.label = rgw.vm.hostname + end + end + end + + (0..NNFSS - 1).each do |i| + config.vm.define "nfs#{i}" do |nfs| + nfs.vm.box = BOX + nfs.vm.box_url = BOX_URL + nfs.vm.hostname = "nfs#{i}" + if ASSIGN_STATIC_IP + nfs.vm.network :private_network, + ip: "#{PUBLIC_SUBNET}.6#{i}" + end + + # Virtualbox + nfs.vm.provider :virtualbox do |vb| + vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"] + end + + # VMware + nfs.vm.provider :vmware_fusion do |v| + v.vmx['memsize'] = "#{MEMORY}" + end + + # Libvirt + nfs.vm.provider :libvirt do |lv| + lv.memory = MEMORY + lv.random_hostname = true + lv.nic_model_type = "e1000" + end + + # Parallels + nfs.vm.provider "parallels" do |prl| + prl.name = "nfs#{i}" + prl.memory = "#{MEMORY}" + end + + nfs.vm.provider :linode do |provider| + provider.label = nfs.vm.hostname + end + end + end + + (0..NMDSS - 1).each do |i| + config.vm.define "#{LABEL_PREFIX}mds#{i}" do |mds| + mds.vm.box = BOX + mds.vm.box_url = BOX_URL + mds.vm.hostname = "#{LABEL_PREFIX}mds#{i}" + if ASSIGN_STATIC_IP + mds.vm.network :private_network, + ip: "#{PUBLIC_SUBNET}.7#{i}" + end + # Virtualbox + mds.vm.provider :virtualbox do |vb| + vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"] + end + + # VMware + mds.vm.provider :vmware_fusion do |v| + v.vmx['memsize'] = "#{MEMORY}" + end + + # Libvirt + mds.vm.provider :libvirt do |lv| + lv.memory = MEMORY + lv.random_hostname = true + lv.nic_model_type = "e1000" + end + # Parallels + mds.vm.provider "parallels" do |prl| + prl.name = "mds#{i}" + prl.memory = "#{MEMORY}" + end + + mds.vm.provider :linode do |provider| + provider.label = mds.vm.hostname + end + end + end + + (0..NRBD_MIRRORS - 1).each do |i| + config.vm.define "#{LABEL_PREFIX}rbd_mirror#{i}" do |rbd_mirror| + rbd_mirror.vm.box = BOX + rbd_mirror.vm.box_url = BOX_URL + rbd_mirror.vm.hostname = "#{LABEL_PREFIX}rbd-mirror#{i}" + if ASSIGN_STATIC_IP + rbd_mirror.vm.network :private_network, + ip: "#{PUBLIC_SUBNET}.8#{i}" + end + # Virtualbox + rbd_mirror.vm.provider :virtualbox do |vb| + vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"] + end + + # VMware + rbd_mirror.vm.provider :vmware_fusion do |v| + v.vmx['memsize'] = "#{MEMORY}" + end + + # Libvirt + rbd_mirror.vm.provider :libvirt do |lv| + lv.memory = MEMORY + lv.random_hostname = true + lv.nic_model_type = "e1000" + end + # Parallels + rbd_mirror.vm.provider "parallels" do |prl| + prl.name = "rbd-mirror#{i}" + prl.memory = "#{MEMORY}" + end + + rbd_mirror.vm.provider :linode do |provider| + provider.label = rbd_mirror.vm.hostname + end + end + end + + (0..NISCSI_GWS - 1).each do |i| + config.vm.define "#{LABEL_PREFIX}iscsi_gw#{i}" do |iscsi_gw| + iscsi_gw.vm.box = BOX + iscsi_gw.vm.box_url = BOX_URL + iscsi_gw.vm.hostname = "#{LABEL_PREFIX}iscsi-gw#{i}" + if ASSIGN_STATIC_IP + iscsi_gw.vm.network :private_network, + ip: "#{PUBLIC_SUBNET}.9#{i}" + end + # Virtualbox + iscsi_gw.vm.provider :virtualbox do |vb| + vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"] + end + + # VMware + iscsi_gw.vm.provider :vmware_fusion do |v| + v.vmx['memsize'] = "#{MEMORY}" + end + + # Libvirt + iscsi_gw.vm.provider :libvirt do |lv| + lv.memory = MEMORY + lv.random_hostname = true + lv.nic_model_type = "e1000" + end + # Parallels + iscsi_gw.vm.provider "parallels" do |prl| + prl.name = "iscsi-gw#{i}" + prl.memory = "#{MEMORY}" + end + + iscsi_gw.vm.provider :linode do |provider| + provider.label = iscsi_gw.vm.hostname + end + end + 
end + + (0..NMONS - 1).each do |i| + config.vm.define "#{LABEL_PREFIX}mon#{i}" do |mon| + mon.vm.box = BOX + mon.vm.box_url = BOX_URL + mon.vm.hostname = "#{LABEL_PREFIX}mon#{i}" + if ASSIGN_STATIC_IP + mon.vm.network :private_network, + ip: "#{PUBLIC_SUBNET}.1#{i}" + end + # Virtualbox + mon.vm.provider :virtualbox do |vb| + vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"] + end + + # VMware + mon.vm.provider :vmware_fusion do |v| + v.vmx['memsize'] = "#{MEMORY}" + end + + # Libvirt + mon.vm.provider :libvirt do |lv| + lv.memory = MEMORY + lv.random_hostname = true + lv.nic_model_type = "e1000" + end + + # Parallels + mon.vm.provider "parallels" do |prl| + prl.name = "mon#{i}" + prl.memory = "#{MEMORY}" + end + + mon.vm.provider :linode do |provider| + provider.label = mon.vm.hostname + end + end + end + + (0..NOSDS - 1).each do |i| + config.vm.define "#{LABEL_PREFIX}osd#{i}" do |osd| + osd.vm.box = BOX + osd.vm.box_url = BOX_URL + osd.vm.hostname = "#{LABEL_PREFIX}osd#{i}" + if ASSIGN_STATIC_IP + osd.vm.network :private_network, + ip: "#{PUBLIC_SUBNET}.10#{i}" + osd.vm.network :private_network, + ip: "#{CLUSTER_SUBNET}.20#{i}" + end + # Virtualbox + osd.vm.provider :virtualbox do |vb| + # Create our own controller for consistency and to remove VM dependency + # but only do it once, otherwise it would fail when rebooting machines. + # We assume this has run if one disk was created before + unless File.exist?("disk-#{i}-0.vdi") + vb.customize ['storagectl', :id, + '--name', 'OSD Controller', + '--add', 'scsi'] + end + (0..2).each do |d| + vb.customize ['createhd', + '--filename', "disk-#{i}-#{d}", + '--size', '12000'] unless File.exist?("disk-#{i}-#{d}.vdi") + vb.customize ['storageattach', :id, + '--storagectl', 'OSD Controller', + '--port', 3 + d, + '--device', 0, + '--type', 'hdd', + '--medium', "disk-#{i}-#{d}.vdi"] + end + vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"] + end + + # VMware + osd.vm.provider :vmware_fusion do |v| + (0..1).each do |d| + v.vmx["scsi0:#{d + 1}.present"] = 'TRUE' + v.vmx["scsi0:#{d + 1}.fileName"] = + create_vmdk("disk-#{i}-#{d}", '11000MB') + end + v.vmx['memsize'] = "#{MEMORY}" + end + + # Libvirt + driverletters = ('a'..'z').to_a + osd.vm.provider :libvirt do |lv| + # always make /dev/sd{a/b/c/d} so that CI can ensure that + # virtualbox and libvirt will have the same devices to use for OSDs + (0..3).each do |d| + lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => '12G', :bus => "ide" + end + lv.memory = MEMORY + lv.random_hostname = true + lv.nic_model_type = "e1000" + end + + # Parallels + osd.vm.provider "parallels" do |prl| + prl.name = "osd#{i}" + prl.memory = "#{MEMORY}" + (0..1).each do |d| + prl.customize ["set", :id, + "--device-add", + "hdd", + "--iface", + "sata"] + end + end + + osd.vm.provider :linode do |provider| + provider.label = osd.vm.hostname + end + + end + end +end diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/group_vars/all new file mode 100644 index 00000000..ae65f9b5 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/group_vars/all @@ -0,0 +1,33 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +osd_objectstore: "bluestore" +osd_scenario: lvm +dmcrypt: true +num_osds: 2 +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +devices: + - /dev/sdb + - /dev/sdc + - /dev/nvme0n1 + - /dev/nvme1n1 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/setup.yml new file mode 120000 index 00000000..8cf11d4e --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_mixed_type.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/test.yml new file mode 120000 index 00000000..66d44c72 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/test.yml @@ -0,0 +1 @@ +../../../playbooks/test_explicit.yml
\ No newline at end of file
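Each batch scenario directory is wired to the shared playbooks through relative symlinks (the 120000-mode hunks above and below). Purely as an illustrative sketch, not code from this tree, the layout for this scenario corresponds to:

```python
# Illustrative only: recreate the playbook symlinks for one batch scenario.
# The link targets are the relative paths recorded in the 120000-mode hunks.
import os

scenario = ('src/ceph-volume/ceph_volume/tests/functional/batch/'
            'centos7/bluestore/mixed-type-dmcrypt-explicit')
links = {
    'setup.yml': '../../../playbooks/setup_mixed_type.yml',
    'test.yml': '../../../playbooks/test_explicit.yml',
    'test_zap.yml': '../../../playbooks/test_zap.yml',
    'Vagrantfile': '../../../../Vagrantfile',
}
for name, target in links.items():
    path = os.path.join(scenario, name)
    if not os.path.lexists(path):  # don't clobber an existing link
        os.symlink(target, path)
```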
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/test_zap.yml new file mode 120000 index 00000000..cb969fa1 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/test_zap.yml @@ -0,0 +1 @@ +../../../playbooks/test_zap.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml new file mode 100644 index 00000000..7d1a4449 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/group_vars/all new file mode 100644 index 00000000..ae65f9b5 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/group_vars/all @@ -0,0 +1,33 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +osd_objectstore: "bluestore" +osd_scenario: lvm +dmcrypt: true +num_osds: 2 +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +devices: + - /dev/sdb + - /dev/sdc + - /dev/nvme0n1 + - /dev/nvme1n1 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/setup.yml new file mode 120000 index 00000000..8cf11d4e --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_mixed_type.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/test.yml new file mode 120000 index 00000000..aa867bcd --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/test.yml @@ -0,0 +1 @@ +../../../playbooks/test.yml
\ No newline at end of file
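This scenario's group_vars enables `dmcrypt: true`, the path exercised by the raw `prepare_dmcrypt` tests earlier in this diff. As a non-authoritative sketch of the mapper naming those tests assert (not the tree's implementation):

```python
# The raw prepare tests assert '/dev/mapper/' plus a name of this shape,
# e.g. luks_open(..., 'ceph-123-foo-block-dmcrypt'). Sketched for reference:
def dmcrypt_mapper_name(osd_fsid, kname, device_type):
    # ('123', 'foo', 'db') -> 'ceph-123-foo-db-dmcrypt'
    return 'ceph-{}-{}-{}-dmcrypt'.format(osd_fsid, kname, device_type)

assert dmcrypt_mapper_name('123', 'foo', 'db') == 'ceph-123-foo-db-dmcrypt'
```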
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/test_zap.yml new file mode 120000 index 00000000..cb969fa1 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/test_zap.yml @@ -0,0 +1 @@ +../../../playbooks/test_zap.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/vagrant_variables.yml new file mode 100644 index 00000000..7d1a4449 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/group_vars/all new file mode 100644 index 00000000..c2e356fe --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/group_vars/all @@ -0,0 +1,34 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +osd_objectstore: "bluestore" +osd_scenario: lvm +num_osds: 2 +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +devices: + - /dev/sdb + - /dev/sdc + - /dev/nvme0n1 + - /dev/nvme1n1 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 +# 9GB in bytes +block_db_size: 9663676416 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/setup.yml new file mode 120000 index 00000000..8cf11d4e --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_mixed_type.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/test.yml new file mode 120000 index 00000000..66d44c72 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/test.yml @@ -0,0 +1 @@ +../../../playbooks/test_explicit.yml
\ No newline at end of file
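The `mixed-type-explicit` group_vars above pins `block_db_size: 9663676416`, annotated as "9GB in bytes". A quick sanity check, illustrative only, shows the value is exactly 9 GiB (binary gigabytes):

```python
# 9663676416 bytes == 9 * 1024**3, i.e. 9 GiB; the "9GB" in the
# group_vars comment is the binary gigabyte, not the decimal one.
GIB = 1024 ** 3
assert 9 * GIB == 9663676416
```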
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/test_zap.yml new file mode 120000 index 00000000..cb969fa1 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/test_zap.yml @@ -0,0 +1 @@ +../../../playbooks/test_zap.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/vagrant_variables.yml new file mode 100644 index 00000000..7d1a4449 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/group_vars/all new file mode 100644 index 00000000..c2e356fe --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/group_vars/all @@ -0,0 +1,34 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +osd_objectstore: "bluestore" +osd_scenario: lvm +num_osds: 2 +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +devices: + - /dev/sdb + - /dev/sdc + - /dev/nvme0n1 + - /dev/nvme1n1 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 +# 9GB in bytes +block_db_size: 9663676416 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/setup.yml new file mode 120000 index 00000000..8cf11d4e --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_mixed_type.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/test.yml new file mode 120000 index 00000000..aa867bcd --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/test.yml @@ -0,0 +1 @@ +../../../playbooks/test.yml
\ No newline at end of file
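Taken together, the four batch scenarios in this diff link their `test.yml` either to the shared `test.yml` or to `test_explicit.yml`. Summarized as data, reading off the symlink hunks above (not code from the tree):

```python
# Which shared playbook each batch scenario's test.yml points at,
# per the 120000-mode hunks in this diff:
scenario_test_playbook = {
    'mixed-type-dmcrypt-explicit': 'test_explicit.yml',
    'mixed-type-dmcrypt': 'test.yml',
    'mixed-type-explicit': 'test_explicit.yml',
    'mixed-type': 'test.yml',
}
```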
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/test_zap.yml new file mode 120000 index 00000000..cb969fa1 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/test_zap.yml @@ -0,0 +1 @@ +../../../playbooks/test_zap.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/vagrant_variables.yml new file mode 100644 index 00000000..7d1a4449 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/group_vars/all new file mode 100644 index 00000000..92ca5bce --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/group_vars/all @@ -0,0 +1,30 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +osd_objectstore: "bluestore" +osd_scenario: lvm +dmcrypt: true +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +devices: + - /dev/sdb + - /dev/sdc +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/setup.yml new file mode 120000 index 00000000..30874dfb --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/setup.yml @@ -0,0 +1 @@ +../../../playbooks/noop.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/test.yml new file mode 120000 index 00000000..aa867bcd --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/test.yml @@ -0,0 +1 @@ +../../../playbooks/test.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/test_zap.yml new file mode 120000 index 00000000..cb969fa1 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/test_zap.yml @@ -0,0 +1 @@ +../../../playbooks/test_zap.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/vagrant_variables.yml new file mode 100644 index 00000000..7d1a4449 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/group_vars/all new file mode 100644 index 00000000..f71c89ef --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/group_vars/all @@ -0,0 +1,29 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +osd_objectstore: "bluestore" +osd_scenario: lvm +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +devices: + - /dev/sdb + - /dev/sdc +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/setup.yml new file mode 120000 index 00000000..30874dfb --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/setup.yml @@ -0,0 +1 @@ +../../../playbooks/noop.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/test.yml new file mode 120000 index 00000000..aa867bcd --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/test.yml @@ -0,0 +1 @@ +../../../playbooks/test.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/test_zap.yml new file mode 120000 index 00000000..cb969fa1 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/test_zap.yml @@ -0,0 +1 @@ +../../../playbooks/test_zap.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/vagrant_variables.yml new file mode 100644 index 00000000..7d1a4449 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/group_vars/all new file mode 100644 index 00000000..006e9b85 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/group_vars/all @@ -0,0 +1,33 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +osd_objectstore: "filestore" +osd_scenario: lvm +dmcrypt: true +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +num_osds: 2 +devices: + - /dev/sdb + - /dev/sdc + - /dev/nvme0n1 + - /dev/nvme1n1 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/setup.yml new file mode 120000 index 00000000..8cf11d4e --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_mixed_type.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/test.yml new file mode 120000 index 00000000..66d44c72 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/test.yml @@ -0,0 +1 @@ +../../../playbooks/test_explicit.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/test_zap.yml new file mode 120000 index 00000000..cb969fa1 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/test_zap.yml @@ -0,0 +1 @@ +../../../playbooks/test_zap.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml new file mode 100644 index 00000000..7d1a4449 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/group_vars/all new file mode 100644 index 00000000..006e9b85 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/group_vars/all @@ -0,0 +1,33 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +osd_objectstore: "filestore" +osd_scenario: lvm +dmcrypt: true +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +num_osds: 2 +devices: + - /dev/sdb + - /dev/sdc + - /dev/nvme0n1 + - /dev/nvme1n1 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/setup.yml new file mode 120000 index 00000000..8cf11d4e --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_mixed_type.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/test.yml new file mode 120000 index 00000000..aa867bcd --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/test.yml @@ -0,0 +1 @@ +../../../playbooks/test.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/test_zap.yml new file mode 120000 index 00000000..cb969fa1 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/test_zap.yml @@ -0,0 +1 @@ +../../../playbooks/test_zap.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/vagrant_variables.yml new file mode 100644 index 00000000..7d1a4449 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/group_vars/all new file mode 100644 index 00000000..0b287c58 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/group_vars/all @@ -0,0 +1,32 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +osd_objectstore: "filestore" +osd_scenario: lvm +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +num_osds: 2 +devices: + - /dev/sdb + - /dev/sdc + - /dev/nvme0n1 + - /dev/nvme1n1 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/setup.yml new file mode 120000 index 00000000..8cf11d4e --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_mixed_type.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/test.yml new file mode 120000 index 00000000..66d44c72 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/test.yml @@ -0,0 +1 @@ +../../../playbooks/test_explicit.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/test_zap.yml new file mode 120000 index 00000000..cb969fa1 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/test_zap.yml @@ -0,0 +1 @@ +../../../playbooks/test_zap.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/vagrant_variables.yml new file mode 100644 index 00000000..7d1a4449 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/group_vars/all new file mode 100644 index 00000000..0b287c58 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/group_vars/all @@ -0,0 +1,32 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +osd_objectstore: "filestore" +osd_scenario: lvm +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +num_osds: 2 +devices: + - /dev/sdb + - /dev/sdc + - /dev/nvme0n1 + - /dev/nvme1n1 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/setup.yml new file mode 120000 index 00000000..8cf11d4e --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_mixed_type.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/test.yml new file mode 120000 index 00000000..aa867bcd --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/test.yml @@ -0,0 +1 @@ +../../../playbooks/test.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/test_zap.yml new file mode 120000 index 00000000..cb969fa1 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/test_zap.yml @@ -0,0 +1 @@ +../../../playbooks/test_zap.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/vagrant_variables.yml new file mode 100644 index 00000000..7d1a4449 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/group_vars/all new file mode 100644 index 00000000..719321cb --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/group_vars/all @@ -0,0 +1,32 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +osd_objectstore: "filestore" +osd_scenario: lvm +dmcrypt: true +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +devices: + - /dev/sdb + - /dev/sdc +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + osd: + osd_journal_size: 2048 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/setup.yml new file mode 120000 index 00000000..30874dfb --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/setup.yml @@ -0,0 +1 @@ +../../../playbooks/noop.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/test.yml new file mode 120000 index 00000000..aa867bcd --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/test.yml @@ -0,0 +1 @@ +../../../playbooks/test.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/test_zap.yml new file mode 120000 index 00000000..cb969fa1 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/test_zap.yml @@ -0,0 +1 @@ +../../../playbooks/test_zap.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/vagrant_variables.yml new file mode 100644 index 00000000..7d1a4449 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/group_vars/all new file mode 100644 index 00000000..8cf7a0c9 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/group_vars/all @@ -0,0 +1,29 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +osd_objectstore: "filestore" +osd_scenario: lvm +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +devices: + - /dev/sdb + - /dev/sdc +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/setup.yml new file mode 120000 index 00000000..30874dfb --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/setup.yml @@ -0,0 +1 @@ +../../../playbooks/noop.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/test.yml new file mode 120000 index 00000000..aa867bcd --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/test.yml @@ -0,0 +1 @@ +../../../playbooks/test.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/test_zap.yml new file mode 120000 index 00000000..cb969fa1 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/test_zap.yml @@ -0,0 +1 @@ +../../../playbooks/test_zap.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/vagrant_variables.yml new file mode 100644 index 00000000..7d1a4449 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/noop.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/noop.yml new file mode 100644 index 00000000..5922ecf2 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/noop.yml @@ -0,0 +1,12 @@ +--- + +# Allows to always include a 'setup.yml' file in functional tests, and execute +# only on the ones that actually need it + +- hosts: all + gather_facts: no + + tasks: + + - debug: + msg: "This is an empty setup playbook. 
The current scenario didn't need any work done" diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/setup_mixed_type.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/setup_mixed_type.yml new file mode 100644 index 00000000..5ed9fdef --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/setup_mixed_type.yml @@ -0,0 +1,188 @@ + +- hosts: osds + become: yes + tasks: + + - name: install lvm2 + package: + name: lvm2 + state: present + - name: tell lvm to globally ignore loop devices + lineinfile: + path: /etc/lvm/lvm.conf + line: ' global_filter = [ "r|loop|", "a|.*|" ]' + insertafter: '^devices {' + - name: tell lvm to ignore loop devices + lineinfile: + path: /etc/lvm/lvm.conf + line: ' filter = [ "r|loop|", "a|.*|" ]' + insertafter: '^devices {' + - name: rescan pvs + command: "pvscan --cache" + - name: create the nvme image systemd unit + copy: + content: | + [Unit] + Description=NVMe loop device + After=local-fs.target + Wants=local-fs.target + + [Service] + Type=simple + ExecStart=/bin/bash /opt/ceph-nvme.sh + StandardOutput=journal + StandardError=journal + + [Install] + WantedBy=multi-user.target + dest: "/etc/systemd/system/ceph-nvme.service" + + - name: create the ceph-nvme startup script + copy: + content: | + set -x + set -e + modprobe nvmet + modprobe nvme_loop + modprobe nvme_fabrics + modprobe loop + losetup -v /dev/loop0 /opt/loop0_nvme0 + losetup -v /dev/loop1 /opt/loop1_nvme1 + losetup -l + nvmetcli restore /opt/loop.json + nvme connect -t loop -n testnqn1 -q hostnqn + nvme connect -t loop -n testnqn2 -q hostnqn + nvme list + dest: "/opt/ceph-nvme.sh" + + - name: ensure ceph-nvme is enabled + service: + name: ceph-nvme + state: stopped + enabled: yes + + - name: install nvme dependencies + package: + name: "{{ item }}" + state: present + with_items: + - nvme-cli + - nvmetcli + + - name: enable NVME kernel modules + modprobe: + name: "{{ item }}" + state: present + with_items: + - nvmet + - nvme_loop + - nvme_fabrics + + - name: detach nvme files from loop devices + command: "losetup -d /dev/{{ item }}" + failed_when: false + loop: + - loop0 + - loop1 + + - name: remove previous nvme files + file: + path: "{{ item }}" + state: absent + loop: + - /opt/loop0_nvme0 + - /opt/loop1_nvme1 + + - name: create 11GB sparse files for NVMe + command: "fallocate -l 11G {{ item }}" + loop: + - /opt/loop0_nvme0 + - /opt/loop1_nvme1 + + - name: setup loop devices with sparse files + command: "losetup /dev/loop{{ item }} /opt/loop{{ item }}_nvme{{ item }}" + failed_when: false + loop: + - 0 + - 1 + + - name: create the loop.json file for nvmetcli + copy: + content: | + { + "hosts": [ + { + "nqn": "hostnqn" + } + ], + "ports": [ + { + "addr": { + "adrfam": "", + "traddr": "", + "treq": "not specified", + "trsvcid": "", + "trtype": "loop" + }, + "portid": 1, + "referrals": [], + "subsystems": [ + "testnqn1", + "testnqn2" + ] + } + ], + "subsystems": [ + { + "allowed_hosts": [ + "hostnqn" + ], + "attr": { + "allow_any_host": "0" + }, + "namespaces": [ + { + "device": { + "nguid": "ef90689c-6c46-d44c-89c1-4067801309a8", + "path": "/dev/loop0" + }, + "enable": 1, + "nsid": 1 + } + ], + "nqn": "testnqn1" + }, + { + "allowed_hosts": [ + "hostnqn" + ], + "attr": { + "allow_any_host": "0" + }, + "namespaces": [ + { + "device": { + "nguid": "ef90689c-6c46-d44c-89c1-4067801309a7", + "path": "/dev/loop1" + }, + "enable": 1, + "nsid": 2 + } + ], + "nqn": "testnqn2" + } + ] + } + dest: "/opt/loop.json" + + - name: setup the /dev/loop0 target 
with nvmetcli + command: nvmetcli restore /opt/loop.json + + - name: connect the new target as an nvme device + command: "nvme connect -t loop -n testnqn{{ item }} -q hostnqn" + loop: + - 1 + - 2 + + - name: debug output for nvme list + command: nvme list diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test.yml new file mode 100644 index 00000000..7c6c3059 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test.yml @@ -0,0 +1,72 @@ + +- hosts: osds + become: yes + tasks: + + - name: stop ceph-osd daemons + service: + name: "ceph-osd@{{ item }}" + state: stopped + with_items: "{{ osd_ids }}" + + +- hosts: mons + become: yes + tasks: + + - name: mark osds down + command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}" + with_items: "{{ osd_ids }}" + - name: purge osds + command: "ceph --cluster {{ cluster }} osd purge osd.{{ item }} --yes-i-really-mean-it" + with_items: "{{ osd_ids }}" + + +- hosts: osds + become: yes + tasks: + + - name: rescan pvs + command: "pvscan --cache" + - name: rescan vgs + command: "vgscan" + - name: list lvs + command: "lvs" + - name: zap devices used for OSDs + command: "ceph-volume --cluster {{ cluster }} lvm zap {{ item }} --destroy" + with_items: "{{ devices }}" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: batch create devices again + command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: ensure batch create is idempotent + command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}" + register: batch_cmd + failed_when: false + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: check batch idempotency + fail: + msg: "lvm batch failed idempotency check" + when: + - batch_cmd.rc != 0 + - "'strategy changed' not in batch_cmd.stderr" + + - name: run batch --report to see if devices get filtered + command: "ceph-volume --cluster {{ cluster }} lvm batch --report --format=json --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}" + register: report_cmd + failed_when: false + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: check batch --report idempotency + fail: + msg: "lvm batch --report failed idempotency check" + when: + - report_cmd.rc != 0 + - "'strategy changed' not in report_cmd.stderr" diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_explicit.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_explicit.yml new file mode 100644 index 00000000..1ff0acc9 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_explicit.yml @@ -0,0 +1,64 @@ + +- hosts: osds + become: yes + tasks: + + - name: stop ceph-osd daemons + service: + name: "ceph-osd@{{ item }}" + state: stopped + with_items: "{{ osd_ids }}" + +- hosts: mons + become: yes + tasks: + + - name: mark osds down + command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}" + with_items: "{{ osd_ids }}" + - name: purge osds + command: "ceph --cluster {{ cluster }} osd purge osd.{{ item }} --yes-i-really-mean-it" + with_items: "{{ osd_ids }}" + +- hosts: osds + become: yes + vars: + external_devices: "{{ '--db-devices' if 
osd_objectstore == 'bluestore' else '--journal-devices' }}" + tasks: + + - name: zap devices used for OSDs + command: "ceph-volume --cluster {{ cluster }} lvm zap {{ item }} --destroy" + with_items: "{{ devices }}" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: batch create devices again + command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: ensure batch create is idempotent when all data devices are filtered + command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}" + register: batch_cmd + failed_when: false + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: check batch idempotency + fail: + msg: "lvm batch failed idempotency check" + when: + - batch_cmd.rc != 0 + + - name: run batch --report to see if devices get filtered + command: "ceph-volume --cluster {{ cluster }} lvm batch --report --format=json --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}" + register: report_cmd + failed_when: false + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: check batch --report idempotency + fail: + msg: "lvm batch --report failed idempotency check" + when: + - report_cmd.rc != 0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_zap.yml new file mode 100644 index 00000000..9d63df9e --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_zap.yml @@ -0,0 +1,34 @@ + +- hosts: osds + become: yes + tasks: + + - name: stop ceph-osd daemons + service: + name: "ceph-osd@{{ item }}" + state: stopped + with_items: "{{ osd_ids }}" + + +- hosts: mons + become: yes + tasks: + + - name: mark osds down + command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}" + with_items: "{{ osd_ids }}" + - name: purge osds + command: "ceph --cluster {{ cluster }} osd purge osd.{{ item }} --yes-i-really-mean-it" + with_items: "{{ osd_ids }}" + + +- hosts: osds + become: yes + tasks: + + - name: zap devices used for OSDs + command: "ceph-volume --cluster {{ cluster }} lvm zap --osd-id {{ item }} --destroy" + with_items: "{{ osd_ids }}" + environment: + CEPH_VOLUME_DEBUG: 1 + diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini new file mode 100644 index 00000000..017853a2 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini @@ -0,0 +1,78 @@ +[tox] +envlist = {centos7,xenial}-{bluestore,filestore}-{single_type,single_type_dmcrypt},centos7-{bluestore,filestore}-{mixed_type,mixed_type_dmcrypt,mixed_type_explicit, mixed_type_dmcrypt_explicit} +skipsdist = True + +[testenv] +deps = mock +whitelist_externals = + vagrant + bash + git + cp +passenv=* +setenv= + ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config + ANSIBLE_ACTION_PLUGINS = {envdir}/tmp/ceph-ansible/plugins/actions + ANSIBLE_STDOUT_CALLBACK = debug + ANSIBLE_RETRY_FILES_ENABLED = False + ANSIBLE_SSH_RETRIES = 5 + VAGRANT_CWD = {changedir} + CEPH_VOLUME_DEBUG = 1 + 
DEBIAN_FRONTEND=noninteractive +changedir= + centos7-filestore-single_type: {toxinidir}/centos7/filestore/single-type + centos7-filestore-single_type_dmcrypt: {toxinidir}/centos7/filestore/single-type-dmcrypt + centos7-filestore-mixed_type: {toxinidir}/centos7/filestore/mixed-type + centos7-filestore-mixed_type_dmcrypt: {toxinidir}/centos7/filestore/mixed-type-dmcrypt + centos7-filestore-mixed_type_explicit: {toxinidir}/centos7/filestore/mixed-type-explicit + centos7-filestore-mixed_type_dmcrypt_explicit: {toxinidir}/centos7/filestore/mixed-type-dmcrypt-explicit + centos7-bluestore-single_type: {toxinidir}/centos7/bluestore/single-type + centos7-bluestore-single_type_dmcrypt: {toxinidir}/centos7/bluestore/single-type-dmcrypt + centos7-bluestore-mixed_type: {toxinidir}/centos7/bluestore/mixed-type + centos7-bluestore-mixed_type_dmcrypt: {toxinidir}/centos7/bluestore/mixed-type-dmcrypt + centos7-bluestore-mixed_type_explicit: {toxinidir}/centos7/bluestore/mixed-type-explicit + centos7-bluestore-mixed_type_dmcrypt_explicit: {toxinidir}/centos7/bluestore/mixed-type-dmcrypt-explicit + xenial-filestore-single_type: {toxinidir}/xenial/filestore/single-type + xenial-filestore-single_type_dmcrypt: {toxinidir}/xenial/filestore/single-type-dmcrypt + xenial-bluestore-single_type: {toxinidir}/xenial/bluestore/single-type + xenial-bluestore-single_type_dmcrypt: {toxinidir}/xenial/bluestore/single-type-dmcrypt +commands= + git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible + python -m pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt + + bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox} + bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir} + + cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible + + # individual scenario setup + ansible-playbook -vv -i {changedir}/hosts {changedir}/setup.yml + + # use ceph-ansible to deploy a ceph cluster on the vms + ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}" + + # prepare nodes for testing with testinfra + ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml + + # test cluster state using testinfra + py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests + + # attempt to reboot all vms + bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox} + + # after a reboot, osds may take about 20 seconds to come back up + sleep 30 + + # retest to ensure cluster came back up correctly after rebooting + py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests + + # destroy an OSD, zap its device and recreate it using its ID + ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml + + # retest to ensure cluster came back up correctly + py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests + + # test zap OSDs by ID + ansible-playbook -vv -i {changedir}/hosts {changedir}/test_zap.yml + + vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
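For orientation: each env name in the envlist above selects one scenario directory through changedir, and the commands block then runs the full cycle for it: clone ceph-ansible, boot the VMs with Vagrant, deploy a cluster, verify it with testinfra, reboot and re-verify, exercise the scenario's test.yml and test_zap.yml playbooks, and finally destroy the VMs. A minimal sketch of driving one scenario by hand, assuming vagrant and virtualbox are installed; the exported variables are the optional {env:...} overrides the ini reads, and the values shown are just the defaults:

  # run from src/ceph-volume/ceph_volume/tests/functional/batch
  export CEPH_ANSIBLE_BRANCH=master      # ceph-ansible branch cloned into the env
  export CEPH_DEV_BRANCH=master          # ceph branch passed to the deploy playbook
  export CEPH_DEV_SHA1=latest            # ceph sha1 passed to the deploy playbook
  tox -e centos7-bluestore-single_type   # any name generated by the envlist works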
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/.DS_Store b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/.DS_Store Binary files differ new file mode 100644 index 00000000..5008ddfc --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/.DS_Store diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file
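Note: the "new file mode 120000" entries here and in the rest of this series are symlinks (120000 is git's file mode for symlinks), so each one-line hunk contains only the link target; every scenario directory points back at the shared Vagrantfile and playbooks rather than carrying its own copies. Recreating the convention by hand would look roughly like this, using the single-type-dmcrypt scenario as the example:

  # inside batch/xenial/bluestore/single-type-dmcrypt/
  ln -s ../../../../Vagrantfile Vagrantfile    # shared Vagrant config
  ln -s ../../../playbooks/test.yml test.yml   # shared test playbook

The .DS_Store entry just above is macOS Finder metadata that appears to have been committed by accident; it carries no test content.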
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/group_vars/all new file mode 100644 index 00000000..59151426 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/group_vars/all @@ -0,0 +1,30 @@ +--- + +dmcrypt: True +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +osd_objectstore: "bluestore" +osd_scenario: lvm +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +devices: + - /dev/sdb + - /dev/sdc +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/setup.yml new file mode 120000 index 00000000..30874dfb --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/setup.yml @@ -0,0 +1 @@ +../../../playbooks/noop.yml \ No newline at end of file
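Note: setup.yml links to playbooks/noop.yml (whose only task prints "The current scenario didn't need any work done", the message visible at the very top of this hunk series) because the single-type batch scenarios hand whole devices to ceph-volume and need no pre-provisioning. By contrast, the mixed-type scenarios run setup_mixed_type.yml (above) to fabricate loop-backed NVMe devices, and the lvm scenarios further down link setup.yml to setup_partitions.yml to carve up /dev/sdd first.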
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/test.yml new file mode 120000 index 00000000..aa867bcd --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/test.yml @@ -0,0 +1 @@ +../../../playbooks/test.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/test_zap.yml new file mode 120000 index 00000000..cb969fa1 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/test_zap.yml @@ -0,0 +1 @@ +../../../playbooks/test_zap.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/vagrant_variables.yml new file mode 100644 index 00000000..7252344d --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: ceph/ubuntu-xenial +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/group_vars/all new file mode 100644 index 00000000..f71c89ef --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/group_vars/all @@ -0,0 +1,29 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +osd_objectstore: "bluestore" +osd_scenario: lvm +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +devices: + - /dev/sdb + - /dev/sdc +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/setup.yml new file mode 120000 index 00000000..30874dfb --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/setup.yml @@ -0,0 +1 @@ +../../../playbooks/noop.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/test.yml new file mode 120000 index 00000000..aa867bcd --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/test.yml @@ -0,0 +1 @@ +../../../playbooks/test.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/test_zap.yml new file mode 120000 index 00000000..cb969fa1 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/test_zap.yml @@ -0,0 +1 @@ +../../../playbooks/test_zap.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/vagrant_variables.yml new file mode 100644 index 00000000..7252344d --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: ceph/ubuntu-xenial +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/group_vars/all new file mode 100644 index 00000000..a4eafa10 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/group_vars/all @@ -0,0 +1,32 @@ +--- + +dmcrypt: True +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +osd_objectstore: "filestore" +osd_scenario: lvm +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +devices: + - /dev/sdb + - /dev/sdc +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + osd: + osd_journal_size: 2048 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/setup.yml new file mode 120000 index 00000000..30874dfb --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/setup.yml @@ -0,0 +1 @@ +../../../playbooks/noop.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/test.yml new file mode 120000 index 00000000..aa867bcd --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/test.yml @@ -0,0 +1 @@ +../../../playbooks/test.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/test_zap.yml new file mode 120000 index 00000000..cb969fa1 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/test_zap.yml @@ -0,0 +1 @@ +../../../playbooks/test_zap.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/vagrant_variables.yml new file mode 100644 index 00000000..7252344d --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: ceph/ubuntu-xenial +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/group_vars/all new file mode 100644 index 00000000..8cf7a0c9 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/group_vars/all @@ -0,0 +1,29 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +osd_objectstore: "filestore" +osd_scenario: lvm +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +devices: + - /dev/sdb + - /dev/sdc +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + +# The following is only needed for testing purposes and is not part of +# ceph-ansible supported variables + +osd_ids: + - 0 + - 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/setup.yml new file mode 120000 index 00000000..30874dfb --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/setup.yml @@ -0,0 +1 @@ +../../../playbooks/noop.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/test.yml new file mode 120000 index 00000000..aa867bcd --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/test.yml @@ -0,0 +1 @@ +../../../playbooks/test.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/test_zap.yml new file mode 120000 index 00000000..cb969fa1 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/test_zap.yml @@ -0,0 +1 @@ +../../../playbooks/test_zap.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/vagrant_variables.yml new file mode 100644 index 00000000..7252344d --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: ceph/ubuntu-xenial +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/group_vars/all new file mode 100644 index 00000000..01ae1dae --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/group_vars/all @@ -0,0 +1,29 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "bluestore" +osd_scenario: lvm +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +lvm_volumes: + - data: data-lv1 + data_vg: test_group + crush_device_class: test + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals + - data: /dev/sdd1 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/setup.yml new file mode 120000 index 00000000..1c1a3ce8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_partitions.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/test.yml new file mode 120000 index 00000000..165d9da2 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/test.yml @@ -0,0 +1 @@ +../../../playbooks/test_bluestore.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/vagrant_variables.yml new file mode 100644 index 00000000..7d1a4449 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/group_vars/all new file mode 100644 index 00000000..9d4f50de --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/group_vars/all @@ -0,0 +1,30 @@ +--- + +dmcrypt: True +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "bluestore" +osd_scenario: lvm +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +lvm_volumes: + - data: data-lv1 + data_vg: test_group + crush_device_class: test + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals + - data: /dev/sdd1 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/setup.yml new file mode 120000 index 00000000..1c1a3ce8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_partitions.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/test.yml new file mode 100644 index 00000000..bbd5b45d --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/test.yml @@ -0,0 +1,104 @@ +- hosts: osds + become: yes + tasks: + + - name: stop ceph-osd@2 daemon + service: + name: ceph-osd@2 + state: stopped + + - name: stop ceph-osd@0 daemon + service: + name: ceph-osd@0 + state: stopped + +- hosts: mons + become: yes + tasks: + + - name: destroy osd.2 + command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it" + + - name: destroy osd.0 + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" + +- hosts: osds + become: yes + tasks: + + # osd.2 device + - name: zap /dev/sdd1 + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy" + environment: + CEPH_VOLUME_DEBUG: 1 + + # partitions have been completely removed, so re-create them again + - name: re-create partition /dev/sdd for lvm data usage + parted: + device: /dev/sdd + number: 1 + part_start: 0% + part_end: 50% + unit: '%' + label: gpt + state: present + + - name: redeploy osd.2 using /dev/sdd1 + command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2" + environment: + CEPH_VOLUME_DEBUG: 1 + + # osd.0 lv + - name: zap test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: redeploy osd.0 using test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data test_group/data-lv1 --osd-id 0" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: stop ceph-osd@0 daemon + service: + name: ceph-osd@0 + state: stopped + + +- hosts: mons + become: yes + tasks: + + - name: destroy osd.0 + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" + + +- hosts: osds + become: yes + tasks: + + + - name: zap test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: prepare osd.0 using test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: activate all to start the previously prepared osd.0 + command: "ceph-volume lvm activate --all" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: node inventory + command: "ceph-volume inventory" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: list all OSDs + command: "ceph-volume lvm list" + environment: + CEPH_VOLUME_DEBUG: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/vagrant_variables.yml new file mode 100644 index 00000000..7d1a4449 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu 
precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/group_vars/all new file mode 100644 index 00000000..5af1b7ac --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/group_vars/all @@ -0,0 +1,32 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "filestore" +osd_scenario: lvm +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sda +lvm_volumes: + - data: data-lv1 + journal: /dev/sdc1 + data_vg: test_group + crush_device_class: test + - data: data-lv2 + journal: journal1 + data_vg: test_group + journal_vg: journals + - data: /dev/sdd1 + journal: /dev/sdd2 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/setup.yml new file mode 120000 index 00000000..1c1a3ce8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_partitions.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/test.yml new file mode 120000 index 00000000..1a8c37c1 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/test.yml @@ -0,0 +1 @@ +../../../playbooks/test_filestore.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/vagrant_variables.yml new file mode 100644 index 00000000..7d1a4449 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/group_vars/all new file mode 100644 index 00000000..7544678b --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/group_vars/all @@ -0,0 +1,33 @@ +--- + +dmcrypt: True +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "filestore" +osd_scenario: lvm +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sda +lvm_volumes: + - data: data-lv1 + journal: /dev/sdc1 + data_vg: test_group + crush_device_class: test + - data: data-lv2 + journal: journal1 + data_vg: test_group + journal_vg: journals + - data: /dev/sdd1 + journal: /dev/sdd2 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/setup.yml new file mode 120000 index 00000000..1c1a3ce8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_partitions.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/test.yml new file mode 100644 index 00000000..91c9a1b8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/test.yml @@ -0,0 +1,108 @@ + +- hosts: osds + become: yes + tasks: + + - name: stop ceph-osd@2 daemon + service: + name: ceph-osd@2 + state: stopped + + - name: stop ceph-osd@0 daemon + service: + name: ceph-osd@0 + state: stopped + + +- hosts: mons + become: yes + tasks: + + - name: destroy osd.2 + command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it" + + - name: destroy osd.0 + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" + + +- hosts: osds + become: yes + tasks: + + # osd.2 device + - name: zap /dev/sdd1 + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: zap /dev/sdd2 + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy" + environment: + CEPH_VOLUME_DEBUG: 1 + + # partitions have been completely removed, so re-create them again + - name: re-create partition /dev/sdd for lvm data usage + parted: + device: /dev/sdd + number: 1 + part_start: 0% + part_end: 50% + unit: '%' + label: gpt + state: present + + - name: re-create partition /dev/sdd lvm journals + parted: + device: /dev/sdd + number: 2 + part_start: 50% + part_end: 100% + unit: '%' + state: present + label: gpt + + - name: redeploy osd.2 using /dev/sdd1 + command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2" + environment: + CEPH_VOLUME_DEBUG: 1 + + # osd.0 lv + - name: zap test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: zap /dev/sdc1 + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1 --destroy" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: re-create partition /dev/sdc1 + parted: + device: /dev/sdc + number: 1 + part_start: 0% + part_end: 50% + unit: '%' + state: present + label: gpt + + - name: prepare osd.0 again using test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: activate all to start the previously prepared osd.0 + command: "ceph-volume lvm activate --filestore --all" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: node inventory + command: "ceph-volume inventory" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: list all OSDs + command: "ceph-volume lvm list" + environment: + CEPH_VOLUME_DEBUG: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/vagrant_variables.yml new file mode 100644 index 00000000..7d1a4449 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet 
interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/setup_partitions.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/setup_partitions.yml new file mode 100644 index 00000000..37a48949 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/setup_partitions.yml @@ -0,0 +1,27 @@ +--- + +- hosts: osds + gather_facts: false + become: yes + tasks: + + - name: partition /dev/sdd for lvm data usage + parted: + device: /dev/sdd + number: 1 + part_start: 0% + part_end: 50% + unit: '%' + label: gpt + state: present + + - name: partition /dev/sdd lvm journals + parted: + device: /dev/sdd + number: 2 + part_start: 50% + part_end: 100% + unit: '%' + state: present + label: gpt + diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml new file mode 100644 index 00000000..1e9b8c3e --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml @@ -0,0 +1,148 @@ + +- hosts: osds + become: yes + tasks: + + - name: stop ceph-osd@2 daemon + service: + name: ceph-osd@2 + state: stopped + + - name: stop ceph-osd@0 daemon + service: + name: ceph-osd@0 + state: stopped + + +- hosts: mons + become: yes + tasks: + + - name: destroy osd.2 + command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it" + + - name: destroy osd.0 + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" + + +- hosts: osds + become: yes + tasks: + + # osd.2 device + - name: zap 
/dev/sdd1 + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy" + environment: + CEPH_VOLUME_DEBUG: 1 + + # partitions have been completely removed, so re-create them again + - name: re-create partition /dev/sdd for lvm data usage + parted: + device: /dev/sdd + number: 1 + part_start: 0% + part_end: 50% + unit: '%' + label: gpt + state: present + + - name: redeploy osd.2 using /dev/sdd1 + command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2" + environment: + CEPH_VOLUME_DEBUG: 1 + + # osd.0 device (zap without --destroy that removes the LV) + - name: zap test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: prepare osd.0 again using test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: find all OSD directories + find: + paths: /var/lib/ceph/osd + recurse: no + file_type: directory + register: osd_directories + + - name: find all OSD symlinks + find: + paths: /var/lib/ceph/osd + recurse: yes + depth: 2 + file_type: link + register: osd_symlinks + + # set the OSD dir and the block/block.db links to root:root permissions, to + # ensure that the OSD will be able to activate regardless + - file: + path: "{{ item.path }}" + owner: root + group: root + with_items: + - "{{ osd_directories.files }}" + + - file: + path: "{{ item.path }}" + owner: root + group: root + with_items: + - "{{ osd_symlinks.files }}" + + - name: activate all to start the previously prepared osd.0 + command: "ceph-volume lvm activate --all" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: node inventory + command: "ceph-volume inventory" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: list all OSDs + command: "ceph-volume lvm list" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: create temporary directory + tempfile: + state: directory + suffix: sparse + register: tmpdir + + - name: create a 5GB sparse file + command: fallocate -l 5G {{ tmpdir.path }}/sparse.file + + - name: find an empty loop device + command: losetup -f + register: losetup_list + + - name: setup loop device with sparse file + command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file + + - name: create volume group + command: vgcreate test_zap {{ losetup_list.stdout }} + failed_when: false + + - name: create logical volume 1 + command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap + failed_when: false + + - name: create logical volume 2 + command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap + failed_when: false + + # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed + - name: zap test_zap/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: zap test_zap/data-lv2 + command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2" + environment: + CEPH_VOLUME_DEBUG: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml new file mode 100644 index 00000000..4e43839e --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml @@ -0,0 +1,169 @@ + +- hosts: osds + become: yes + tasks: + + - name: stop ceph-osd@2 daemon + service: + name: ceph-osd@2 + state: stopped + + 
+    - name: stop ceph-osd@0 daemon
+      service:
+        name: ceph-osd@0
+        state: stopped
+
+
+- hosts: mons
+  become: yes
+  tasks:
+
+    - name: destroy osd.2
+      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
+
+    - name: destroy osd.0
+      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    # osd.2 device
+    - name: zap /dev/sdd1
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    # osd.2 journal
+    - name: zap /dev/sdd2
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    # partitions have been completely removed, so re-create them again
+    - name: re-create partition /dev/sdd for lvm data usage
+      parted:
+        device: /dev/sdd
+        number: 1
+        part_start: 0%
+        part_end: 50%
+        unit: '%'
+        label: gpt
+        state: present
+
+    - name: re-create partition /dev/sdd lvm journals
+      parted:
+        device: /dev/sdd
+        number: 2
+        part_start: 50%
+        part_end: 100%
+        unit: '%'
+        state: present
+        label: gpt
+
+    - name: redeploy osd.2 using /dev/sdd1
+      command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    # osd.0 data lv
+    # note: we don't use --destroy here, to test that zapping works without
+    # that flag. --destroy is used in the bluestore tests
+    - name: zap test_group/data-lv1
+      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    # osd.0 journal device (zap without --destroy, which would remove the partition)
+    - name: zap /dev/sdc1
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: prepare osd.0 again using test_group/data-lv1
+      command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: find all OSD paths
+      find:
+        paths: /var/lib/ceph/osd
+        recurse: no
+        file_type: directory
+      register: osd_paths
+
+    # set all OSD paths to root:root to ensure that the OSD will be able to
+    # activate regardless
+    - name: mangle permissions to root
+      file:
+        path: "{{ item.path }}"
+        owner: root
+        group: root
+        recurse: yes
+      with_items:
+        - "{{ osd_paths.files }}"
+
+    - name: stop ceph-osd@2 daemon
+      service:
+        name: ceph-osd@2
+        state: stopped
+
+    - name: stop ceph-osd@1 daemon
+      service:
+        name: ceph-osd@1
+        state: stopped
+
+    - name: activate all to start the previously prepared osd.0
+      command: "ceph-volume lvm activate --filestore --all"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: node inventory
+      command: "ceph-volume inventory"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: list all OSDs
+      command: "ceph-volume lvm list"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: create temporary directory
+      tempfile:
+        state: directory
+        suffix: sparse
+      register: tmpdir
+
+    - name: create a 5GB sparse file
+      command: fallocate -l 5G {{ tmpdir.path }}/sparse.file
+
+    - name: find an empty loop device
+      command: losetup -f
+      register: losetup_list
+
+    - name: setup loop device with sparse file
+      command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file
+
+    - name: create volume group
+      command: vgcreate test_zap {{ losetup_list.stdout }}
+      failed_when: false
+
+    - name: create logical volume 1
+      command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
+      failed_when: false
+
+    - name: create logical volume 2
+      command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
+      failed_when: false
+
+    # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
+    - name: zap test_zap/data-lv1
+      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: zap test_zap/data-lv2
+      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
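The loop-device scaffolding that the playbook builds is easy to reproduce by hand when debugging zap behavior. A minimal bash sketch of the same flow, assuming root on a disposable test VM with ceph-volume installed; the test_zap names mirror the playbook, the cleanup lines are additions:

#!/bin/bash
# Build a throwaway VG backed by a sparse file, then exercise zap --destroy.
tmpdir=$(mktemp -d --suffix=sparse)
fallocate -l 5G "${tmpdir}/sparse.file"
loopdev=$(losetup -f)                    # first unused loop device
losetup "${loopdev}" "${tmpdir}/sparse.file"

vgcreate test_zap "${loopdev}"
lvcreate --yes -l 50%FREE -n data-lv1 test_zap
lvcreate --yes -l 50%FREE -n data-lv2 test_zap

# zap --destroy removes each LV but must leave the VG in place,
# otherwise the second zap would fail with a missing volume group.
ceph-volume lvm zap --destroy test_zap/data-lv1
ceph-volume lvm zap --destroy test_zap/data-lv2

# cleanup (hedged: the VG may already be gone after the last zap)
vgremove -f test_zap || true
losetup -d "${loopdev}"
rm -rf "${tmpdir}"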
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
new file mode 100644
index 00000000..0b38c85b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
@@ -0,0 +1,79 @@
+[tox]
+envlist = {centos7,xenial}-{filestore,bluestore}-{create,prepare_activate,dmcrypt}
+skipsdist = True
+
+[testenv]
+deps = mock
+whitelist_externals =
+    vagrant
+    bash
+    git
+    cp
+    sleep
+passenv=*
+setenv=
+  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
+  ANSIBLE_ACTION_PLUGINS = {envdir}/tmp/ceph-ansible/plugins/actions
+  ANSIBLE_STDOUT_CALLBACK = debug
+  ANSIBLE_RETRY_FILES_ENABLED = False
+  ANSIBLE_SSH_RETRIES = 5
+  VAGRANT_CWD = {changedir}
+  CEPH_VOLUME_DEBUG = 1
+  DEBIAN_FRONTEND=noninteractive
+changedir=
+  # plain/unencrypted
+  centos7-filestore-create: {toxinidir}/centos7/filestore/create
+  centos7-bluestore-create: {toxinidir}/centos7/bluestore/create
+  xenial-filestore-create: {toxinidir}/xenial/filestore/create
+  xenial-bluestore-create: {toxinidir}/xenial/bluestore/create
+  # dmcrypt
+  centos7-filestore-dmcrypt: {toxinidir}/centos7/filestore/dmcrypt
+  centos7-bluestore-dmcrypt: {toxinidir}/centos7/bluestore/dmcrypt
+  xenial-filestore-dmcrypt: {toxinidir}/xenial/filestore/dmcrypt
+  xenial-bluestore-dmcrypt: {toxinidir}/xenial/bluestore/dmcrypt
+  # TODO: these are placeholders for now, eventually we want to
+  # test the prepare/activate workflow of ceph-volume as well
+  xenial-filestore-prepare_activate: {toxinidir}/xenial/filestore/prepare_activate
+  xenial-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate
+  centos7-filestore-prepare_activate: {toxinidir}/centos7/filestore/prepare_activate
+  centos7-bluestore-prepare_activate: {toxinidir}/centos7/bluestore/prepare_activate
+commands=
+  git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
+  pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
+
+  bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
+  bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
+
+  # create logical volumes to test with on the vms
+  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml
+
+  # ad-hoc/local test setup for lvm
+  ansible-playbook -vv -i {changedir}/hosts {changedir}/setup.yml
+
+  cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible
+
+  # use ceph-ansible to deploy a ceph cluster on the vms
+  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"
+
+  # prepare nodes for testing with testinfra
+  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
+
+  # test cluster state using testinfra
+  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+
+  # reboot all vms - vagrant reload can time out, so use the retry wrapper script
+  bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
+
+  # after a reboot, osds may take about 20 seconds to come back up
+  sleep 30
+
+  # retest to ensure cluster came back up correctly after rebooting
+  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+
+  # destroy an OSD, zap its device and recreate it using its ID
+  ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
+
+  # retest to ensure cluster came back up correctly
+  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+
+  vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
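The command list above is the whole CI lifecycle for one scenario: provision, deploy, test, reboot, retest, redeploy an OSD, retest, destroy. When iterating locally, the same flow can be driven through tox directly; a sketch, assuming tox, vagrant, and ansible are installed (the environment name and provider choice are illustrative):

#!/bin/bash
# Run one environment from the matrix; posargs after -- replace the default
# --provider=virtualbox in the tox.ini above.
export CEPH_ANSIBLE_BRANCH=master
tox -e centos7-bluestore-create -- --provider=libvirt

# tox stops at the first failing command, so on failure the VMs stay up
# (vagrant destroy never runs) and can be inspected via the generated
# per-scenario ssh config:
ssh -F centos7/bluestore/create/vagrant_ssh_config osd0

diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile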
\ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/group_vars/all
new file mode 100644
index 00000000..01ae1dae
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/group_vars/all
@@ -0,0 +1,29 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+lvm_volumes:
+  - data: data-lv1
+    data_vg: test_group
+    crush_device_class: test
+  - data: data-lv2
+    data_vg: test_group
+    db: journal1
+    db_vg: journals
+  - data: /dev/sdd1
+os_tuning_params:
+  - { name: kernel.pid_max, value: 4194303 }
+  - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+  global:
+    osd_pool_default_pg_num: 8
+    osd_pool_default_size: 1
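Each entry in lvm_volumes above becomes one OSD. For this bluestore scenario the calls ceph-ansible eventually issues on the OSD host look roughly like the sketch below; this is a hand-written approximation, not literal ceph-ansible output, though the ceph-volume flags themselves are real:

#!/bin/bash
# An existing LV, with a CRUSH device class override:
ceph-volume --cluster test lvm create --bluestore \
    --data test_group/data-lv1 --crush-device-class test

# An existing LV, with a separate block.db LV:
ceph-volume --cluster test lvm create --bluestore \
    --data test_group/data-lv2 --block.db journals/journal1

# A raw partition; ceph-volume wraps it in a new LV itself:
ceph-volume --cluster test lvm create --bluestore --data /dev/sdd1

diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/setup.yml
new file mode 120000
index 00000000..1c1a3ce8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_partitions.yml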
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/test.yml new file mode 120000 index 00000000..165d9da2 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/test.yml @@ -0,0 +1 @@ +../../../playbooks/test_bluestore.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/vagrant_variables.yml new file mode 100644 index 00000000..7252344d --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: ceph/ubuntu-xenial +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/group_vars/all new file mode 100644 index 00000000..9d4f50de --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/group_vars/all @@ -0,0 +1,30 @@ +--- + +dmcrypt: True +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "bluestore" +osd_scenario: lvm +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +lvm_volumes: + - data: data-lv1 + data_vg: test_group + crush_device_class: test + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals + - data: /dev/sdd1 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/setup.yml new file mode 120000 index 00000000..1c1a3ce8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_partitions.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/test.yml new file mode 100644 index 00000000..27290d93 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/test.yml @@ -0,0 +1,104 @@ +- hosts: osds + become: yes + tasks: + + - name: stop ceph-osd@2 daemon + service: + name: ceph-osd@2 + state: stopped + + - name: stop ceph-osd@0 daemon + service: + name: ceph-osd@0 + state: stopped + +- hosts: mons + become: yes + tasks: + + - name: destroy osd.2 + command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it" + + - name: destroy osd.0 + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" + + +- hosts: osds + become: yes + tasks: + + # osd.2 device + - name: zap /dev/sdd1 + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy" + environment: + CEPH_VOLUME_DEBUG: 1 + + # partitions have been completely removed, so re-create them again + - name: re-create partition /dev/sdd for lvm data usage + parted: + device: /dev/sdd + number: 1 + part_start: 0% + part_end: 50% + unit: '%' + label: gpt + state: present + + - name: redeploy osd.2 using /dev/sdd1 + command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2" + environment: + CEPH_VOLUME_DEBUG: 1 + + # osd.0 lv + - name: zap test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: redeploy osd.0 using test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data test_group/data-lv1 --osd-id 0" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: stop ceph-osd@0 daemon + service: + name: ceph-osd@0 + state: stopped + + +- hosts: mons + become: yes + tasks: + + - name: destroy osd.0 + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" + + +- hosts: osds + become: yes + tasks: + + - name: zap test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: prepare osd.0 using test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: activate all to start the previously prepared osd.0 + command: "ceph-volume lvm activate --all" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: node inventory + command: "ceph-volume inventory" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: list all OSDs + command: "ceph-volume lvm list" + environment: + CEPH_VOLUME_DEBUG: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/vagrant_variables.yml new file mode 100644 index 00000000..7252344d --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, 
enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: ceph/ubuntu-xenial +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/group_vars/all new file mode 100644 index 00000000..5af1b7ac --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/group_vars/all @@ -0,0 +1,32 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "filestore" +osd_scenario: lvm +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sda +lvm_volumes: + - data: data-lv1 + journal: /dev/sdc1 + data_vg: test_group + crush_device_class: test + - data: data-lv2 + journal: journal1 + data_vg: test_group + journal_vg: journals + - data: /dev/sdd1 + journal: /dev/sdd2 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/setup.yml new file mode 120000 index 00000000..1c1a3ce8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_partitions.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/test.yml new file mode 120000 index 00000000..1a8c37c1 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/test.yml @@ -0,0 +1 @@ +../../../playbooks/test_filestore.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/vagrant_variables.yml new file mode 100644 index 00000000..82b330ef --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/vagrant_variables.yml @@ -0,0 +1,54 @@ +--- +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: ceph/ubuntu-xenial +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/group_vars/all new file mode 100644 index 00000000..7544678b --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/group_vars/all @@ -0,0 +1,33 @@ +--- + +dmcrypt: True +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "filestore" +osd_scenario: lvm +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sda +lvm_volumes: + - data: data-lv1 + journal: /dev/sdc1 + data_vg: test_group + crush_device_class: test + - data: data-lv2 + journal: journal1 + data_vg: test_group + journal_vg: journals + - data: /dev/sdd1 + journal: /dev/sdd2 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/setup.yml new file mode 120000 index 00000000..1c1a3ce8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_partitions.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/test.yml new file mode 100644 index 00000000..91c9a1b8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/test.yml @@ -0,0 +1,108 @@ + +- hosts: osds + become: yes + tasks: + + - name: stop ceph-osd@2 daemon + service: + name: ceph-osd@2 + state: stopped + + - name: stop ceph-osd@0 daemon + service: + name: ceph-osd@0 + state: stopped + + +- hosts: mons + become: yes + tasks: + + - name: destroy osd.2 + command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it" + + - name: destroy osd.0 + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" + + +- hosts: osds + become: yes + tasks: + + # osd.2 device + - name: zap /dev/sdd1 + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: zap /dev/sdd2 + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy" + environment: + CEPH_VOLUME_DEBUG: 1 + + # partitions have been completely removed, so re-create them again + - name: re-create partition /dev/sdd for lvm data usage + parted: + device: /dev/sdd + number: 1 + part_start: 0% + part_end: 50% + unit: '%' + label: gpt + state: present + + - name: re-create partition /dev/sdd lvm journals + parted: + device: /dev/sdd + number: 2 + part_start: 50% + part_end: 100% + unit: '%' + state: present + label: gpt + + - name: redeploy osd.2 using /dev/sdd1 + command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2" + environment: + CEPH_VOLUME_DEBUG: 1 + + # osd.0 lv + - name: zap test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: zap /dev/sdc1 + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1 --destroy" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: re-create partition /dev/sdc1 + parted: + device: /dev/sdc + number: 1 + part_start: 0% + part_end: 50% + unit: '%' + state: present + label: gpt + + - name: prepare osd.0 again using test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: activate all to start the previously prepared osd.0 + command: "ceph-volume lvm activate --filestore --all" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: node inventory + command: "ceph-volume inventory" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: list all OSDs + command: "ceph-volume lvm list" + environment: + CEPH_VOLUME_DEBUG: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/vagrant_variables.yml new file mode 100644 index 00000000..82b330ef --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/vagrant_variables.yml @@ -0,0 +1,54 @@ +--- +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface 
name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: ceph/ubuntu-xenial +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml b/src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml new file mode 100644 index 00000000..0b65a172 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml @@ -0,0 +1,153 @@ +--- +# Defines deployment design and assigns role to server groups + +- hosts: + - mons + - osds + - mgrs + + gather_facts: false + any_errors_fatal: true + become: true + + tags: + - always + + vars: + delegate_facts_host: True + dashboard_enabled: False + + environment: + DEBIAN_FRONTEND: noninteractive + + pre_tasks: + # If we can't get python2 installed before any module is used we will fail + # so just try what we can to get it installed + - name: check for python2 + stat: + path: /usr/bin/python + ignore_errors: yes + register: systempython2 + + - name: install python2 for debian based systems + raw: sudo apt-get -y install python-simplejson + ignore_errors: yes + when: + - systempython2.stat is undefined or systempython2.stat.exists == false + + # Ansible will try to auto-install python-apt, in some systems this might be + # python3-apt, or python-apt, and it has caused whole runs to fail because + # it is trying to do an interactive prompt + - name: install python-apt and aptitude in debian based systems + raw: sudo apt-get -y install "{{ item }}" + ignore_errors: yes + with_items: + - python3-apt + - python-apt + - aptitude + + - name: install python2 for fedora + raw: sudo dnf -y install python creates=/usr/bin/python + ignore_errors: yes + when: + - systempython2.stat is undefined 
or systempython2.stat.exists == false + + - name: install python2 for opensuse + raw: sudo zypper -n install python-base creates=/usr/bin/python2.7 + ignore_errors: yes + when: + - systempython2.stat is undefined or systempython2.stat.exists == false + + - name: gather facts + setup: + when: + - not delegate_facts_host | bool + + - name: gather and delegate facts + setup: + delegate_to: "{{ item }}" + delegate_facts: True + with_items: "{{ groups['all'] }}" + run_once: true + when: + - delegate_facts_host | bool + + - name: install required packages for fedora > 23 + raw: sudo dnf -y install python2-dnf libselinux-python ntp + when: + - ansible_distribution == 'Fedora' + - ansible_distribution_major_version|int >= 23 + + - name: check if it is atomic host + stat: + path: /run/ostree-booted + register: stat_ostree + + - name: set_fact is_atomic + set_fact: + is_atomic: '{{ stat_ostree.stat.exists }}' + + tasks: + - import_role: + name: ceph-defaults + - import_role: + name: ceph-validate + +- hosts: + - mons + - osds + - mgrs + gather_facts: false + become: True + any_errors_fatal: true + vars: + dashboard_enabled: False + tasks: + - import_role: + name: ceph-defaults + - import_role: + name: ceph-facts + - import_role: + name: ceph-handler + - import_role: + name: ceph-common + + - name: rsync ceph-volume to test nodes on centos + synchronize: + src: "{{ toxinidir }}/../../../../ceph_volume" + dest: "/usr/lib/python2.7/site-packages" + use_ssh_args: true + when: + - ansible_os_family == "RedHat" + - inventory_hostname in groups.get(osd_group_name, []) + + - name: rsync ceph-volume to test nodes on ubuntu + synchronize: + src: "{{ toxinidir }}/../../../../ceph_volume" + dest: "/usr/lib/python2.7/dist-packages" + use_ssh_args: true + when: + - ansible_os_family == "Debian" + - inventory_hostname in groups.get(osd_group_name, []) + + - name: run ceph-config role + import_role: + name: ceph-config + + - name: run ceph-mon role + import_role: + name: ceph-mon + when: + - inventory_hostname in groups.get(mon_group_name, []) + + - name: run ceph-mgr role + import_role: + name: ceph-mgr + when: + - inventory_hostname in groups.get(mgr_group_name, []) + + - name: run ceph-osd role + import_role: + name: ceph-osd + when: + - inventory_hostname in groups.get(osd_group_name, []) diff --git a/src/ceph-volume/ceph_volume/tests/functional/scripts/generate_ssh_config.sh b/src/ceph-volume/ceph_volume/tests/functional/scripts/generate_ssh_config.sh new file mode 100644 index 00000000..43e64a65 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/scripts/generate_ssh_config.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# Generate a custom ssh config from Vagrant so that it can then be used by +# ansible.cfg + +path=$1 + +if [ $# -eq 0 ] + then + echo "A path to the scenario is required as an argument and it wasn't provided" + exit 1 +fi + +cd "$path" +vagrant ssh-config > vagrant_ssh_config diff --git a/src/ceph-volume/ceph_volume/tests/functional/scripts/output.py b/src/ceph-volume/ceph_volume/tests/functional/scripts/output.py new file mode 100644 index 00000000..16071944 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/scripts/output.py @@ -0,0 +1,5 @@ +import os +from ceph_volume import terminal + +char = os.environ.get('INVALID') +terminal.stdout(char) diff --git a/src/ceph-volume/ceph_volume/tests/functional/scripts/test_unicode.sh b/src/ceph-volume/ceph_volume/tests/functional/scripts/test_unicode.sh new file mode 100644 index 00000000..e4ba4f0a --- /dev/null +++ 
b/src/ceph-volume/ceph_volume/tests/functional/scripts/test_unicode.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +# Not entirely sure why these executables don't seem to be available in the +# $PATH when running from tox. Calling out to `which` seems to fix it, at the +# expense of making the script a bit obtuse + +mktemp=$(which mktemp) +cat=$(which cat) +grep=$(which grep) +PYTHON_EXECUTABLE=`which python` +STDERR_FILE=$($mktemp) +INVALID="→" + +echo "stderr file created: $STDERR_FILE" + +INVALID="$INVALID" $PYTHON_EXECUTABLE $1 2> ${STDERR_FILE} + +retVal=$? + +if [ $retVal -ne 0 ]; then + echo "Failed test: Unexpected failure from running Python script" + echo "Below is output of stderr captured:" + $cat "${STDERR_FILE}" + exit $retVal +fi + +$grep --quiet "$INVALID" ${STDERR_FILE} + +retVal=$? +if [ $retVal -ne 0 ]; then + echo "Failed test: expected to find \"${INVALID}\" character in tmpfile: \"${STDERR_FILE}\"" + echo "Below is output of stderr captured:" + $cat "${STDERR_FILE}" +fi +exit $retVal diff --git a/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_reload.sh b/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_reload.sh new file mode 100644 index 00000000..3211b066 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_reload.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# vagrant-libvirt has a common behavior where it times out when "reloading" vms. Instead +# of calling `vagrant reload` attempt to halt everything, and then start everything, which gives +# this script the ability to try the `vagrant up` again in case of failure +# + +vagrant halt +# This should not really be needed, but in case of a possible race condition between halt +# and up, it might improve things +sleep 5 + + +retries=0 +until [ $retries -ge 5 ] +do + echo "Attempting to start VMs. Attempts: $retries" + timeout 10m vagrant up "$@" && break + retries=$[$retries+1] + sleep 5 +done diff --git a/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_up.sh b/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_up.sh new file mode 100644 index 00000000..2f9a15f8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_up.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +retries=0 +until [ $retries -ge 5 ] +do + echo "Attempting to start VMs. Attempts: $retries" + timeout 10m vagrant up "$@" && break + retries=$[$retries+1] + sleep 5 +done + +sleep 10 diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/group_vars/all
new file mode 100644
index 00000000..c265e783
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/group_vars/all
@@ -0,0 +1,19 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.1.0/24"
+cluster_network: "192.168.2.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+os_tuning_params:
+  - { name: kernel.pid_max, value: 4194303 }
+  - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+  global:
+    osd_pool_default_pg_num: 8
+    osd_pool_default_size: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd0.yml
new file mode 100644
index 00000000..2e1c7ee9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd0.yml
@@ -0,0 +1,7 @@
+---
+
+devices:
+  - '/dev/sdb'
+dedicated_devices:
+  - '/dev/sdc'
+osd_scenario: "non-collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd1.yml
new file mode 100644
index 00000000..7e90071c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd1.yml
@@ -0,0 +1,6 @@
+---
+
+devices:
+  - '/dev/sdb'
+  - '/dev/sdc'
+osd_scenario: "collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/hosts
new file mode 100644
index 00000000..e0c08b94
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/hosts
@@ -0,0 +1,9 @@
+[mons]
+mon0 monitor_interface=eth1
+
+[osds]
+osd0
+osd1
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/test.yml
new file mode 100644
index 00000000..24e2c035
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/test.yml
@@ -0,0 +1,31 @@
+---
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: list all OSD directories
+      find:
+        paths: /var/lib/ceph/osd
+        file_type: directory
+      register: osd_paths
+
+    - name: scan all OSD directories
+      command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+      with_items:
+        - "{{ osd_paths.files }}"
+
+    - name: list all OSD JSON files
+      find:
+        paths: /etc/ceph/osd
+        file_type: file
+      register: osd_configs
+
+    - name: activate all scanned OSDs
+      command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+      with_items:
+        - "{{ osd_configs.files }}"
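The scan/activate pair above is the heart of the simple subcommand: scan captures a running ceph-disk style OSD as JSON, and activate brings it up from that JSON instead of relying on udev. By hand the same flow looks roughly like this sketch; the OSD fsid in the filename is illustrative, since scan names the file after the OSD id and fsid it discovers:

#!/bin/bash
# Scan one OSD directory of the cluster named "test" and persist its metadata.
ceph-volume --cluster test simple scan /var/lib/ceph/osd/test-0

# scan writes /etc/ceph/osd/<id>-<osd_fsid>.json; this exact name is
# a made-up example:
cat /etc/ceph/osd/0-061bad03-1d45-4929-a2c4-8a00d195aef4.json

# Re-activate the OSD from the captured JSON file.
ceph-volume --cluster test simple activate \
    --file /etc/ceph/osd/0-061bad03-1d45-4929-a2c4-8a00d195aef4.json

diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/vagrant_variables.yml
new file mode 100644
index 00000000..63700c3c
--- /dev/null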
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 2 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.1 +cluster_subnet: 192.168.2 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } + diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/group_vars/all new file mode 100644 index 00000000..885c2c82 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/group_vars/all @@ -0,0 +1,22 @@ +--- + +dmcrypt: True +ceph_dev: True +cluster: test +public_network: "192.168.1.0/24" +cluster_network: "192.168.2.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "bluestore" +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + osd: + osd_dmcrypt_type: luks diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd0.yml new file mode 100644 index 00000000..2e1c7ee9 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd0.yml @@ -0,0 +1,7 @@ +--- + +devices: + - '/dev/sdb' +dedicated_devices: + - '/dev/sdc' +osd_scenario: "non-collocated" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd1.yml new file mode 100644 index 00000000..7e90071c --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd1.yml @@ -0,0 +1,6 @@ +--- + +devices: + - '/dev/sdb' + - '/dev/sdc' +osd_scenario: "collocated" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/hosts new file mode 100644 index 00000000..e0c08b94 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/hosts @@ -0,0 +1,9 @@ +[mons] +mon0 monitor_interface=eth1 + +[osds] +osd0 +osd1 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/test.yml new file mode 100644 index 00000000..55ae7cc8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/test.yml @@ -0,0 +1,15 @@ +--- + +- hosts: osds + become: yes + tasks: + + - name: scan all running OSDs + command: "ceph-volume --cluster={{ cluster }} simple scan" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: activate all scanned OSDs + command: "ceph-volume --cluster={{ cluster }} simple activate --all" + environment: + CEPH_VOLUME_DEBUG: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/vagrant_variables.yml new file mode 100644 index 00000000..63700c3c --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 
+osd_vms: 2 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.1 +cluster_subnet: 192.168.2 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } + diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/group_vars/all new file mode 100644 index 00000000..30bcf5be --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/group_vars/all @@ -0,0 +1,22 @@ +--- + +dmcrypt: True +ceph_dev: True +cluster: test +public_network: "192.168.1.0/24" +cluster_network: "192.168.2.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "bluestore" +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + osd: + osd_dmcrypt_type: plain diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd0.yml new file mode 100644 index 00000000..2e1c7ee9 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd0.yml @@ -0,0 +1,7 @@ +--- + +devices: + - '/dev/sdb' +dedicated_devices: + - '/dev/sdc' +osd_scenario: "non-collocated" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd1.yml new file mode 100644 index 00000000..7e90071c --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd1.yml @@ -0,0 +1,6 @@ +--- + +devices: + - '/dev/sdb' + - '/dev/sdc' +osd_scenario: "collocated" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/hosts new file mode 100644 index 00000000..e0c08b94 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/hosts @@ -0,0 +1,9 @@ +[mons] +mon0 monitor_interface=eth1 + +[osds] +osd0 +osd1 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/test.yml new file mode 100644 index 00000000..24e2c035 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/test.yml @@ -0,0 +1,31 @@ +--- + +- hosts: osds + become: yes + tasks: + + - name: list all OSD directories + find: + paths: /var/lib/ceph/osd + file_type: directory + register: osd_paths + + - name: scan all OSD directories + command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}" + environment: + CEPH_VOLUME_DEBUG: 1 + with_items: + - "{{ osd_paths.files }}" + + - name: list all OSD JSON files + find: + paths: /etc/ceph/osd + file_type: file + register: osd_configs + + - name: activate all scanned OSDs + command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}" + environment: + CEPH_VOLUME_DEBUG: 1 + with_items: + - "{{ osd_configs.files }}" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/vagrant_variables.yml 
b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/vagrant_variables.yml new file mode 100644 index 00000000..63700c3c --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 2 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.1 +cluster_subnet: 192.168.2 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } + diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/group_vars/all new file mode 100644 index 00000000..7ab573b0 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/group_vars/all @@ -0,0 +1,19 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.1.0/24" +cluster_network: "192.168.2.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "filestore" +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/host_vars/osd0.yml new file mode 100644 index 00000000..2e1c7ee9 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/host_vars/osd0.yml @@ -0,0 +1,7 @@ +--- + +devices: + - '/dev/sdb' +dedicated_devices: + - '/dev/sdc' +osd_scenario: "non-collocated" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/host_vars/osd1.yml new file mode 100644 index 00000000..7e90071c --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/host_vars/osd1.yml @@ -0,0 +1,6 @@ +--- + +devices: + - '/dev/sdb' + - '/dev/sdc' +osd_scenario: "collocated" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/hosts new file mode 100644 index 00000000..e0c08b94 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/hosts @@ -0,0 +1,9 @@ +[mons] +mon0 monitor_interface=eth1 + +[osds] +osd0 +osd1 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/test.yml new file mode 100644 index 00000000..0745f257 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/test.yml @@ -0,0 +1,29 @@ +--- + +- hosts: osds + become: yes + tasks: + + - name: list all OSD directories + find: + paths: /var/lib/ceph/osd + file_type: directory + register: osd_paths + + - name: scan all OSD directories + command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}" + environment: + CEPH_VOLUME_DEBUG: 1 + with_items: + - "{{ osd_paths.files }}" + + - name: list all OSD JSON files + find: + paths: /etc/ceph/osd + file_type: file + register: osd_configs + + - name: activate all scanned OSDs + command: "ceph-volume --cluster={{ cluster }} simple activate --all" + environment: + CEPH_VOLUME_DEBUG: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/vagrant_variables.yml new file mode 100644 index 00000000..63700c3c --- /dev/null +++ 
b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 2 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.1 +cluster_subnet: 192.168.2 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } + diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/group_vars/all new file mode 100644 index 00000000..a27cfbad --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/group_vars/all @@ -0,0 +1,22 @@ +--- + +dmcrypt: True +ceph_dev: True +cluster: test +public_network: "192.168.1.0/24" +cluster_network: "192.168.2.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "filestore" +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + osd: + osd_dmcrypt_type: luks diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/host_vars/osd0.yml new file mode 100644 index 00000000..2e1c7ee9 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/host_vars/osd0.yml @@ -0,0 +1,7 @@ +--- + +devices: + - '/dev/sdb' +dedicated_devices: + - '/dev/sdc' +osd_scenario: "non-collocated" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/host_vars/osd1.yml new file mode 100644 index 00000000..7e90071c --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/host_vars/osd1.yml @@ -0,0 +1,6 @@ +--- + +devices: + - '/dev/sdb' + - '/dev/sdc' +osd_scenario: "collocated" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/hosts new file mode 100644 index 00000000..e0c08b94 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/hosts @@ -0,0 +1,9 @@ +[mons] +mon0 monitor_interface=eth1 + +[osds] +osd0 +osd1 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/test.yml new file mode 100644 index 00000000..24e2c035 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/test.yml @@ -0,0 +1,31 @@ +--- + +- hosts: osds + become: yes + tasks: + + - name: list all OSD directories + find: + paths: /var/lib/ceph/osd + file_type: directory + register: osd_paths + + - name: scan all OSD directories + command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}" + environment: + CEPH_VOLUME_DEBUG: 1 + with_items: + - "{{ osd_paths.files }}" + + - name: list all OSD JSON files + find: + paths: /etc/ceph/osd + file_type: file + register: osd_configs + + - name: activate all scanned OSDs + command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}" + environment: + CEPH_VOLUME_DEBUG: 1 + with_items: + - "{{ osd_configs.files }}" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/vagrant_variables.yml 
b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/vagrant_variables.yml new file mode 100644 index 00000000..63700c3c --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 2 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.1 +cluster_subnet: 192.168.2 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } + diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/group_vars/all new file mode 100644 index 00000000..edac61b2 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/group_vars/all @@ -0,0 +1,22 @@ +--- + +dmcrypt: True +ceph_dev: True +cluster: test +public_network: "192.168.1.0/24" +cluster_network: "192.168.2.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "filestore" +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + osd: + osd_dmcrypt_type: plain diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/host_vars/osd0.yml new file mode 100644 index 00000000..2e1c7ee9 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/host_vars/osd0.yml @@ -0,0 +1,7 @@ +--- + +devices: + - '/dev/sdb' +dedicated_devices: + - '/dev/sdc' +osd_scenario: "non-collocated" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/host_vars/osd1.yml new file mode 100644 index 00000000..7e90071c --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/host_vars/osd1.yml @@ -0,0 +1,6 @@ +--- + +devices: + - '/dev/sdb' + - '/dev/sdc' +osd_scenario: "collocated" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/hosts new file mode 100644 index 00000000..e0c08b94 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/hosts @@ -0,0 +1,9 @@ +[mons] +mon0 monitor_interface=eth1 + +[osds] +osd0 +osd1 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/test.yml new file mode 100644 index 00000000..24e2c035 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/test.yml @@ -0,0 +1,31 @@ +--- + +- hosts: osds + become: yes + tasks: + + - name: list all OSD directories + find: + paths: /var/lib/ceph/osd + file_type: directory + register: osd_paths + + - name: scan all OSD directories + command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}" + environment: + CEPH_VOLUME_DEBUG: 1 + with_items: + - "{{ osd_paths.files }}" + + - name: list all OSD JSON files + find: + paths: /etc/ceph/osd + file_type: file + register: osd_configs + + - name: activate all scanned OSDs + command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}" + environment: + CEPH_VOLUME_DEBUG: 1 + with_items: + - "{{ osd_configs.files }}" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/vagrant_variables.yml 
b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/vagrant_variables.yml new file mode 100644 index 00000000..63700c3c --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 2 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.1 +cluster_subnet: 192.168.2 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } + diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini new file mode 100644 index 00000000..e462d3b8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini @@ -0,0 +1,66 @@ +[tox] +envlist = {centos7,xenial}-{filestore,bluestore}-{activate,dmcrypt_plain,dmcrypt_luks} +skipsdist = True + +[testenv] +deps = mock +whitelist_externals = + vagrant + bash + git + sleep + cp +passenv=* +setenv= + ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config + ANSIBLE_ACTION_PLUGINS = {envdir}/tmp/ceph-ansible/plugins/actions + ANSIBLE_STDOUT_CALLBACK = debug + ANSIBLE_RETRY_FILES_ENABLED = False + ANSIBLE_SSH_RETRIES = 5 + VAGRANT_CWD = {changedir} + CEPH_VOLUME_DEBUG = 1 + DEBIAN_FRONTEND=noninteractive +changedir= + centos7-filestore-activate: {toxinidir}/centos7/filestore/activate + centos7-bluestore-activate: {toxinidir}/centos7/bluestore/activate + xenial-filestore-activate: {toxinidir}/xenial/filestore/activate + xenial-bluestore-activate: {toxinidir}/xenial/bluestore/activate + xenial-bluestore-dmcrypt_plain: {toxinidir}/xenial/bluestore/dmcrypt-plain + xenial-bluestore-dmcrypt_luks: {toxinidir}/xenial/bluestore/dmcrypt-luks + xenial-filestore-dmcrypt_plain: {toxinidir}/xenial/filestore/dmcrypt-plain + xenial-filestore-dmcrypt_luks: {toxinidir}/xenial/filestore/dmcrypt-luks + centos7-bluestore-dmcrypt_plain: {toxinidir}/centos7/bluestore/dmcrypt-plain + centos7-bluestore-dmcrypt_luks: {toxinidir}/centos7/bluestore/dmcrypt-luks + centos7-filestore-dmcrypt_plain: {toxinidir}/centos7/filestore/dmcrypt-plain + centos7-filestore-dmcrypt_luks: {toxinidir}/centos7/filestore/dmcrypt-luks +commands= + git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible + pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt + + bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox} + bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir} + + cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible + + # use ceph-ansible to deploy a ceph cluster on the vms + ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}" + + # prepare nodes for testing with testinfra + ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml + + # test cluster state using testinfra + py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests + + # make ceph-volume simple take over all the OSDs that got deployed, disabling ceph-disk + ansible-playbook -vv -i {changedir}/hosts
{changedir}/test.yml + + # reboot all vms + bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox} + + # wait 2 minutes for services to be ready + sleep 120 + + # retest to ensure cluster came back up correctly after rebooting + py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests + + vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"} diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
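The tox.ini above drives the whole matrix: each envlist factor combination selects one of the changedir entries (note the factors use underscores, dmcrypt_plain and dmcrypt_luks, while the scenario directories use hyphens; the changedir table does that mapping). A minimal sketch of exercising a single scenario by hand, assuming tox and a Vagrant provider are installed locally; the branch override shown simply feeds the {env:CEPH_ANSIBLE_BRANCH:master} default from the commands section:

    # run one environment from the envlist, keeping the default virtualbox provider
    tox -e centos7-bluestore-activate

    # same scenario against libvirt; arguments after -- replace {posargs:--provider=virtualbox}
    CEPH_ANSIBLE_BRANCH=master tox -e centos7-bluestore-activate -- --provider=libvirt
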
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/group_vars/all new file mode 100644 index 00000000..c265e783 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/group_vars/all @@ -0,0 +1,19 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.1.0/24" +cluster_network: "192.168.2.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "bluestore" +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/host_vars/osd0.yml new file mode 100644 index 00000000..2e1c7ee9 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/host_vars/osd0.yml @@ -0,0 +1,7 @@ +--- + +devices: + - '/dev/sdb' +dedicated_devices: + - '/dev/sdc' +osd_scenario: "non-collocated" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/host_vars/osd1.yml new file mode 100644 index 00000000..7e90071c --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/host_vars/osd1.yml @@ -0,0 +1,6 @@ +--- + +devices: + - '/dev/sdb' + - '/dev/sdc' +osd_scenario: "collocated" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/hosts new file mode 100644 index 00000000..e0c08b94 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/hosts @@ -0,0 +1,9 @@ +[mons] +mon0 monitor_interface=eth1 + +[osds] +osd0 +osd1 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/test.yml new file mode 100644 index 00000000..24e2c035 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/test.yml @@ -0,0 +1,31 @@ +--- + +- hosts: osds + become: yes + tasks: + + - name: list all OSD directories + find: + paths: /var/lib/ceph/osd + file_type: directory + register: osd_paths + + - name: scan all OSD directories + command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}" + environment: + CEPH_VOLUME_DEBUG: 1 + with_items: + - "{{ osd_paths.files }}" + + - name: list all OSD JSON files + find: + paths: /etc/ceph/osd + file_type: file + register: osd_configs + + - name: activate all scanned OSDs + command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}" + environment: + CEPH_VOLUME_DEBUG: 1 + with_items: + - "{{ osd_configs.files }}" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/vagrant_variables.yml new file mode 100644 index 00000000..b4aa759a --- /dev/null +++ 
b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 2 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.1 +cluster_subnet: 192.168.2 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: ceph/ubuntu-xenial +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } + diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/group_vars/all new file mode 100644 index 00000000..885c2c82 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/group_vars/all @@ -0,0 +1,22 @@ +--- + +dmcrypt: True +ceph_dev: True +cluster: test +public_network: "192.168.1.0/24" +cluster_network: "192.168.2.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "bluestore" +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + osd: + osd_dmcrypt_type: luks diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/host_vars/osd0.yml new file mode 100644 index 00000000..2e1c7ee9 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/host_vars/osd0.yml @@ -0,0 +1,7 @@ +--- + +devices: + - '/dev/sdb' +dedicated_devices: + - '/dev/sdc' +osd_scenario: "non-collocated" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/host_vars/osd1.yml new file mode 100644 index 00000000..7e90071c --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/host_vars/osd1.yml @@ -0,0 +1,6 @@ +--- + +devices: + - '/dev/sdb' + - '/dev/sdc' +osd_scenario: "collocated" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/hosts new file mode 100644 index 00000000..e0c08b94 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/hosts @@ -0,0 +1,9 @@ +[mons] +mon0 monitor_interface=eth1 + +[osds] +osd0 +osd1 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/test.yml new file mode 100644 index 00000000..24e2c035 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/test.yml @@ -0,0 +1,31 @@ +--- + +- hosts: osds + become: yes + tasks: + + - name: list all OSD directories + find: + paths: /var/lib/ceph/osd + file_type: directory + register: osd_paths + + - name: scan all OSD directories + command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}" + environment: + CEPH_VOLUME_DEBUG: 1 + with_items: + - "{{ osd_paths.files }}" + + - name: list all OSD JSON files + find: + paths: /etc/ceph/osd + file_type: file + register: osd_configs + + - name: activate all scanned OSDs + command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}" + environment: + CEPH_VOLUME_DEBUG: 1 + with_items: + - "{{ osd_configs.files }}" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/vagrant_variables.yml 
b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/vagrant_variables.yml new file mode 100644 index 00000000..b4aa759a --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 2 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.1 +cluster_subnet: 192.168.2 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: ceph/ubuntu-xenial +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } + diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
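The dmcrypt-luks and dmcrypt-plain scenarios differ from the plain activate ones only in their group_vars (dmcrypt: True plus the osd_dmcrypt_type override); the scan/activate playbook itself is identical. A quick, hedged way to confirm on one of the OSD VMs that the devices really came up encrypted, sketched with device names taken from the disks list above (actual partition numbering depends on the ceph-disk layout):

    # crypt mapper devices should appear between the raw partitions and the OSD data
    lsblk -o NAME,TYPE,FSTYPE,MOUNTPOINT /dev/sdb /dev/sdc

    # in the luks flavour at least one partition carries a LUKS header; plain mode will not
    for p in /dev/sdb?; do sudo cryptsetup isLuks "$p" && echo "$p: LUKS"; done
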
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/group_vars/all new file mode 100644 index 00000000..30bcf5be --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/group_vars/all @@ -0,0 +1,22 @@ +--- + +dmcrypt: True +ceph_dev: True +cluster: test +public_network: "192.168.1.0/24" +cluster_network: "192.168.2.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "bluestore" +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + osd: + osd_dmcrypt_type: plain diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/host_vars/osd0.yml new file mode 100644 index 00000000..2e1c7ee9 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/host_vars/osd0.yml @@ -0,0 +1,7 @@ +--- + +devices: + - '/dev/sdb' +dedicated_devices: + - '/dev/sdc' +osd_scenario: "non-collocated" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/host_vars/osd1.yml new file mode 100644 index 00000000..7e90071c --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/host_vars/osd1.yml @@ -0,0 +1,6 @@ +--- + +devices: + - '/dev/sdb' + - '/dev/sdc' +osd_scenario: "collocated" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/hosts new file mode 100644 index 00000000..e0c08b94 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/hosts @@ -0,0 +1,9 @@ +[mons] +mon0 monitor_interface=eth1 + +[osds] +osd0 +osd1 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/test.yml new file mode 100644 index 00000000..24e2c035 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/test.yml @@ -0,0 +1,31 @@ +--- + +- hosts: osds + become: yes + tasks: + + - name: list all OSD directories + find: + paths: /var/lib/ceph/osd + file_type: directory + register: osd_paths + + - name: scan all OSD directories + command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}" + environment: + CEPH_VOLUME_DEBUG: 1 + with_items: + - "{{ osd_paths.files }}" + + - name: list all OSD JSON files + find: + paths: /etc/ceph/osd + file_type: file + register: osd_configs + + - name: activate all scanned OSDs + command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}" + environment: + CEPH_VOLUME_DEBUG: 1 + with_items: + - "{{ osd_configs.files }}" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/vagrant_variables.yml 
b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/vagrant_variables.yml new file mode 100644 index 00000000..b4aa759a --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 2 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.1 +cluster_subnet: 192.168.2 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: ceph/ubuntu-xenial +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } + diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/group_vars/all new file mode 100644 index 00000000..7ab573b0 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/group_vars/all @@ -0,0 +1,19 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.1.0/24" +cluster_network: "192.168.2.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "filestore" +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/host_vars/osd0.yml new file mode 100644 index 00000000..2e1c7ee9 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/host_vars/osd0.yml @@ -0,0 +1,7 @@ +--- + +devices: + - '/dev/sdb' +dedicated_devices: + - '/dev/sdc' +osd_scenario: "non-collocated" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/host_vars/osd1.yml new file mode 100644 index 00000000..7e90071c --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/host_vars/osd1.yml @@ -0,0 +1,6 @@ +--- + +devices: + - '/dev/sdb' + - '/dev/sdc' +osd_scenario: "collocated" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/hosts new file mode 100644 index 00000000..e0c08b94 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/hosts @@ -0,0 +1,9 @@ +[mons] +mon0 monitor_interface=eth1 + +[osds] +osd0 +osd1 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/test.yml new file mode 100644 index 00000000..55ae7cc8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/test.yml @@ -0,0 +1,15 @@ +--- + +- hosts: osds + become: yes + tasks: + + - name: scan all running OSDs + command: "ceph-volume --cluster={{ cluster }} simple scan" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: activate all scanned OSDs + command: "ceph-volume --cluster={{ cluster }} simple activate --all" + environment: + CEPH_VOLUME_DEBUG: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/vagrant_variables.yml new file mode 100644 index 00000000..b4aa759a --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 2 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + + +# INSTALL SOURCE OF CEPH 
+# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.1 +cluster_subnet: 192.168.2 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: ceph/ubuntu-xenial +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } + diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/group_vars/all new file mode 100644 index 00000000..a27cfbad --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/group_vars/all @@ -0,0 +1,22 @@ +--- + +dmcrypt: True +ceph_dev: True +cluster: test +public_network: "192.168.1.0/24" +cluster_network: "192.168.2.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "filestore" +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + osd: + osd_dmcrypt_type: luks diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/host_vars/osd0.yml new file mode 100644 index 00000000..2e1c7ee9 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/host_vars/osd0.yml @@ -0,0 +1,7 @@ +--- + +devices: + - '/dev/sdb' +dedicated_devices: + - '/dev/sdc' +osd_scenario: "non-collocated" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/host_vars/osd1.yml new file mode 100644 index 00000000..7e90071c --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/host_vars/osd1.yml @@ -0,0 +1,6 @@ +--- + +devices: + - '/dev/sdb' + - '/dev/sdc' +osd_scenario: "collocated" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/hosts new file mode 100644 index 00000000..e0c08b94 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/hosts @@ -0,0 +1,9 @@ +[mons] +mon0 monitor_interface=eth1 + +[osds] +osd0 +osd1 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/test.yml new file mode 100644 index 00000000..24e2c035 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/test.yml @@ -0,0 +1,31 @@ +--- + +- hosts: osds + become: yes + tasks: + + - name: list all OSD directories + find: + paths: /var/lib/ceph/osd + file_type: directory + register: osd_paths + + - name: scan all OSD directories + command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}" + environment: + CEPH_VOLUME_DEBUG: 1 + with_items: + - "{{ osd_paths.files }}" + + - name: list all OSD JSON files + find: + paths: /etc/ceph/osd + file_type: file + register: osd_configs + + - name: activate all scanned OSDs + command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}" + environment: + CEPH_VOLUME_DEBUG: 1 + with_items: + - "{{ osd_configs.files }}" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/vagrant_variables.yml 
b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/vagrant_variables.yml new file mode 100644 index 00000000..b4aa759a --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 2 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.1 +cluster_subnet: 192.168.2 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: ceph/ubuntu-xenial +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } + diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/group_vars/all new file mode 100644 index 00000000..edac61b2 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/group_vars/all @@ -0,0 +1,22 @@ +--- + +dmcrypt: True +ceph_dev: True +cluster: test +public_network: "192.168.1.0/24" +cluster_network: "192.168.2.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "filestore" +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 + osd: + osd_dmcrypt_type: plain diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/host_vars/osd0.yml new file mode 100644 index 00000000..2e1c7ee9 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/host_vars/osd0.yml @@ -0,0 +1,7 @@ +--- + +devices: + - '/dev/sdb' +dedicated_devices: + - '/dev/sdc' +osd_scenario: "non-collocated" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/host_vars/osd1.yml new file mode 100644 index 00000000..7e90071c --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/host_vars/osd1.yml @@ -0,0 +1,6 @@ +--- + +devices: + - '/dev/sdb' + - '/dev/sdc' +osd_scenario: "collocated" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/hosts new file mode 100644 index 00000000..e0c08b94 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/hosts @@ -0,0 +1,9 @@ +[mons] +mon0 monitor_interface=eth1 + +[osds] +osd0 +osd1 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/test.yml new file mode 100644 index 00000000..24e2c035 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/test.yml @@ -0,0 +1,31 @@ +--- + +- hosts: osds + become: yes + tasks: + + - name: list all OSD directories + find: + paths: /var/lib/ceph/osd + file_type: directory + register: osd_paths + + - name: scan all OSD directories + command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}" + environment: + CEPH_VOLUME_DEBUG: 1 + with_items: + - "{{ osd_paths.files }}" + + - name: list all OSD JSON files + find: + paths: /etc/ceph/osd + file_type: file + register: osd_configs + + - name: activate all scanned OSDs + command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}" + environment: + CEPH_VOLUME_DEBUG: 1 + with_items: + - "{{ osd_configs.files }}" diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/vagrant_variables.yml 
b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/vagrant_variables.yml new file mode 100644 index 00000000..b4aa759a --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/vagrant_variables.yml @@ -0,0 +1,73 @@ +--- + +# DEPLOY CONTAINERIZED DAEMONS +docker: false + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 2 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + + +# INSTALL SOURCE OF CEPH +# valid values are 'stable' and 'dev' +ceph_install_source: stable + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.1 +cluster_subnet: 192.168.2 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# Disks +# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" +# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" +disks: "[ '/dev/sdb', '/dev/sdc' ]" + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: ceph/ubuntu-xenial +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. 
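+# For example (hypothetical names, not shipped boxes): with
+#   vagrant_box_url: https://example.com/boxes/custom-xenial.box
+# vagrant_box above would be set to custom-xenial.box.
+# The Fedora entries below show the same pairing: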
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box + +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } + diff --git a/src/ceph-volume/ceph_volume/tests/functional/tests/__init__.py b/src/ceph-volume/ceph_volume/tests/functional/tests/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/tests/__init__.py diff --git a/src/ceph-volume/ceph_volume/tests/functional/tests/conftest.py b/src/ceph-volume/ceph_volume/tests/functional/tests/conftest.py new file mode 100644 index 00000000..17cc996e --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/tests/conftest.py @@ -0,0 +1,103 @@ +import pytest +import os + + +@pytest.fixture() +def node(host, request): + """ This fixture represents a single node in the ceph cluster. Using the + host.ansible fixture provided by testinfra it can access all the ansible + variables provided to it by the specific test scenario being run. + + You must include this fixture on any tests that operate on a specific type + of node because it contains the logic to manage which tests a node + should run. + """ + ansible_vars = host.ansible.get_variables() + # tox/jenkins/user will pass in this environment variable. We need to do it this way + # because testinfra does not collect and provide ansible config passed in + # from using --extra-vars + ceph_dev_branch = os.environ.get("CEPH_DEV_BRANCH", "master") + group_names = ansible_vars["group_names"] + num_osd_ports = 4 + if 'mimic' in ceph_dev_branch or 'luminous' in ceph_dev_branch: + num_osd_ports = 2 + + # capture the initial/default state + test_is_applicable = False + for marker in request.node.iter_markers(): + if marker.name in group_names or marker.name == 'all': + test_is_applicable = True + break + # Check if any markers on the test method exist in the node's group_names. + # If they do not, this test is not valid for the node being tested. + if not test_is_applicable: + reason = "%s: Not a valid test for node type: %s" % ( + request.function, group_names) + pytest.skip(reason) + + osd_ids = [] + osds = [] + cluster_address = "" + # I can assume eth1 because I know all the vagrant + # boxes we test with use that interface + address = host.interface("eth1").addresses[0] + subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1]) + num_mons = len(ansible_vars["groups"]["mons"]) + num_osds = len(ansible_vars.get("devices", [])) + if not num_osds: + num_osds = len(ansible_vars.get("lvm_volumes", [])) + osds_per_device = ansible_vars.get("osds_per_device", 1) + num_osds = num_osds * osds_per_device + + # If number of devices doesn't map to number of OSDs, allow tests to define + # that custom number, defaulting it to ``num_devices`` + num_osds = ansible_vars.get('num_osds', num_osds) + cluster_name = ansible_vars.get("cluster", "ceph") + conf_path = "/etc/ceph/{}.conf".format(cluster_name) + if "osds" in group_names: + # I can assume eth2 because I know all the vagrant + # boxes we test with use that interface.
OSDs are the only + # nodes that have this interface. + cluster_address = host.interface("eth2").addresses[0] + cmd = host.run('sudo ls /var/lib/ceph/osd/ | sed "s/.*-//"') + if cmd.rc == 0: + osd_ids = cmd.stdout.rstrip("\n").split("\n") + osds = osd_ids + + data = dict( + address=address, + subnet=subnet, + vars=ansible_vars, + osd_ids=osd_ids, + num_mons=num_mons, + num_osds=num_osds, + num_osd_ports=num_osd_ports, + cluster_name=cluster_name, + conf_path=conf_path, + cluster_address=cluster_address, + osds=osds, + ) + return data + + +def pytest_collection_modifyitems(session, config, items): + for item in items: + test_path = item.location[0] + if "mon" in test_path: + item.add_marker(pytest.mark.mons) + elif "osd" in test_path: + item.add_marker(pytest.mark.osds) + elif "mds" in test_path: + item.add_marker(pytest.mark.mdss) + elif "mgr" in test_path: + item.add_marker(pytest.mark.mgrs) + elif "rbd-mirror" in test_path: + item.add_marker(pytest.mark.rbdmirrors) + elif "rgw" in test_path: + item.add_marker(pytest.mark.rgws) + elif "nfs" in test_path: + item.add_marker(pytest.mark.nfss) + elif "iscsi" in test_path: + item.add_marker(pytest.mark.iscsigws) + else: + item.add_marker(pytest.mark.all) diff --git a/src/ceph-volume/ceph_volume/tests/functional/tests/osd/__init__.py b/src/ceph-volume/ceph_volume/tests/functional/tests/osd/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/tests/osd/__init__.py diff --git a/src/ceph-volume/ceph_volume/tests/functional/tests/osd/test_osds.py b/src/ceph-volume/ceph_volume/tests/functional/tests/osd/test_osds.py new file mode 100644 index 00000000..6d12babd --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/tests/osd/test_osds.py @@ -0,0 +1,60 @@ +import json + + +class TestOSDs(object): + + def test_ceph_osd_package_is_installed(self, node, host): + assert host.package("ceph-osd").is_installed + + def test_osds_listen_on_public_network(self, node, host): + # TODO: figure out way to parameterize this test + nb_port = (node["num_osds"] * node["num_osd_ports"]) + assert host.check_output( + "netstat -lntp | grep ceph-osd | grep %s | wc -l" % (node["address"])) == str(nb_port) # noqa E501 + + def test_osds_listen_on_cluster_network(self, node, host): + # TODO: figure out way to parameterize this test + nb_port = (node["num_osds"] * node["num_osd_ports"]) + assert host.check_output("netstat -lntp | grep ceph-osd | grep %s | wc -l" % # noqa E501 + (node["cluster_address"])) == str(nb_port) + + def test_osd_services_are_running(self, node, host): + # TODO: figure out way to parameterize node['osds'] for this test + for osd in node["osds"]: + assert host.service("ceph-osd@%s" % osd).is_running + + def test_osd_are_mounted(self, node, host): + # TODO: figure out way to parameterize node['osd_ids'] for this test + for osd_id in node["osd_ids"]: + osd_path = "/var/lib/ceph/osd/{cluster}-{osd_id}".format( + cluster=node["cluster_name"], + osd_id=osd_id, + ) + assert host.mount_point(osd_path).exists + + def test_ceph_volume_is_installed(self, node, host): + host.exists('ceph-volume') + + def test_ceph_volume_systemd_is_installed(self, node, host): + host.exists('ceph-volume-systemd') + + def _get_osd_id_from_host(self, node, osd_tree): + children = [] + for n in osd_tree['nodes']: + if n['name'] == node['vars']['inventory_hostname'] and n['type'] == 'host': # noqa E501 + children = n['children'] + return children + + def _get_nb_up_osds_from_ids(self, node, osd_tree): +
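+ # A sketch of the `ceph osd tree -f json` shape this helper consumes
+ # (field names match the command output; the values are illustrative):
+ #   {'nodes': [{'id': -3, 'name': 'osd0', 'type': 'host', 'children': [0, 1]},
+ #              {'id': 0, 'status': 'up'}, {'id': 1, 'status': 'down'}]}
+ # For that sample, only the host's children with status 'up' are counted,
+ # so the helper returns 1.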
nb_up = 0 + ids = self._get_osd_id_from_host(node, osd_tree) + for n in osd_tree['nodes']: + if n['id'] in ids and n['status'] == 'up': + nb_up += 1 + return nb_up + + def test_all_osds_are_up_and_in(self, node, host): + cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format( # noqa E501 + cluster=node["cluster_name"]) + output = json.loads(host.check_output(cmd)) + assert node["num_osds"] == self._get_nb_up_osds_from_ids(node, output) diff --git a/src/ceph-volume/ceph_volume/tests/systemd/test_main.py b/src/ceph-volume/ceph_volume/tests/systemd/test_main.py new file mode 100644 index 00000000..0af52e8d --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/systemd/test_main.py @@ -0,0 +1,51 @@ +import pytest +from ceph_volume import exceptions, conf +from ceph_volume.systemd.main import parse_subcommand, main, process + + +class TestParseSubcommand(object): + + def test_no_subcommand_found(self): + with pytest.raises(exceptions.SuffixParsingError): + parse_subcommand('') + + def test_sub_command_is_found(self): + result = parse_subcommand('lvm-1-sha-1-something-0') + assert result == 'lvm' + + +class Capture(object): + + def __init__(self, *a, **kw): + self.a = a + self.kw = kw + self.calls = [] + + def __call__(self, *a, **kw): + self.calls.append(a) + self.calls.append(kw) + + +class TestMain(object): + + def setup(self): + conf.log_path = '/tmp/' + + def test_no_arguments_parsing_error(self): + with pytest.raises(RuntimeError): + main(args=[]) + + def test_parsing_suffix_error(self): + with pytest.raises(exceptions.SuffixParsingError): + main(args=['asdf']) + + def test_correct_command(self, monkeypatch): + run = Capture() + monkeypatch.setattr(process, 'run', run) + main(args=['ceph-volume-systemd', 'lvm-8715BEB4-15C5-49DE-BA6F-401086EC7B41-0' ]) + command = run.calls[0][0] + assert command == [ + 'ceph-volume', + 'lvm', 'trigger', + '8715BEB4-15C5-49DE-BA6F-401086EC7B41-0' + ] diff --git a/src/ceph-volume/ceph_volume/tests/systemd/test_systemctl.py b/src/ceph-volume/ceph_volume/tests/systemd/test_systemctl.py new file mode 100644 index 00000000..8eec4a3d --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/systemd/test_systemctl.py @@ -0,0 +1,21 @@ +import pytest +from ceph_volume.systemd import systemctl + +class TestSystemctl(object): + + @pytest.mark.parametrize("stdout,expected", [ + (['Id=ceph-osd@1.service', '', 'Id=ceph-osd@2.service'], ['1','2']), + (['Id=ceph-osd1.service',], []), + (['Id=ceph-osd@1'], ['1']), + ([], []), + ]) + def test_get_running_osd_ids(self, stub_call, stdout, expected): + stub_call((stdout, [], 0)) + osd_ids = systemctl.get_running_osd_ids() + assert osd_ids == expected + + def test_returns_empty_list_on_nonzero_return_code(self, stub_call): + stdout = ['Id=ceph-osd@1.service', '', 'Id=ceph-osd@2.service'] + stub_call((stdout, [], 1)) + osd_ids = systemctl.get_running_osd_ids() + assert osd_ids == [] diff --git a/src/ceph-volume/ceph_volume/tests/test_configuration.py b/src/ceph-volume/ceph_volume/tests/test_configuration.py new file mode 100644 index 00000000..9af6cd9b --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/test_configuration.py @@ -0,0 +1,117 @@ +import os +try: + from cStringIO import StringIO +except ImportError: # pragma: no cover + from io import StringIO # pragma: no cover +from textwrap import dedent +import pytest +from ceph_volume import configuration, exceptions + +tabbed_conf = """ +[global] + default = 0 + other_h = 1 # 
comment + other_c = 1 ; comment + colon = ; + hash = # +""" + + +class TestConf(object): + + def setup(self): + self.conf_file = StringIO(dedent(""" + [foo] + default = 0 + """)) + + def test_get_non_existing_list(self): + cfg = configuration.Conf() + cfg.is_valid = lambda: True + cfg.read_conf(self.conf_file) + assert cfg.get_list('global', 'key') == [] + + def test_get_non_existing_list_get_default(self): + cfg = configuration.Conf() + cfg.is_valid = lambda: True + cfg.read_conf(self.conf_file) + assert cfg.get_list('global', 'key', ['a']) == ['a'] + + def test_get_rid_of_comments(self): + cfg = configuration.Conf() + cfg.is_valid = lambda: True + conf_file = StringIO(dedent(""" + [foo] + default = 0 # this is a comment + """)) + + cfg.read_conf(conf_file) + assert cfg.get_list('foo', 'default') == ['0'] + + def test_gets_split_on_commas(self): + cfg = configuration.Conf() + cfg.is_valid = lambda: True + conf_file = StringIO(dedent(""" + [foo] + default = 0,1,2,3 # this is a comment + """)) + + cfg.read_conf(conf_file) + assert cfg.get_list('foo', 'default') == ['0', '1', '2', '3'] + + def test_spaces_and_tabs_are_ignored(self): + cfg = configuration.Conf() + cfg.is_valid = lambda: True + conf_file = StringIO(dedent(""" + [foo] + default = 0, 1, 2 ,3 # this is a comment + """)) + + cfg.read_conf(conf_file) + assert cfg.get_list('foo', 'default') == ['0', '1', '2', '3'] + + +class TestLoad(object): + + def test_load_from_path(self, tmpdir): + conf_path = os.path.join(str(tmpdir), 'ceph.conf') + with open(conf_path, 'w') as conf: + conf.write(tabbed_conf) + result = configuration.load(conf_path) + assert result.get('global', 'default') == '0' + + def test_load_with_colon_comments(self, tmpdir): + conf_path = os.path.join(str(tmpdir), 'ceph.conf') + with open(conf_path, 'w') as conf: + conf.write(tabbed_conf) + result = configuration.load(conf_path) + assert result.get('global', 'other_c') == '1' + + def test_load_with_hash_comments(self, tmpdir): + conf_path = os.path.join(str(tmpdir), 'ceph.conf') + with open(conf_path, 'w') as conf: + conf.write(tabbed_conf) + result = configuration.load(conf_path) + assert result.get('global', 'other_h') == '1' + + def test_path_does_not_exist(self): + with pytest.raises(exceptions.ConfigurationError): + conf = configuration.load('/path/does/not/exist/ceph.con') + conf.is_valid() + + def test_unable_to_read_configuration(self, tmpdir, capsys): + ceph_conf = os.path.join(str(tmpdir), 'ceph.conf') + with open(ceph_conf, 'w') as config: + config.write(']broken] config\n[[') + with pytest.raises(RuntimeError): + configuration.load(ceph_conf) + stdout, stderr = capsys.readouterr() + assert 'File contains no section headers' in stderr + + @pytest.mark.parametrize('commented', ['colon','hash']) + def test_comment_as_a_value(self, tmpdir, commented): + conf_path = os.path.join(str(tmpdir), 'ceph.conf') + with open(conf_path, 'w') as conf: + conf.write(tabbed_conf) + result = configuration.load(conf_path) + assert result.get('global', commented) == '' diff --git a/src/ceph-volume/ceph_volume/tests/test_decorators.py b/src/ceph-volume/ceph_volume/tests/test_decorators.py new file mode 100644 index 00000000..8df89145 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/test_decorators.py @@ -0,0 +1,71 @@ +import os +import pytest +from ceph_volume import exceptions, decorators, terminal + + +class TestNeedsRoot(object): + + def test_is_root(self, monkeypatch): + def func(): + return True + monkeypatch.setattr(decorators.os, 'getuid', lambda: 0) + assert
decorators.needs_root(func)() is True + + def test_is_not_root(self, monkeypatch): + def func(): + return True # pragma: no cover + monkeypatch.setattr(decorators.os, 'getuid', lambda: 20) + with pytest.raises(exceptions.SuperUserError) as error: + decorators.needs_root(func)() + + msg = 'This command needs to be executed with sudo or as root' + assert str(error.value) == msg + + +class TestExceptionMessage(object): + + def test_has_str_method(self): + result = decorators.make_exception_message(RuntimeError('an error')) + expected = "%s %s\n" % (terminal.red_arrow, 'RuntimeError: an error') + assert result == expected + + def test_has_no_str_method(self): + class Error(Exception): + pass + result = decorators.make_exception_message(Error()) + expected = "%s %s\n" % (terminal.red_arrow, 'Error') + assert result == expected + + +class TestCatches(object): + + def teardown(self): + try: + del(os.environ['CEPH_VOLUME_DEBUG']) + except KeyError: + pass + + def test_ceph_volume_debug_enabled(self): + os.environ['CEPH_VOLUME_DEBUG'] = '1' + @decorators.catches() # noqa + def func(): + raise RuntimeError() + with pytest.raises(RuntimeError): + func() + + def test_ceph_volume_debug_disabled_no_exit(self, capsys): + @decorators.catches(exit=False) + def func(): + raise RuntimeError() + func() + stdout, stderr = capsys.readouterr() + assert 'RuntimeError\n' in stderr + + def test_ceph_volume_debug_exits(self, capsys): + @decorators.catches() + def func(): + raise RuntimeError() + with pytest.raises(SystemExit): + func() + stdout, stderr = capsys.readouterr() + assert 'RuntimeError\n' in stderr diff --git a/src/ceph-volume/ceph_volume/tests/test_inventory.py b/src/ceph-volume/ceph_volume/tests/test_inventory.py new file mode 100644 index 00000000..9721fccd --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/test_inventory.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- + +import pytest +from ceph_volume.util.device import Devices + + +@pytest.fixture +def device_report_keys(device_info): + device_info(devices={ + # example output of disk.get_devices() + '/dev/sdb': {'human_readable_size': '1.82 TB', + 'locked': 0, + 'model': 'PERC H700', + 'nr_requests': '128', + 'partitions': {}, + 'path': '/dev/sdb', + 'removable': '0', + 'rev': '2.10', + 'ro': '0', + 'rotational': '1', + 'sas_address': '', + 'sas_device_handle': '', + 'scheduler_mode': 'cfq', + 'sectors': 0, + 'sectorsize': '512', + 'size': 1999844147200.0, + 'support_discard': '', + 'vendor': 'DELL', + 'device_id': 'Vendor-Model-Serial'} + } + ) + report = Devices().json_report()[0] + return list(report.keys()) + +@pytest.fixture +def device_sys_api_keys(device_info): + device_info(devices={ + # example output of disk.get_devices() + '/dev/sdb': {'human_readable_size': '1.82 TB', + 'locked': 0, + 'model': 'PERC H700', + 'nr_requests': '128', + 'partitions': {}, + 'path': '/dev/sdb', + 'removable': '0', + 'rev': '2.10', + 'ro': '0', + 'rotational': '1', + 'sas_address': '', + 'sas_device_handle': '', + 'scheduler_mode': 'cfq', + 'sectors': 0, + 'sectorsize': '512', + 'size': 1999844147200.0, + 'support_discard': '', + 'vendor': 'DELL'} + } + ) + report = Devices().json_report()[0] + return list(report['sys_api'].keys()) + + +class TestInventory(object): + + expected_keys = [ + 'path', + 'rejected_reasons', + 'sys_api', + 'available', + 'lvs', + 'device_id', + ] + + expected_sys_api_keys = [ + 'human_readable_size', + 'locked', + 'model', + 'nr_requests', + 'partitions', + 'path', + 'removable', + 'rev', + 'ro', + 'rotational', + 'sas_address', + 
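+ # (these keys mirror the disk.get_devices() sample wired into the
+ # device_report_keys/device_sys_api_keys fixtures above)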
'sas_device_handle', + 'scheduler_mode', + 'sectors', + 'sectorsize', + 'size', + 'support_discard', + 'vendor', + ] + + def test_json_inventory_keys_unexpected(self, device_report_keys): + for k in device_report_keys: + assert k in self.expected_keys, "unexpected key {} in report".format(k) + + def test_json_inventory_keys_missing(self, device_report_keys): + for k in self.expected_keys: + assert k in device_report_keys, "expected key {} in report".format(k) + + def test_sys_api_keys_unexpected(self, device_sys_api_keys): + for k in device_sys_api_keys: + assert k in self.expected_sys_api_keys, "unexpected key {} in sys_api field".format(k) + + def test_sys_api_keys_missing(self, device_sys_api_keys): + for k in self.expected_sys_api_keys: + assert k in device_sys_api_keys, "expected key {} in sys_api field".format(k) + diff --git a/src/ceph-volume/ceph_volume/tests/test_main.py b/src/ceph-volume/ceph_volume/tests/test_main.py new file mode 100644 index 00000000..afe9a234 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/test_main.py @@ -0,0 +1,69 @@ +import os +import pytest +from ceph_volume import main + + +class TestVolume(object): + + def test_main_spits_help_with_no_arguments(self, capsys): + with pytest.raises(SystemExit): + main.Volume(argv=[]) + stdout, stderr = capsys.readouterr() + assert 'Log Path' in stdout + + def test_warn_about_using_help_for_full_options(self, capsys): + with pytest.raises(SystemExit): + main.Volume(argv=[]) + stdout, stderr = capsys.readouterr() + assert 'See "ceph-volume --help" for full list' in stdout + + def test_environ_vars_show_up(self, capsys): + os.environ['CEPH_CONF'] = '/opt/ceph.conf' + with pytest.raises(SystemExit): + main.Volume(argv=[]) + stdout, stderr = capsys.readouterr() + assert 'CEPH_CONF' in stdout + assert '/opt/ceph.conf' in stdout + + def test_flags_are_parsed_with_help(self, capsys): + with pytest.raises(SystemExit): + main.Volume(argv=['ceph-volume', '--help']) + stdout, stderr = capsys.readouterr() + assert '--cluster' in stdout + assert '--log-path' in stdout + + def test_log_ignoring_missing_ceph_conf(self, caplog): + with pytest.raises(SystemExit) as error: + main.Volume(argv=['ceph-volume', '--cluster', 'barnacle', 'lvm', '--help']) + # make sure we aren't causing an actual error + assert error.value.code == 0 + log = caplog.records[-1] + assert log.message == 'ignoring inability to load ceph.conf' + assert log.levelname == 'ERROR' + + def test_logs_current_command(self, caplog): + with pytest.raises(SystemExit) as error: + main.Volume(argv=['ceph-volume', '--cluster', 'barnacle', 'lvm', '--help']) + # make sure we aren't causing an actual error + assert error.value.code == 0 + log = caplog.records[-2] + assert log.message == 'Running command: ceph-volume --cluster barnacle lvm --help' + assert log.levelname == 'INFO' + + def test_logs_set_level_error(self, caplog): + with pytest.raises(SystemExit) as error: + main.Volume(argv=['ceph-volume', '--log-level', 'error', '--cluster', 'barnacle', 'lvm', '--help']) + # make sure we aren't causing an actual error + assert error.value.code == 0 + assert caplog.records + # only log levels of 'ERROR' or above should be captured + for log in caplog.records: + assert log.levelname in ['ERROR', 'CRITICAL'] + + def test_logs_incorrect_log_level(self, capsys): + with pytest.raises(SystemExit) as error: + main.Volume(argv=['ceph-volume', '--log-level', 'foo', '--cluster', 'barnacle', 'lvm', '--help']) + # make sure this is an error + assert error.value.code != 0 + stdout, stderr = 
capsys.readouterr() + assert "invalid choice" in stderr diff --git a/src/ceph-volume/ceph_volume/tests/test_process.py b/src/ceph-volume/ceph_volume/tests/test_process.py new file mode 100644 index 00000000..46e5c40e --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/test_process.py @@ -0,0 +1,92 @@ +import pytest +import logging +from ceph_volume.tests.conftest import Factory +from ceph_volume import process + + +@pytest.fixture +def mock_call(monkeypatch): + """ + Monkeypatches process.call, so that a caller can add behavior to the response + """ + def apply(stdout=None, stderr=None, returncode=0): + stdout_stream = Factory(read=lambda: stdout) + stderr_stream = Factory(read=lambda: stderr) + return_value = Factory( + stdout=stdout_stream, + stderr=stderr_stream, + wait=lambda: returncode, + communicate=lambda x: (stdout, stderr, returncode) + ) + + monkeypatch.setattr( + 'ceph_volume.process.subprocess.Popen', + lambda *a, **kw: return_value) + + return apply + + +class TestCall(object): + + def test_stderr_terminal_and_logfile(self, mock_call, caplog, capsys): + caplog.set_level(logging.INFO) + mock_call(stdout='stdout\n', stderr='some stderr message\n') + process.call(['ls'], terminal_verbose=True) + out, err = capsys.readouterr() + log_lines = [line[-1] for line in caplog.record_tuples] + assert 'Running command: ' in log_lines[0] + assert 'ls' in log_lines[0] + assert 'stderr some stderr message' in log_lines[-1] + assert 'some stderr message' in err + + def test_stderr_terminal_and_logfile_off(self, mock_call, caplog, capsys): + caplog.set_level(logging.INFO) + mock_call(stdout='stdout\n', stderr='some stderr message\n') + process.call(['ls'], terminal_verbose=False) + out, err = capsys.readouterr() + log_lines = [line[-1] for line in caplog.record_tuples] + assert 'Running command: ' in log_lines[0] + assert 'ls' in log_lines[0] + assert 'stderr some stderr message' in log_lines[-1] + assert out == '' + + def test_verbose_on_failure(self, mock_call, caplog, capsys): + caplog.set_level(logging.INFO) + mock_call(stdout='stdout\n', stderr='stderr\n', returncode=1) + process.call(['ls'], terminal_verbose=False, logfile_verbose=False) + out, err = capsys.readouterr() + log_lines = '\n'.join([line[-1] for line in caplog.record_tuples]) + assert 'Running command: ' in log_lines + assert 'ls' in log_lines + assert 'stderr' in log_lines + assert 'stdout: stdout' in err + assert out == '' + + def test_silent_verbose_on_failure(self, mock_call, caplog, capsys): + caplog.set_level(logging.INFO) + mock_call(stdout='stdout\n', stderr='stderr\n', returncode=1) + process.call(['ls'], verbose_on_failure=False) + out, err = capsys.readouterr() + log_lines = '\n'.join([line[-1] for line in caplog.record_tuples]) + assert 'Running command: ' in log_lines + assert 'ls' in log_lines + assert 'stderr' in log_lines + assert out == '' + + +class TestFunctionalCall(object): + + def test_stdin(self): + process.call(['xargs', 'ls'], stdin="echo '/'") + + def test_unicode_encoding(self): + process.call(['echo', u'\xd0']) + + def test_unicode_encoding_stdin(self): + process.call(['echo'], stdin=u'\xd0'.encode('utf-8')) + + +class TestFunctionalRun(object): + + def test_log_descriptors(self): + process.run(['ls', '-l']) diff --git a/src/ceph-volume/ceph_volume/tests/test_terminal.py b/src/ceph-volume/ceph_volume/tests/test_terminal.py new file mode 100644 index 00000000..fdf21907 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/test_terminal.py @@ -0,0 +1,143 @@ +# -*- mode:python; tab-width:4; 
indent-tabs-mode:nil; coding:utf-8 -*- + +import codecs +import io +try: + from io import StringIO +except ImportError: + from StringIO import StringIO +import pytest +import sys +from ceph_volume import terminal +from ceph_volume.log import setup_console + + +class SubCommand(object): + + help = "this is the subcommand help" + + def __init__(self, argv): + self.argv = argv + + def main(self): + pass + + +class BadSubCommand(object): + + def __init__(self, argv): + self.argv = argv + + def main(self): + raise SystemExit(100) + + +class TestSubhelp(object): + + def test_no_sub_command_help(self): + assert terminal.subhelp({}) == '' + + def test_single_level_help(self): + result = terminal.subhelp({'sub': SubCommand}) + + assert 'this is the subcommand help' in result + + def test_has_title_header(self): + result = terminal.subhelp({'sub': SubCommand}) + assert 'Available subcommands:' in result + + def test_command_with_no_help(self): + class SubCommandNoHelp(object): + pass + result = terminal.subhelp({'sub': SubCommandNoHelp}) + assert result == '' + + +class TestDispatch(object): + + def test_no_subcommand_found(self): + result = terminal.dispatch({'sub': SubCommand}, argv=[]) + assert result is None + + def test_no_main_found(self): + class NoMain(object): + + def __init__(self, argv): + pass + result = terminal.dispatch({'sub': NoMain}, argv=['sub']) + assert result is None + + def test_subcommand_found_and_dispatched(self): + with pytest.raises(SystemExit) as error: + terminal.dispatch({'sub': SubCommand}, argv=['sub']) + assert str(error.value) == '0' + + def test_subcommand_found_and_dispatched_with_errors(self): + with pytest.raises(SystemExit) as error: + terminal.dispatch({'sub': BadSubCommand}, argv=['sub']) + assert str(error.value) == '100' + + +@pytest.fixture +def stream(): + def make_stream(buffer, encoding): + # mock a stdout with given encoding + if sys.version_info >= (3, 0): + stderr = sys.stderr + stream = io.TextIOWrapper(buffer, + encoding=encoding, + errors=stderr.errors, + newline=stderr.newlines, + line_buffering=stderr.line_buffering) + else: + stream = codecs.getwriter(encoding)(buffer) + # StreamWriter does not have encoding attached to it, it will ask + # the inner buffer for "encoding" attribute in this case + stream.encoding = encoding + return stream + return make_stream + + +class TestWriteUnicode(object): + + def setup(self): + self.octopus_and_squid_en = u'octopus and squid' + self.octopus_and_squid_zh = u'章鱼和鱿鱼' + self.message = self.octopus_and_squid_en + self.octopus_and_squid_zh + setup_console() + + def test_stdout_writer(self, capsys): + # should work with whatever stdout is + terminal.stdout(self.message) + _, err = capsys.readouterr() + assert self.octopus_and_squid_en in err + assert self.octopus_and_squid_zh in err + + @pytest.mark.parametrize('encoding', ['ascii', 'utf8']) + def test_writer_log(self, stream, encoding, monkeypatch, caplog): + writer = StringIO() + terminal._Write(_writer=writer).raw(self.message) + writer.flush() + writer.seek(0) + output = writer.readlines()[0] + assert self.octopus_and_squid_en in output + + @pytest.mark.parametrize('encoding', ['utf8']) + def test_writer(self, encoding, stream, monkeypatch, capsys, caplog): + buffer = io.BytesIO() + writer = stream(buffer, encoding) + terminal._Write(_writer=writer).raw(self.message) + writer.flush() + writer.seek(0) + val = buffer.getvalue() + assert self.octopus_and_squid_en.encode(encoding) in val + + def test_writer_uses_log_on_unicodeerror(self, stream, monkeypatch,
capture): + + if sys.version_info > (3,): + pytest.skip("Something breaks inside of pytest's capsys") + monkeypatch.setattr(terminal.terminal_logger, 'info', capture) + buffer = io.BytesIO() + writer = stream(buffer, 'ascii') + terminal._Write(_writer=writer).raw(self.message) + assert self.octopus_and_squid_en in capture.calls[0]['args'][0] diff --git a/src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py b/src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py new file mode 100644 index 00000000..d4565ef4 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py @@ -0,0 +1,89 @@ +import argparse +import pytest +import os +from ceph_volume import exceptions +from ceph_volume.util import arg_validators + + +class TestOSDPath(object): + + def setup(self): + self.validator = arg_validators.OSDPath() + + def test_is_not_root(self, monkeypatch): + monkeypatch.setattr(os, 'getuid', lambda: 100) + with pytest.raises(exceptions.SuperUserError): + self.validator('') + + def test_path_is_not_a_directory(self, is_root, tmpfile, monkeypatch): + monkeypatch.setattr(arg_validators.disk, 'is_partition', lambda x: False) + validator = arg_validators.OSDPath() + with pytest.raises(argparse.ArgumentError): + validator(tmpfile()) + + def test_files_are_missing(self, is_root, tmpdir, monkeypatch): + tmppath = str(tmpdir) + monkeypatch.setattr(arg_validators.disk, 'is_partition', lambda x: False) + validator = arg_validators.OSDPath() + with pytest.raises(argparse.ArgumentError) as error: + validator(tmppath) + assert 'Required file (ceph_fsid) was not found in OSD' in str(error.value) + + +class TestExcludeGroupOptions(object): + + def setup(self): + self.parser = argparse.ArgumentParser() + + def test_flags_in_one_group(self): + argv = ['<prog>', '--filestore', '--bar'] + filestore_group = self.parser.add_argument_group('filestore') + bluestore_group = self.parser.add_argument_group('bluestore') + filestore_group.add_argument('--filestore') + bluestore_group.add_argument('--bluestore') + result = arg_validators.exclude_group_options( + self.parser, + ['filestore', 'bluestore'], + argv=argv + ) + assert result is None + + def test_flags_in_no_group(self): + argv = ['<prog>', '--foo', '--bar'] + filestore_group = self.parser.add_argument_group('filestore') + bluestore_group = self.parser.add_argument_group('bluestore') + filestore_group.add_argument('--filestore') + bluestore_group.add_argument('--bluestore') + result = arg_validators.exclude_group_options( + self.parser, + ['filestore', 'bluestore'], + argv=argv + ) + assert result is None + + def test_flags_conflict(self, capsys): + argv = ['<prog>', '--filestore', '--bluestore'] + filestore_group = self.parser.add_argument_group('filestore') + bluestore_group = self.parser.add_argument_group('bluestore') + filestore_group.add_argument('--filestore') + bluestore_group.add_argument('--bluestore') + + arg_validators.exclude_group_options( + self.parser, ['filestore', 'bluestore'], argv=argv + ) + stdout, stderr = capsys.readouterr() + assert 'Cannot use --filestore (filestore) with --bluestore (bluestore)' in stderr + + +class TestValidDevice(object): + + def setup(self): + self.validator = arg_validators.ValidDevice() + + def test_path_is_valid(self, fake_call): + result = self.validator('/') + assert result.abspath == '/' + + def test_path_is_invalid(self, fake_call): + with pytest.raises(argparse.ArgumentError): + self.validator('/device/does/not/exist')
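A minimal, hypothetical sketch of how a command-line entry point might wire up ``exclude_group_options`` as exercised by TestExcludeGroupOptions above (the parser layout and flags are illustrative, not ceph-volume's actual CLI):

    import argparse
    import sys
    from ceph_volume.util import arg_validators

    parser = argparse.ArgumentParser()
    filestore_group = parser.add_argument_group('filestore')
    bluestore_group = parser.add_argument_group('bluestore')
    filestore_group.add_argument('--filestore', action='store_true')
    bluestore_group.add_argument('--bluestore', action='store_true')
    # per test_flags_conflict, mixing flags from both groups prints a
    # warning to stderr; flags confined to one group return None
    arg_validators.exclude_group_options(parser, ['filestore', 'bluestore'], argv=sys.argv)
    args = parser.parse_args()

diff --git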
a/src/ceph-volume/ceph_volume/tests/util/test_device.py b/src/ceph-volume/ceph_volume/tests/util/test_device.py new file mode 100644 index 00000000..7dd8982f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/util/test_device.py @@ -0,0 +1,577 @@ +import pytest +from copy import deepcopy +from ceph_volume.util import device +from ceph_volume.api import lvm as api + + +class TestDevice(object): + + def test_sys_api(self, monkeypatch, device_info): + volume = api.Volume(lv_name='lv', lv_uuid='y', vg_name='vg', + lv_tags={}, lv_path='/dev/VolGroup/lv') + volumes = [] + volumes.append(volume) + monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: + deepcopy(volumes)) + + data = {"/dev/sda": {"foo": "bar"}} + lsblk = {"TYPE": "disk"} + device_info(devices=data,lsblk=lsblk) + disk = device.Device("/dev/sda") + assert disk.sys_api + assert "foo" in disk.sys_api + + def test_lvm_size(self, monkeypatch, device_info): + volume = api.Volume(lv_name='lv', lv_uuid='y', vg_name='vg', + lv_tags={}, lv_path='/dev/VolGroup/lv') + volumes = [] + volumes.append(volume) + monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: + deepcopy(volumes)) + + # 5GB in size + data = {"/dev/sda": {"size": "5368709120"}} + lsblk = {"TYPE": "disk"} + device_info(devices=data,lsblk=lsblk) + disk = device.Device("/dev/sda") + assert disk.lvm_size.gb == 4 + + def test_lvm_size_rounds_down(self, device_info): + # 5.5GB in size + data = {"/dev/sda": {"size": "5905580032"}} + lsblk = {"TYPE": "disk"} + device_info(devices=data,lsblk=lsblk) + disk = device.Device("/dev/sda") + assert disk.lvm_size.gb == 4 + + def test_is_lv(self, device_info): + data = {"lv_path": "vg/lv", "vg_name": "vg", "name": "lv"} + lsblk = {"TYPE": "lvm"} + device_info(lv=data,lsblk=lsblk) + disk = device.Device("vg/lv") + assert disk.is_lv + + def test_vgs_is_empty(self, device_info, monkeypatch): + BarPVolume = api.PVolume(pv_name='/dev/sda', pv_uuid="0000", + pv_tags={}) + pvolumes = [] + pvolumes.append(BarPVolume) + lsblk = {"TYPE": "disk"} + device_info(lsblk=lsblk) + monkeypatch.setattr(api, 'get_pvs', lambda **kwargs: {}) + + disk = device.Device("/dev/nvme0n1") + assert disk.vgs == [] + + def test_vgs_is_not_empty(self, device_info, monkeypatch): + vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=6, + vg_extent_size=1073741824) + monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg]) + lsblk = {"TYPE": "disk"} + device_info(lsblk=lsblk) + disk = device.Device("/dev/nvme0n1") + assert len(disk.vgs) == 1 + + def test_device_is_device(self, device_info): + data = {"/dev/sda": {"foo": "bar"}} + lsblk = {"TYPE": "device"} + device_info(devices=data, lsblk=lsblk) + disk = device.Device("/dev/sda") + assert disk.is_device is True + + def test_device_is_rotational(self, device_info): + data = {"/dev/sda": {"rotational": "1"}} + lsblk = {"TYPE": "device"} + device_info(devices=data, lsblk=lsblk) + disk = device.Device("/dev/sda") + assert disk.rotational + + def test_device_is_not_rotational(self, device_info): + data = {"/dev/sda": {"rotational": "0"}} + lsblk = {"TYPE": "device"} + device_info(devices=data, lsblk=lsblk) + disk = device.Device("/dev/sda") + assert not disk.rotational + + def test_device_is_rotational_lsblk(self, device_info): + data = {"/dev/sda": {"foo": "bar"}} + lsblk = {"TYPE": "device", "ROTA": "1"} + device_info(devices=data, lsblk=lsblk) + disk = device.Device("/dev/sda") + assert disk.rotational + + def test_device_is_not_rotational_lsblk(self, device_info): + data = {"/dev/sda": {"rotational": "0"}} + lsblk = {"TYPE": 
"device", "ROTA": "0"} + device_info(devices=data, lsblk=lsblk) + disk = device.Device("/dev/sda") + assert not disk.rotational + + def test_device_is_rotational_defaults_true(self, device_info): + # rotational will default true if no info from sys_api or lsblk is found + data = {"/dev/sda": {"foo": "bar"}} + lsblk = {"TYPE": "device", "foo": "bar"} + device_info(devices=data, lsblk=lsblk) + disk = device.Device("/dev/sda") + assert disk.rotational + + def test_disk_is_device(self, device_info): + data = {"/dev/sda": {"foo": "bar"}} + lsblk = {"TYPE": "disk"} + device_info(devices=data, lsblk=lsblk) + disk = device.Device("/dev/sda") + assert disk.is_device is True + + def test_is_partition(self, device_info): + data = {"/dev/sda1": {"foo": "bar"}} + lsblk = {"TYPE": "part"} + device_info(devices=data, lsblk=lsblk) + disk = device.Device("/dev/sda1") + assert disk.is_partition + + def test_is_not_acceptable_device(self, device_info): + data = {"/dev/dm-0": {"foo": "bar"}} + lsblk = {"TYPE": "mpath"} + device_info(devices=data, lsblk=lsblk) + disk = device.Device("/dev/dm-0") + assert not disk.is_device + + def test_is_not_lvm_memeber(self, device_info): + data = {"/dev/sda1": {"foo": "bar"}} + lsblk = {"TYPE": "part"} + device_info(devices=data, lsblk=lsblk) + disk = device.Device("/dev/sda1") + assert not disk.is_lvm_member + + def test_is_lvm_memeber(self, device_info): + data = {"/dev/sda1": {"foo": "bar"}} + lsblk = {"TYPE": "part"} + device_info(devices=data, lsblk=lsblk) + disk = device.Device("/dev/sda1") + assert not disk.is_lvm_member + + def test_is_mapper_device(self, device_info): + lsblk = {"TYPE": "lvm"} + device_info(lsblk=lsblk) + disk = device.Device("/dev/mapper/foo") + assert disk.is_mapper + + def test_dm_is_mapper_device(self, device_info): + lsblk = {"TYPE": "lvm"} + device_info(lsblk=lsblk) + disk = device.Device("/dev/dm-4") + assert disk.is_mapper + + def test_is_not_mapper_device(self, device_info): + lsblk = {"TYPE": "disk"} + device_info(lsblk=lsblk) + disk = device.Device("/dev/sda") + assert not disk.is_mapper + + @pytest.mark.usefixtures("lsblk_ceph_disk_member", + "disable_kernel_queries") + def test_is_ceph_disk_lsblk(self, monkeypatch, patch_bluestore_label): + disk = device.Device("/dev/sda") + assert disk.is_ceph_disk_member + + @pytest.mark.usefixtures("blkid_ceph_disk_member", + "disable_kernel_queries") + def test_is_ceph_disk_blkid(self, monkeypatch, patch_bluestore_label): + disk = device.Device("/dev/sda") + assert disk.is_ceph_disk_member + + @pytest.mark.usefixtures("lsblk_ceph_disk_member", + "disable_kernel_queries") + def test_is_ceph_disk_member_not_available_lsblk(self, monkeypatch, patch_bluestore_label): + disk = device.Device("/dev/sda") + assert disk.is_ceph_disk_member + assert not disk.available + assert "Used by ceph-disk" in disk.rejected_reasons + + @pytest.mark.usefixtures("blkid_ceph_disk_member", + "disable_kernel_queries") + def test_is_ceph_disk_member_not_available_blkid(self, monkeypatch, patch_bluestore_label): + disk = device.Device("/dev/sda") + assert disk.is_ceph_disk_member + assert not disk.available + assert "Used by ceph-disk" in disk.rejected_reasons + + def test_reject_removable_device(self, device_info): + data = {"/dev/sdb": {"removable": 1}} + lsblk = {"TYPE": "disk"} + device_info(devices=data,lsblk=lsblk) + disk = device.Device("/dev/sdb") + assert not disk.available + + def test_accept_non_removable_device(self, device_info): + data = {"/dev/sdb": {"removable": 0, "size": 5368709120}} + lsblk = {"TYPE": 
"disk"} + device_info(devices=data,lsblk=lsblk) + disk = device.Device("/dev/sdb") + assert disk.available + + def test_reject_not_acceptable_device(self, device_info): + data = {"/dev/dm-0": {"foo": "bar"}} + lsblk = {"TYPE": "mpath"} + device_info(devices=data, lsblk=lsblk) + disk = device.Device("/dev/dm-0") + assert not disk.available + + def test_reject_readonly_device(self, device_info): + data = {"/dev/cdrom": {"ro": 1}} + lsblk = {"TYPE": "disk"} + device_info(devices=data,lsblk=lsblk) + disk = device.Device("/dev/cdrom") + assert not disk.available + + def test_reject_smaller_than_5gb(self, device_info): + data = {"/dev/sda": {"size": 5368709119}} + lsblk = {"TYPE": "disk"} + device_info(devices=data,lsblk=lsblk) + disk = device.Device("/dev/sda") + assert not disk.available, 'too small device is available' + + def test_accept_non_readonly_device(self, device_info): + data = {"/dev/sda": {"ro": 0, "size": 5368709120}} + lsblk = {"TYPE": "disk"} + device_info(devices=data,lsblk=lsblk) + disk = device.Device("/dev/sda") + assert disk.available + + def test_reject_bluestore_device(self, monkeypatch, patch_bluestore_label, device_info): + patch_bluestore_label.return_value = True + lsblk = {"TYPE": "disk"} + device_info(lsblk=lsblk) + disk = device.Device("/dev/sda") + assert not disk.available + assert "Has BlueStore device label" in disk.rejected_reasons + + @pytest.mark.usefixtures("device_info_not_ceph_disk_member", + "disable_kernel_queries") + def test_is_not_ceph_disk_member_lsblk(self, patch_bluestore_label): + disk = device.Device("/dev/sda") + assert disk.is_ceph_disk_member is False + + def test_existing_vg_available(self, monkeypatch, device_info): + vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=1536, + vg_extent_size=4194304) + monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg]) + lsblk = {"TYPE": "disk"} + data = {"/dev/nvme0n1": {"size": "6442450944"}} + device_info(devices=data, lsblk=lsblk) + disk = device.Device("/dev/nvme0n1") + assert disk.available_lvm + assert not disk.available + assert not disk.available_raw + + def test_existing_vg_too_small(self, monkeypatch, device_info): + vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=4, + vg_extent_size=1073741824) + monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg]) + lsblk = {"TYPE": "disk"} + data = {"/dev/nvme0n1": {"size": "6442450944"}} + device_info(devices=data, lsblk=lsblk) + disk = device.Device("/dev/nvme0n1") + assert not disk.available_lvm + assert not disk.available + assert not disk.available_raw + + def test_multiple_existing_vgs(self, monkeypatch, device_info): + vg1 = api.VolumeGroup(vg_name='foo/bar', vg_free_count=1000, + vg_extent_size=4194304) + vg2 = api.VolumeGroup(vg_name='foo/bar', vg_free_count=536, + vg_extent_size=4194304) + monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg1, vg2]) + lsblk = {"TYPE": "disk"} + data = {"/dev/nvme0n1": {"size": "6442450944"}} + device_info(devices=data, lsblk=lsblk) + disk = device.Device("/dev/nvme0n1") + assert disk.available_lvm + assert not disk.available + assert not disk.available_raw + + @pytest.mark.parametrize("ceph_type", ["data", "block"]) + def test_used_by_ceph(self, device_info, + monkeypatch, ceph_type): + data = {"/dev/sda": {"foo": "bar"}} + lsblk = {"TYPE": "part"} + FooPVolume = api.PVolume(pv_name='/dev/sda', pv_uuid="0000", + lv_uuid="0000", pv_tags={}, vg_name="vg") + pvolumes = [] + pvolumes.append(FooPVolume) + lv_data = {"lv_name": "lv", "lv_path": "vg/lv", "vg_name": "vg", + "lv_uuid": "0000", 
"lv_tags": + "ceph.osd_id=0,ceph.type="+ceph_type} + volumes = [] + lv = api.Volume(**lv_data) + volumes.append(lv) + monkeypatch.setattr(api, 'get_pvs', lambda **kwargs: pvolumes) + monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: + deepcopy(volumes)) + + device_info(devices=data, lsblk=lsblk, lv=lv_data) + vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=6, + vg_extent_size=1073741824) + monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg]) + disk = device.Device("/dev/sda") + assert disk.used_by_ceph + + def test_not_used_by_ceph(self, device_info, monkeypatch): + FooPVolume = api.PVolume(pv_name='/dev/sda', pv_uuid="0000", lv_uuid="0000", pv_tags={}, vg_name="vg") + pvolumes = [] + pvolumes.append(FooPVolume) + data = {"/dev/sda": {"foo": "bar"}} + lsblk = {"TYPE": "part"} + lv_data = {"lv_path": "vg/lv", "vg_name": "vg", "lv_uuid": "0000", "tags": {"ceph.osd_id": 0, "ceph.type": "journal"}} + monkeypatch.setattr(api, 'get_pvs', lambda **kwargs: pvolumes) + + device_info(devices=data, lsblk=lsblk, lv=lv_data) + disk = device.Device("/dev/sda") + assert not disk.used_by_ceph + + def test_get_device_id(self, device_info): + udev = {k:k for k in ['ID_VENDOR', 'ID_MODEL', 'ID_SCSI_SERIAL']} + lsblk = {"TYPE": "disk"} + device_info(udevadm=udev,lsblk=lsblk) + disk = device.Device("/dev/sda") + assert disk._get_device_id() == 'ID_VENDOR_ID_MODEL_ID_SCSI_SERIAL' + + + +class TestDeviceEncryption(object): + + def test_partition_is_not_encrypted_lsblk(self, device_info): + lsblk = {'TYPE': 'part', 'FSTYPE': 'xfs'} + device_info(lsblk=lsblk) + disk = device.Device("/dev/sda") + assert disk.is_encrypted is False + + def test_partition_is_encrypted_lsblk(self, device_info): + lsblk = {'TYPE': 'part', 'FSTYPE': 'crypto_LUKS'} + device_info(lsblk=lsblk) + disk = device.Device("/dev/sda") + assert disk.is_encrypted is True + + def test_partition_is_not_encrypted_blkid(self, device_info): + lsblk = {'TYPE': 'part'} + blkid = {'TYPE': 'ceph data'} + device_info(lsblk=lsblk, blkid=blkid) + disk = device.Device("/dev/sda") + assert disk.is_encrypted is False + + def test_partition_is_encrypted_blkid(self, device_info): + lsblk = {'TYPE': 'part'} + blkid = {'TYPE': 'crypto_LUKS'} + device_info(lsblk=lsblk, blkid=blkid) + disk = device.Device("/dev/sda") + assert disk.is_encrypted is True + + def test_mapper_is_encrypted_luks1(self, device_info, monkeypatch): + status = {'type': 'LUKS1'} + monkeypatch.setattr(device, 'encryption_status', lambda x: status) + lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'} + blkid = {'TYPE': 'mapper'} + device_info(lsblk=lsblk, blkid=blkid) + disk = device.Device("/dev/mapper/uuid") + assert disk.is_encrypted is True + + def test_mapper_is_encrypted_luks2(self, device_info, monkeypatch): + status = {'type': 'LUKS2'} + monkeypatch.setattr(device, 'encryption_status', lambda x: status) + lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'} + blkid = {'TYPE': 'mapper'} + device_info(lsblk=lsblk, blkid=blkid) + disk = device.Device("/dev/mapper/uuid") + assert disk.is_encrypted is True + + def test_mapper_is_encrypted_plain(self, device_info, monkeypatch): + status = {'type': 'PLAIN'} + monkeypatch.setattr(device, 'encryption_status', lambda x: status) + lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'} + blkid = {'TYPE': 'mapper'} + device_info(lsblk=lsblk, blkid=blkid) + disk = device.Device("/dev/mapper/uuid") + assert disk.is_encrypted is True + + def test_mapper_is_not_encrypted_plain(self, device_info, monkeypatch): + monkeypatch.setattr(device, 'encryption_status', lambda x: {}) + 
lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'} + blkid = {'TYPE': 'mapper'} + device_info(lsblk=lsblk, blkid=blkid) + disk = device.Device("/dev/mapper/uuid") + assert disk.is_encrypted is False + + def test_lv_is_encrypted_blkid(self, device_info): + lsblk = {'TYPE': 'lvm'} + blkid = {'TYPE': 'crypto_LUKS'} + device_info(lsblk=lsblk, blkid=blkid) + disk = device.Device("/dev/sda") + disk.lv_api = {} + assert disk.is_encrypted is True + + def test_lv_is_not_encrypted_blkid(self, factory, device_info): + lsblk = {'TYPE': 'lvm'} + blkid = {'TYPE': 'xfs'} + device_info(lsblk=lsblk, blkid=blkid) + disk = device.Device("/dev/sda") + disk.lv_api = factory(encrypted=None) + assert disk.is_encrypted is False + + def test_lv_is_encrypted_lsblk(self, device_info): + lsblk = {'FSTYPE': 'crypto_LUKS', 'TYPE': 'lvm'} + blkid = {'TYPE': 'mapper'} + device_info(lsblk=lsblk, blkid=blkid) + disk = device.Device("/dev/sda") + disk.lv_api = {} + assert disk.is_encrypted is True + + def test_lv_is_not_encrypted_lsblk(self, factory, device_info): + lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'} + blkid = {'TYPE': 'mapper'} + device_info(lsblk=lsblk, blkid=blkid) + disk = device.Device("/dev/sda") + disk.lv_api = factory(encrypted=None) + assert disk.is_encrypted is False + + def test_lv_is_encrypted_lvm_api(self, factory, device_info): + lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'} + blkid = {'TYPE': 'mapper'} + device_info(lsblk=lsblk, blkid=blkid) + disk = device.Device("/dev/sda") + disk.lv_api = factory(encrypted=True) + assert disk.is_encrypted is True + + def test_lv_is_not_encrypted_lvm_api(self, factory, device_info): + lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'} + blkid = {'TYPE': 'mapper'} + device_info(lsblk=lsblk, blkid=blkid) + disk = device.Device("/dev/sda") + disk.lv_api = factory(encrypted=False) + assert disk.is_encrypted is False + + +class TestDeviceOrdering(object): + + def setup(self): + self.data = { + "/dev/sda": {"removable": 0}, + "/dev/sdb": {"removable": 1}, # invalid + "/dev/sdc": {"removable": 0}, + "/dev/sdd": {"removable": 1}, # invalid + } + + def test_valid_before_invalid(self, device_info): + lsblk = {"TYPE": "disk"} + device_info(devices=self.data,lsblk=lsblk) + sda = device.Device("/dev/sda") + sdb = device.Device("/dev/sdb") + + assert sda < sdb + assert sdb > sda + + def test_valid_alphabetical_ordering(self, device_info): + lsblk = {"TYPE": "disk"} + device_info(devices=self.data,lsblk=lsblk) + sda = device.Device("/dev/sda") + sdc = device.Device("/dev/sdc") + + assert sda < sdc + assert sdc > sda + + def test_invalid_alphabetical_ordering(self, device_info): + lsblk = {"TYPE": "disk"} + device_info(devices=self.data,lsblk=lsblk) + sdb = device.Device("/dev/sdb") + sdd = device.Device("/dev/sdd") + + assert sdb < sdd + assert sdd > sdb + + +class TestCephDiskDevice(object): + + def test_partlabel_lsblk(self, device_info): + lsblk = {"TYPE": "disk", "PARTLABEL": ""} + device_info(lsblk=lsblk) + disk = device.CephDiskDevice(device.Device("/dev/sda")) + + assert disk.partlabel == '' + + def test_partlabel_blkid(self, device_info): + blkid = {"TYPE": "disk", "PARTLABEL": "ceph data"} + device_info(blkid=blkid) + disk = device.CephDiskDevice(device.Device("/dev/sda")) + + assert disk.partlabel == 'ceph data' + + @pytest.mark.usefixtures("blkid_ceph_disk_member", + "disable_kernel_queries") + def test_is_member_blkid(self, monkeypatch, patch_bluestore_label): + disk = device.CephDiskDevice(device.Device("/dev/sda")) + + assert disk.is_member is True + + def test_reject_removable_device(self, 
device_info): + data = {"/dev/sdb": {"removable": 1}} + device_info(devices=data) + disk = device.Device("/dev/sdb") + assert not disk.available + + def test_accept_non_removable_device(self, device_info): + data = {"/dev/sdb": {"removable": 0, "size": 5368709120}} + lsblk = {"TYPE": "disk"} + device_info(devices=data,lsblk=lsblk) + disk = device.Device("/dev/sdb") + assert disk.available + + def test_reject_readonly_device(self, device_info): + data = {"/dev/cdrom": {"ro": 1}} + device_info(devices=data) + disk = device.Device("/dev/cdrom") + assert not disk.available + + def test_reject_smaller_than_5gb(self, device_info): + data = {"/dev/sda": {"size": 5368709119}} + device_info(devices=data) + disk = device.Device("/dev/sda") + assert not disk.available, 'too small device is available' + + def test_accept_non_readonly_device(self, device_info): + data = {"/dev/sda": {"ro": 0, "size": 5368709120}} + lsblk = {"TYPE": "disk"} + device_info(devices=data,lsblk=lsblk) + disk = device.Device("/dev/sda") + assert disk.available + + @pytest.mark.usefixtures("lsblk_ceph_disk_member", + "disable_kernel_queries") + def test_is_member_lsblk(self, patch_bluestore_label, device_info): + lsblk = {"TYPE": "disk", "PARTLABEL": "ceph"} + device_info(lsblk=lsblk) + disk = device.CephDiskDevice(device.Device("/dev/sda")) + + assert disk.is_member is True + + def test_unknown_type(self, device_info): + lsblk = {"TYPE": "disk", "PARTLABEL": "gluster"} + device_info(lsblk=lsblk) + disk = device.CephDiskDevice(device.Device("/dev/sda")) + + assert disk.type == 'unknown' + + ceph_types = ['data', 'wal', 'db', 'lockbox', 'journal', 'block'] + + @pytest.mark.usefixtures("blkid_ceph_disk_member", + "disable_kernel_queries") + def test_type_blkid(self, monkeypatch, device_info, ceph_partlabel): + disk = device.CephDiskDevice(device.Device("/dev/sda")) + + assert disk.type in self.ceph_types + + @pytest.mark.usefixtures("blkid_ceph_disk_member", + "lsblk_ceph_disk_member", + "disable_kernel_queries") + def test_type_lsblk(self, device_info, ceph_partlabel): + disk = device.CephDiskDevice(device.Device("/dev/sda")) + + assert disk.type in self.ceph_types diff --git a/src/ceph-volume/ceph_volume/tests/util/test_disk.py b/src/ceph-volume/ceph_volume/tests/util/test_disk.py new file mode 100644 index 00000000..5f4d5734 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/util/test_disk.py @@ -0,0 +1,540 @@ +import os +import pytest +from mock.mock import patch +from ceph_volume.util import disk + + +class TestLsblkParser(object): + + def test_parses_whitespace_values(self): + output = 'NAME="sdaa5" PARTLABEL="ceph data" RM="0" SIZE="10M" RO="0" TYPE="part"' + result = disk._lsblk_parser(output) + assert result['PARTLABEL'] == 'ceph data' + + def test_ignores_bogus_pairs(self): + output = 'NAME="sdaa5" PARTLABEL RM="0" SIZE="10M" RO="0" TYPE="part" MOUNTPOINT=""' + result = disk._lsblk_parser(output) + assert result['SIZE'] == '10M' + + +class TestBlkidParser(object): + + def test_parses_whitespace_values(self): + output = '''/dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" PART_ENTRY_SCHEME="gpt" PART_ENTRY_NAME="ceph data" PART_ENTRY_UUID="b89c03bc-bf58-4338-a8f8-a2f484852b4f"''' # noqa + result = disk._blkid_parser(output) + assert result['PARTLABEL'] == 'ceph data' + + def test_ignores_unmapped(self): + output = '''/dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" PART_ENTRY_SCHEME="gpt" PART_ENTRY_NAME="ceph data" PART_ENTRY_UUID="b89c03bc-bf58-4338-a8f8-a2f484852b4f"''' # noqa + 
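+        # five fields come back from blkid, but PART_ENTRY_SCHEME has no mapping in the parser; the other four survive (UUID, TYPE, PART_ENTRY_NAME -> PARTLABEL, PART_ENTRY_UUID -> PARTUUID), which is what the length assertion below checks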
result = disk._blkid_parser(output) + assert len(result.keys()) == 4 + + def test_translates_to_partuuid(self): + output = '''/dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" PART_ENTRY_SCHEME="gpt" PART_ENTRY_NAME="ceph data" PART_ENTRY_UUID="b89c03bc-bf58-4338-a8f8-a2f484852b4f"''' # noqa + result = disk._blkid_parser(output) + assert result['PARTUUID'] == 'b89c03bc-bf58-4338-a8f8-a2f484852b4f' + + +class TestBlkid(object): + + def test_parses_translated(self, stub_call): + output = '''/dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" PART_ENTRY_SCHEME="gpt" PART_ENTRY_NAME="ceph data" PART_ENTRY_UUID="b89c03bc-bf58-4338-a8f8-a2f484852b4f"''' # noqa + stub_call((output.split(), [], 0)) + result = disk.blkid('/dev/sdb1') + assert result['PARTUUID'] == 'b89c03bc-bf58-4338-a8f8-a2f484852b4f' + assert result['PARTLABEL'] == 'ceph data' + assert result['UUID'] == '62416664-cbaf-40bd-9689-10bd337379c3' + assert result['TYPE'] == 'xfs' + +class TestUdevadmProperty(object): + + def test_good_output(self, stub_call): + output = """ID_MODEL=SK_hynix_SC311_SATA_512GB +ID_PART_TABLE_TYPE=gpt +ID_SERIAL_SHORT=MS83N71801150416A""".split() + stub_call((output, [], 0)) + result = disk.udevadm_property('dev/sda') + assert result['ID_MODEL'] == 'SK_hynix_SC311_SATA_512GB' + assert result['ID_PART_TABLE_TYPE'] == 'gpt' + assert result['ID_SERIAL_SHORT'] == 'MS83N71801150416A' + + def test_property_filter(self, stub_call): + output = """ID_MODEL=SK_hynix_SC311_SATA_512GB +ID_PART_TABLE_TYPE=gpt +ID_SERIAL_SHORT=MS83N71801150416A""".split() + stub_call((output, [], 0)) + result = disk.udevadm_property('dev/sda', ['ID_MODEL', + 'ID_SERIAL_SHORT']) + assert result['ID_MODEL'] == 'SK_hynix_SC311_SATA_512GB' + assert 'ID_PART_TABLE_TYPE' not in result + + def test_fail_on_broken_output(self, stub_call): + output = ["ID_MODEL:SK_hynix_SC311_SATA_512GB"] + stub_call((output, [], 0)) + with pytest.raises(ValueError): + disk.udevadm_property('dev/sda') + + +class TestDeviceFamily(object): + + def test_groups_multiple_devices(self, stub_call): + out = [ + 'NAME="sdaa5" PARLABEL="ceph lockbox"', + 'NAME="sdaa" RO="0"', + 'NAME="sdaa1" PARLABEL="ceph data"', + 'NAME="sdaa2" PARLABEL="ceph journal"', + ] + stub_call((out, '', 0)) + result = disk.device_family('sdaa5') + assert len(result) == 4 + + def test_parses_output_correctly(self, stub_call): + names = ['sdaa', 'sdaa5', 'sdaa1', 'sdaa2'] + out = [ + 'NAME="sdaa5" PARLABEL="ceph lockbox"', + 'NAME="sdaa" RO="0"', + 'NAME="sdaa1" PARLABEL="ceph data"', + 'NAME="sdaa2" PARLABEL="ceph journal"', + ] + stub_call((out, '', 0)) + result = disk.device_family('sdaa5') + for parsed in result: + assert parsed['NAME'] in names + + +class TestHumanReadableSize(object): + + def test_bytes(self): + result = disk.human_readable_size(800) + assert result == '800.00 B' + + def test_kilobytes(self): + result = disk.human_readable_size(800*1024) + assert result == '800.00 KB' + + def test_megabytes(self): + result = disk.human_readable_size(800*1024*1024) + assert result == '800.00 MB' + + def test_gigabytes(self): + result = disk.human_readable_size(8.19*1024*1024*1024) + assert result == '8.19 GB' + + def test_terabytes(self): + result = disk.human_readable_size(81.2*1024*1024*1024*1024) + assert result == '81.20 TB' + + +class TestSizeFromHumanReadable(object): + + def test_bytes(self): + result = disk.size_from_human_readable('2') + assert result == disk.Size(b=2) + + def test_kilobytes(self): + result = disk.size_from_human_readable('2 K') + 
assert result == disk.Size(kb=2) + + def test_megabytes(self): + result = disk.size_from_human_readable('2 M') + assert result == disk.Size(mb=2) + + def test_gigabytes(self): + result = disk.size_from_human_readable('2 G') + assert result == disk.Size(gb=2) + + def test_terrabytes(self): + result = disk.size_from_human_readable('2 T') + assert result == disk.Size(tb=2) + + def test_case(self): + result = disk.size_from_human_readable('2 t') + assert result == disk.Size(tb=2) + + def test_space(self): + result = disk.size_from_human_readable('2T') + assert result == disk.Size(tb=2) + + def test_float(self): + result = disk.size_from_human_readable('2.0') + assert result == disk.Size(b=2) + result = disk.size_from_human_readable('2.0T') + assert result == disk.Size(tb=2) + result = disk.size_from_human_readable('1.8T') + assert result == disk.Size(tb=1.8) + + +class TestSizeParse(object): + + def test_bytes(self): + result = disk.Size.parse('2') + assert result == disk.Size(b=2) + + def test_kilobytes(self): + result = disk.Size.parse('2K') + assert result == disk.Size(kb=2) + + def test_megabytes(self): + result = disk.Size.parse('2M') + assert result == disk.Size(mb=2) + + def test_gigabytes(self): + result = disk.Size.parse('2G') + assert result == disk.Size(gb=2) + + def test_terrabytes(self): + result = disk.Size.parse('2T') + assert result == disk.Size(tb=2) + + def test_tb(self): + result = disk.Size.parse('2Tb') + assert result == disk.Size(tb=2) + + def test_case(self): + result = disk.Size.parse('2t') + assert result == disk.Size(tb=2) + + def test_space(self): + result = disk.Size.parse('2T') + assert result == disk.Size(tb=2) + + def test_float(self): + result = disk.Size.parse('2.0') + assert result == disk.Size(b=2) + result = disk.Size.parse('2.0T') + assert result == disk.Size(tb=2) + result = disk.Size.parse('1.8T') + assert result == disk.Size(tb=1.8) + + +class TestGetBlockDevsLsblk(object): + + @patch('ceph_volume.process.call') + def test_return_structure(self, patched_call): + lsblk_stdout = [ + '/dev/dm-0 /dev/mapper/ceph--8b2684eb--56ff--49e4--8f28--522e04cbd6ab-osd--data--9fc29fbf--3b5b--4066--be10--61042569b5a7 lvm', + '/dev/vda /dev/vda disk', + '/dev/vda1 /dev/vda1 part', + '/dev/vdb /dev/vdb disk',] + patched_call.return_value = (lsblk_stdout, '', 0) + disks = disk.get_block_devs_lsblk() + assert len(disks) == len(lsblk_stdout) + assert len(disks[0]) == 3 + + @patch('ceph_volume.process.call') + def test_empty_lsblk(self, patched_call): + patched_call.return_value = ([], '', 0) + disks = disk.get_block_devs_lsblk() + assert len(disks) == 0 + + @patch('ceph_volume.process.call') + def test_raise_on_failure(self, patched_call): + patched_call.return_value = ([], 'error', 1) + with pytest.raises(OSError): + disk.get_block_devs_lsblk() + + +class TestGetDevices(object): + + def setup_path(self, tmpdir): + path = os.path.join(str(tmpdir), 'block') + os.makedirs(path) + return path + + def test_no_devices_are_found(self, tmpdir, patched_get_block_devs_lsblk): + patched_get_block_devs_lsblk.return_value = [] + result = disk.get_devices(_sys_block_path=str(tmpdir)) + assert result == {} + + def test_sda_block_is_found(self, tmpdir, patched_get_block_devs_lsblk): + sda_path = '/dev/sda' + patched_get_block_devs_lsblk.return_value = [[sda_path, sda_path, 'disk']] + block_path = self.setup_path(tmpdir) + os.makedirs(os.path.join(block_path, 'sda')) + result = disk.get_devices(_sys_block_path=block_path) + assert len(result.keys()) == 1 + assert 
result[sda_path]['human_readable_size'] == '0.00 B' + assert result[sda_path]['model'] == '' + assert result[sda_path]['partitions'] == {} + + + def test_sda_size(self, tmpfile, tmpdir, patched_get_block_devs_lsblk): + sda_path = '/dev/sda' + patched_get_block_devs_lsblk.return_value = [[sda_path, sda_path, 'disk']] + block_path = self.setup_path(tmpdir) + block_sda_path = os.path.join(block_path, 'sda') + os.makedirs(block_sda_path) + tmpfile('size', '1024', directory=block_sda_path) + result = disk.get_devices(_sys_block_path=block_path) + assert list(result.keys()) == [sda_path] + assert result[sda_path]['human_readable_size'] == '512.00 KB' + + def test_sda_sectorsize_fallsback(self, tmpfile, tmpdir, patched_get_block_devs_lsblk): + # if no sectorsize, it will use queue/hw_sector_size + sda_path = '/dev/sda' + patched_get_block_devs_lsblk.return_value = [[sda_path, sda_path, 'disk']] + block_path = self.setup_path(tmpdir) + block_sda_path = os.path.join(block_path, 'sda') + sda_queue_path = os.path.join(block_sda_path, 'queue') + os.makedirs(block_sda_path) + os.makedirs(sda_queue_path) + tmpfile('hw_sector_size', contents='1024', directory=sda_queue_path) + result = disk.get_devices(_sys_block_path=block_path) + assert list(result.keys()) == [sda_path] + assert result[sda_path]['sectorsize'] == '1024' + + def test_sda_sectorsize_from_logical_block(self, tmpfile, tmpdir, patched_get_block_devs_lsblk): + sda_path = '/dev/sda' + patched_get_block_devs_lsblk.return_value = [[sda_path, sda_path, 'disk']] + block_path = self.setup_path(tmpdir) + block_sda_path = os.path.join(block_path, 'sda') + sda_queue_path = os.path.join(block_sda_path, 'queue') + os.makedirs(block_sda_path) + os.makedirs(sda_queue_path) + tmpfile('logical_block_size', contents='99', directory=sda_queue_path) + result = disk.get_devices(_sys_block_path=block_path) + assert result[sda_path]['sectorsize'] == '99' + + def test_sda_sectorsize_does_not_fallback(self, tmpfile, tmpdir, patched_get_block_devs_lsblk): + sda_path = '/dev/sda' + patched_get_block_devs_lsblk.return_value = [[sda_path, sda_path, 'disk']] + block_path = self.setup_path(tmpdir) + block_sda_path = os.path.join(block_path, 'sda') + sda_queue_path = os.path.join(block_sda_path, 'queue') + os.makedirs(block_sda_path) + os.makedirs(sda_queue_path) + tmpfile('logical_block_size', contents='99', directory=sda_queue_path) + tmpfile('hw_sector_size', contents='1024', directory=sda_queue_path) + result = disk.get_devices(_sys_block_path=block_path) + assert result[sda_path]['sectorsize'] == '99' + + def test_is_rotational(self, tmpfile, tmpdir, patched_get_block_devs_lsblk): + sda_path = '/dev/sda' + patched_get_block_devs_lsblk.return_value = [[sda_path, sda_path, 'disk']] + block_path = self.setup_path(tmpdir) + block_sda_path = os.path.join(block_path, 'sda') + sda_queue_path = os.path.join(block_sda_path, 'queue') + os.makedirs(block_sda_path) + os.makedirs(sda_queue_path) + tmpfile('rotational', contents='1', directory=sda_queue_path) + result = disk.get_devices(_sys_block_path=block_path) + assert result[sda_path]['rotational'] == '1' + + +class TestSizeCalculations(object): + + @pytest.mark.parametrize('aliases', [ + ('b', 'bytes'), + ('kb', 'kilobytes'), + ('mb', 'megabytes'), + ('gb', 'gigabytes'), + ('tb', 'terabytes'), + ]) + def test_aliases(self, aliases): + short_alias, long_alias = aliases + s = disk.Size(b=1) + short_alias = getattr(s, short_alias) + long_alias = getattr(s, long_alias) + assert short_alias == long_alias + + 
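+    def test_unit_alias_sketch(self):
+        # a minimal extra sketch (an editor's illustration, not from the
+        # original patch) of the conversion invariants this class relies on,
+        # assuming the 1024-based units used throughout these tests
+        s = disk.Size(gb=1)
+        assert s.mb == s.megabytes == 1024  # short and long aliases agree
+        assert s.b == 1024 ** 3             # bytes are always derivable
+        assert disk.Size(mb=1024) == s      # equality is unit-independent
+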
@pytest.mark.parametrize('values', [ + ('b', 857619069665.28), + ('kb', 837518622.72), + ('mb', 817889.28), + ('gb', 798.72), + ('tb', 0.78), + ]) + def test_terabytes(self, values): + # regardless of the input value, all the other values correlate to each + # other the same, every time + unit, value = values + s = disk.Size(**{unit: value}) + assert s.b == 857619069665.28 + assert s.kb == 837518622.72 + assert s.mb == 817889.28 + assert s.gb == 798.72 + assert s.tb == 0.78 + + +class TestSizeOperators(object): + + @pytest.mark.parametrize('larger', [1025, 1024.1, 1024.001]) + def test_gigabytes_is_smaller(self, larger): + assert disk.Size(gb=1) < disk.Size(mb=larger) + + @pytest.mark.parametrize('smaller', [1023, 1023.9, 1023.001]) + def test_gigabytes_is_larger(self, smaller): + assert disk.Size(gb=1) > disk.Size(mb=smaller) + + @pytest.mark.parametrize('larger', [1025, 1024.1, 1024.001, 1024]) + def test_gigabytes_is_smaller_or_equal(self, larger): + assert disk.Size(gb=1) <= disk.Size(mb=larger) + + @pytest.mark.parametrize('smaller', [1023, 1023.9, 1023.001, 1024]) + def test_gigabytes_is_larger_or_equal(self, smaller): + assert disk.Size(gb=1) >= disk.Size(mb=smaller) + + @pytest.mark.parametrize('values', [ + ('b', 857619069665.28), + ('kb', 837518622.72), + ('mb', 817889.28), + ('gb', 798.72), + ('tb', 0.78), + ]) + def test_equality(self, values): + unit, value = values + s = disk.Size(**{unit: value}) + # both tb and b, since b is always calculated regardless, and is useful + # when testing tb + assert disk.Size(tb=0.78) == s + assert disk.Size(b=857619069665.28) == s + + @pytest.mark.parametrize('values', [ + ('b', 857619069665.28), + ('kb', 837518622.72), + ('mb', 817889.28), + ('gb', 798.72), + ('tb', 0.78), + ]) + def test_inequality(self, values): + unit, value = values + s = disk.Size(**{unit: value}) + # both tb and b, since b is always calculated regardless, and is useful + # when testing tb + assert disk.Size(tb=1) != s + assert disk.Size(b=100) != s + + +class TestSizeOperations(object): + + def test_assignment_addition_with_size_objects(self): + result = disk.Size(mb=256) + disk.Size(gb=1) + assert result.gb == 1.25 + assert result.gb.as_int() == 1 + assert result.gb.as_float() == 1.25 + + def test_self_addition_with_size_objects(self): + base = disk.Size(mb=256) + base += disk.Size(gb=1) + assert base.gb == 1.25 + + def test_self_addition_does_not_alter_state(self): + base = disk.Size(mb=256) + base + disk.Size(gb=1) + assert base.mb == 256 + + def test_addition_with_non_size_objects(self): + with pytest.raises(TypeError): + disk.Size(mb=100) + 4 + + def test_assignment_subtraction_with_size_objects(self): + base = disk.Size(gb=1) + base -= disk.Size(mb=256) + assert base.mb == 768 + + def test_self_subtraction_does_not_alter_state(self): + base = disk.Size(gb=1) + base - disk.Size(mb=256) + assert base.gb == 1 + + def test_subtraction_with_size_objects(self): + result = disk.Size(gb=1) - disk.Size(mb=256) + assert result.mb == 768 + + def test_subtraction_with_non_size_objects(self): + with pytest.raises(TypeError): + disk.Size(mb=100) - 4 + + def test_multiplication_with_size_objects(self): + with pytest.raises(TypeError): + disk.Size(mb=100) * disk.Size(mb=1) + + def test_multiplication_with_non_size_objects(self): + base = disk.Size(gb=1) + result = base * 2 + assert result.gb == 2 + assert result.gb.as_int() == 2 + + def test_division_with_size_objects(self): + result = disk.Size(gb=1) / disk.Size(mb=1) + assert int(result) == 1024 + + def 
test_division_with_non_size_objects(self): + base = disk.Size(gb=1) + result = base / 2 + assert result.mb == 512 + assert result.mb.as_int() == 512 + + def test_division_with_non_size_objects_without_state(self): + base = disk.Size(gb=1) + base / 2 + assert base.gb == 1 + assert base.gb.as_int() == 1 + + +class TestSizeAttributes(object): + + def test_attribute_does_not_exist(self): + with pytest.raises(AttributeError): + disk.Size(mb=1).exabytes + + +class TestSizeFormatting(object): + + def test_default_formatting_tb_to_b(self): + size = disk.Size(tb=0.0000000001) + result = "%s" % size + assert result == "109.95 B" + + def test_default_formatting_tb_to_kb(self): + size = disk.Size(tb=0.00000001) + result = "%s" % size + assert result == "10.74 KB" + + def test_default_formatting_tb_to_mb(self): + size = disk.Size(tb=0.000001) + result = "%s" % size + assert result == "1.05 MB" + + def test_default_formatting_tb_to_gb(self): + size = disk.Size(tb=0.001) + result = "%s" % size + assert result == "1.02 GB" + + def test_default_formatting_tb_to_tb(self): + size = disk.Size(tb=10) + result = "%s" % size + assert result == "10.00 TB" + + +class TestSizeSpecificFormatting(object): + + def test_formatting_b(self): + size = disk.Size(b=2048) + result = "%s" % size.b + assert "%s" % size.b == "%s" % size.bytes + assert result == "2048.00 B" + + def test_formatting_kb(self): + size = disk.Size(kb=5700) + result = "%s" % size.kb + assert "%s" % size.kb == "%s" % size.kilobytes + assert result == "5700.00 KB" + + def test_formatting_mb(self): + size = disk.Size(mb=4000) + result = "%s" % size.mb + assert "%s" % size.mb == "%s" % size.megabytes + assert result == "4000.00 MB" + + def test_formatting_gb(self): + size = disk.Size(gb=77777) + result = "%s" % size.gb + assert "%s" % size.gb == "%s" % size.gigabytes + assert result == "77777.00 GB" + + def test_formatting_tb(self): + size = disk.Size(tb=1027) + result = "%s" % size.tb + assert "%s" % size.tb == "%s" % size.terabytes + assert result == "1027.00 TB" diff --git a/src/ceph-volume/ceph_volume/tests/util/test_encryption.py b/src/ceph-volume/ceph_volume/tests/util/test_encryption.py new file mode 100644 index 00000000..e1420b44 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/util/test_encryption.py @@ -0,0 +1,53 @@ +from ceph_volume.util import encryption + + +class TestStatus(object): + + def test_skips_unuseful_lines(self, stub_call): + out = ['some line here', ' device: /dev/sdc1'] + stub_call((out, '', 0)) + assert encryption.status('/dev/sdc1') == {'device': '/dev/sdc1'} + + def test_removes_extra_quotes(self, stub_call): + out = ['some line here', ' device: "/dev/sdc1"'] + stub_call((out, '', 0)) + assert encryption.status('/dev/sdc1') == {'device': '/dev/sdc1'} + + def test_ignores_bogus_lines(self, stub_call): + out = ['some line here', ' '] + stub_call((out, '', 0)) + assert encryption.status('/dev/sdc1') == {} + + +class TestDmcryptClose(object): + + def test_mapper_exists(self, fake_run, tmpfile): + file_name = tmpfile(name='mapper-device') + encryption.dmcrypt_close(file_name) + arguments = fake_run.calls[0]['args'][0] + assert arguments[0] == 'cryptsetup' + assert arguments[1] == 'remove' + assert arguments[2].startswith('/') + + def test_mapper_does_not_exist(self, fake_run): + file_name = '/path/does/not/exist' + encryption.dmcrypt_close(file_name) + assert fake_run.calls == [] + + +class TestDmcryptKey(object): + + def test_dmcrypt_with_default_size(self, conf_ceph_stub): + conf_ceph_stub('[global]\nfsid=asdf-lkjh') + 
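+        # 172 == the base64 length of 128 random bytes (4 * ceil(128 / 3)); the custom osd_dmcrypt_size in the next test yields the same length, suggesting the size option no longer affects the generated key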
result = encryption.create_dmcrypt_key() + assert len(result) == 172 + + def test_dmcrypt_with_custom_size(self, conf_ceph_stub): + conf_ceph_stub(''' + [global] + fsid=asdf + [osd] + osd_dmcrypt_size=8 + ''') + result = encryption.create_dmcrypt_key() + assert len(result) == 172 diff --git a/src/ceph-volume/ceph_volume/tests/util/test_prepare.py b/src/ceph-volume/ceph_volume/tests/util/test_prepare.py new file mode 100644 index 00000000..ced5d49e --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/util/test_prepare.py @@ -0,0 +1,422 @@ +import pytest +from textwrap import dedent +import json +from ceph_volume.util import prepare +from ceph_volume.util.prepare import system +from ceph_volume import conf +from ceph_volume.tests.conftest import Factory + + +class TestOSDIDAvailable(object): + + def test_false_if_id_is_none(self): + assert not prepare.osd_id_available(None) + + def test_returncode_is_not_zero(self, monkeypatch): + monkeypatch.setattr('ceph_volume.process.call', lambda *a, **kw: ('', '', 1)) + with pytest.raises(RuntimeError): + prepare.osd_id_available(1) + + def test_id_does_exist_but_not_available(self, monkeypatch): + stdout = dict(nodes=[ + dict(id=0, status="up"), + ]) + stdout = ['', json.dumps(stdout)] + monkeypatch.setattr('ceph_volume.process.call', lambda *a, **kw: (stdout, '', 0)) + result = prepare.osd_id_available(0) + assert not result + + def test_id_does_not_exist(self, monkeypatch): + stdout = dict(nodes=[ + dict(id=0), + ]) + stdout = ['', json.dumps(stdout)] + monkeypatch.setattr('ceph_volume.process.call', lambda *a, **kw: (stdout, '', 0)) + result = prepare.osd_id_available(1) + assert not result + + def test_invalid_osd_id(self, monkeypatch): + stdout = dict(nodes=[ + dict(id=0), + ]) + stdout = ['', json.dumps(stdout)] + monkeypatch.setattr('ceph_volume.process.call', lambda *a, **kw: (stdout, '', 0)) + result = prepare.osd_id_available("foo") + assert not result + + def test_returns_true_when_id_is_destroyed(self, monkeypatch): + stdout = dict(nodes=[ + dict(id=0, status="destroyed"), + ]) + stdout = ['', json.dumps(stdout)] + monkeypatch.setattr('ceph_volume.process.call', lambda *a, **kw: (stdout, '', 0)) + result = prepare.osd_id_available(0) + assert result + + +class TestFormatDevice(object): + + def test_include_force(self, fake_run, monkeypatch): + monkeypatch.setattr(conf, 'ceph', Factory(get_list=lambda *a, **kw: [])) + prepare.format_device('/dev/sxx') + flags = fake_run.calls[0]['args'][0] + assert '-f' in flags + + def test_device_is_always_appended(self, fake_run, conf_ceph): + conf_ceph(get_list=lambda *a, **kw: []) + prepare.format_device('/dev/sxx') + flags = fake_run.calls[0]['args'][0] + assert flags[-1] == '/dev/sxx' + + def test_extra_flags_are_added(self, fake_run, conf_ceph): + conf_ceph(get_list=lambda *a, **kw: ['--why-yes']) + prepare.format_device('/dev/sxx') + flags = fake_run.calls[0]['args'][0] + assert '--why-yes' in flags + + def test_default_options(self, conf_ceph_stub, fake_run): + conf_ceph_stub(dedent("""[global] + fsid = 1234lkjh1234""")) + conf.cluster = 'ceph' + prepare.format_device('/dev/sda1') + expected = [ + 'mkfs', '-t', 'xfs', + '-f', '-i', 'size=2048', # default flags + '/dev/sda1'] + assert expected == fake_run.calls[0]['args'][0] + + def test_multiple_options_are_used(self, conf_ceph_stub, fake_run): + conf_ceph_stub(dedent("""[global] + fsid = 1234lkjh1234 + [osd] + osd mkfs options xfs = -f -i size=1024""")) + conf.cluster = 'ceph' + prepare.format_device('/dev/sda1') + expected = [ + 'mkfs', '-t', 
'xfs', + '-f', '-i', 'size=1024', + '/dev/sda1'] + assert expected == fake_run.calls[0]['args'][0] + + def test_multiple_options_will_get_the_force_flag(self, conf_ceph_stub, fake_run): + conf_ceph_stub(dedent("""[global] + fsid = 1234lkjh1234 + [osd] + osd mkfs options xfs = -i size=1024""")) + conf.cluster = 'ceph' + prepare.format_device('/dev/sda1') + expected = [ + 'mkfs', '-t', 'xfs', + '-f', '-i', 'size=1024', + '/dev/sda1'] + assert expected == fake_run.calls[0]['args'][0] + + def test_underscore_options_are_used(self, conf_ceph_stub, fake_run): + conf_ceph_stub(dedent("""[global] + fsid = 1234lkjh1234 + [osd] + osd_mkfs_options_xfs = -i size=128""")) + conf.cluster = 'ceph' + prepare.format_device('/dev/sda1') + expected = [ + 'mkfs', '-t', 'xfs', + '-f', '-i', 'size=128', + '/dev/sda1'] + assert expected == fake_run.calls[0]['args'][0] + + +mkfs_filestore_flags = [ + 'ceph-osd', + '--cluster', + '--osd-objectstore', 'filestore', + '--mkfs', + '-i', + '--monmap', + '--keyfile', '-', # goes through stdin + '--osd-data', + '--osd-journal', + '--osd-uuid', + '--setuser', 'ceph', + '--setgroup', 'ceph' +] + + +class TestOsdMkfsFilestore(object): + + @pytest.mark.parametrize('flag', mkfs_filestore_flags) + def test_keyring_is_used(self, fake_call, monkeypatch, flag): + monkeypatch.setattr(prepare, '__release__', 'mimic') + monkeypatch.setattr(system, 'chown', lambda path: True) + prepare.osd_mkfs_filestore(1, 'asdf', keyring='secret') + assert flag in fake_call.calls[0]['args'][0] + + def test_keyring_is_used_luminous(self, fake_call, monkeypatch): + monkeypatch.setattr(prepare, '__release__', 'luminous') + monkeypatch.setattr(system, 'chown', lambda path: True) + prepare.osd_mkfs_filestore(1, 'asdf', keyring='secret') + assert '--keyfile' not in fake_call.calls[0]['args'][0] + + +class TestOsdMkfsBluestore(object): + + def test_keyring_is_added(self, fake_call, monkeypatch): + monkeypatch.setattr(system, 'chown', lambda path: True) + prepare.osd_mkfs_bluestore(1, 'asdf', keyring='secret') + assert '--keyfile' in fake_call.calls[0]['args'][0] + + def test_keyring_is_not_added(self, fake_call, monkeypatch): + monkeypatch.setattr(system, 'chown', lambda path: True) + prepare.osd_mkfs_bluestore(1, 'asdf') + assert '--keyfile' not in fake_call.calls[0]['args'][0] + + def test_keyring_is_not_added_luminous(self, fake_call, monkeypatch): + monkeypatch.setattr(system, 'chown', lambda path: True) + prepare.osd_mkfs_bluestore(1, 'asdf') + monkeypatch.setattr(prepare, '__release__', 'luminous') + assert '--keyfile' not in fake_call.calls[0]['args'][0] + + def test_wal_is_added(self, fake_call, monkeypatch): + monkeypatch.setattr(system, 'chown', lambda path: True) + prepare.osd_mkfs_bluestore(1, 'asdf', wal='/dev/smm1') + assert '--bluestore-block-wal-path' in fake_call.calls[0]['args'][0] + assert '/dev/smm1' in fake_call.calls[0]['args'][0] + + def test_db_is_added(self, fake_call, monkeypatch): + monkeypatch.setattr(system, 'chown', lambda path: True) + prepare.osd_mkfs_bluestore(1, 'asdf', db='/dev/smm2') + assert '--bluestore-block-db-path' in fake_call.calls[0]['args'][0] + assert '/dev/smm2' in fake_call.calls[0]['args'][0] + + +class TestMountOSD(object): + + def test_default_options(self, conf_ceph_stub, fake_run): + conf_ceph_stub(dedent("""[global] + fsid = 1234lkjh1234""")) + conf.cluster = 'ceph' + prepare.mount_osd('/dev/sda1', 1) + expected = [ + 'mount', '-t', 'xfs', '-o', + 'rw,noatime,inode64', # default flags + '/dev/sda1', '/var/lib/ceph/osd/ceph-1'] + assert expected == 
fake_run.calls[0]['args'][0] + + def test_mount_options_are_used(self, conf_ceph_stub, fake_run): + conf_ceph_stub(dedent("""[global] + fsid = 1234lkjh1234 + [osd] + osd mount options xfs = rw""")) + conf.cluster = 'ceph' + prepare.mount_osd('/dev/sda1', 1) + expected = [ + 'mount', '-t', 'xfs', '-o', + 'rw', + '/dev/sda1', '/var/lib/ceph/osd/ceph-1'] + assert expected == fake_run.calls[0]['args'][0] + + def test_multiple_whitespace_options_are_used(self, conf_ceph_stub, fake_run): + conf_ceph_stub(dedent("""[global] + fsid = 1234lkjh1234 + [osd] + osd mount options xfs = rw auto exec""")) + conf.cluster = 'ceph' + prepare.mount_osd('/dev/sda1', 1) + expected = [ + 'mount', '-t', 'xfs', '-o', + 'rw,auto,exec', + '/dev/sda1', '/var/lib/ceph/osd/ceph-1'] + assert expected == fake_run.calls[0]['args'][0] + + def test_multiple_comma_whitespace_options_are_used(self, conf_ceph_stub, fake_run): + conf_ceph_stub(dedent("""[global] + fsid = 1234lkjh1234 + [osd] + osd mount options xfs = rw, auto, exec""")) + conf.cluster = 'ceph' + prepare.mount_osd('/dev/sda1', 1) + expected = [ + 'mount', '-t', 'xfs', '-o', + 'rw,auto,exec', + '/dev/sda1', '/var/lib/ceph/osd/ceph-1'] + assert expected == fake_run.calls[0]['args'][0] + + def test_underscore_mount_options_are_used(self, conf_ceph_stub, fake_run): + conf_ceph_stub(dedent("""[global] + fsid = 1234lkjh1234 + [osd] + osd mount options xfs = rw""")) + conf.cluster = 'ceph' + prepare.mount_osd('/dev/sda1', 1) + expected = [ + 'mount', '-t', 'xfs', '-o', + 'rw', + '/dev/sda1', '/var/lib/ceph/osd/ceph-1'] + assert expected == fake_run.calls[0]['args'][0] + + +ceph_conf_mount_values = [ + ['rw,', 'auto,' 'exec'], + ['rw', 'auto', 'exec'], + [' rw ', ' auto ', ' exec '], + ['rw,', 'auto,', 'exec,'], + [',rw ', ',auto ', ',exec,'], + [',rw,', ',auto,', ',exec,'], +] + +string_mount_values = [ + 'rw, auto exec ', + 'rw auto exec', + ',rw, auto, exec,', + ' rw auto exec ', + ' rw,auto,exec ', + 'rw,auto,exec', + ',rw,auto,exec,', + 'rw,auto,exec ', + 'rw, auto, exec ', +] + + +class TestNormalizeFlags(object): + # a bit overkill since most of this is already tested in prepare.mount_osd + # tests + + @pytest.mark.parametrize("flags", ceph_conf_mount_values) + def test_normalize_lists(self, flags): + result = sorted(prepare._normalize_mount_flags(flags).split(',')) + assert ','.join(result) == 'auto,exec,rw' + + @pytest.mark.parametrize("flags", string_mount_values) + def test_normalize_strings(self, flags): + result = sorted(prepare._normalize_mount_flags(flags).split(',')) + assert ','.join(result) == 'auto,exec,rw' + + @pytest.mark.parametrize("flags", ceph_conf_mount_values) + def test_normalize_extra_flags(self, flags): + result = prepare._normalize_mount_flags(flags, extras=['discard']) + assert sorted(result.split(',')) == ['auto', 'discard', 'exec', 'rw'] + + @pytest.mark.parametrize("flags", ceph_conf_mount_values) + def test_normalize_duplicate_extra_flags(self, flags): + result = prepare._normalize_mount_flags(flags, extras=['rw', 'discard']) + assert sorted(result.split(',')) == ['auto', 'discard', 'exec', 'rw'] + + @pytest.mark.parametrize("flags", string_mount_values) + def test_normalize_strings_flags(self, flags): + result = sorted(prepare._normalize_mount_flags(flags, extras=['discard']).split(',')) + assert ','.join(result) == 'auto,discard,exec,rw' + + @pytest.mark.parametrize("flags", string_mount_values) + def test_normalize_strings_duplicate_flags(self, flags): + result = sorted(prepare._normalize_mount_flags(flags, 
extras=['discard','rw']).split(',')) + assert ','.join(result) == 'auto,discard,exec,rw' + + +class TestMkfsFilestore(object): + + def test_non_zero_exit_status(self, stub_call, monkeypatch): + conf.cluster = 'ceph' + monkeypatch.setattr('ceph_volume.util.prepare.system.chown', lambda x: True) + stub_call(([], [], 1)) + with pytest.raises(RuntimeError) as error: + prepare.osd_mkfs_filestore('1', 'asdf-1234', 'keyring') + assert "Command failed with exit code 1" in str(error.value) + + def test_non_zero_exit_formats_command_correctly(self, stub_call, monkeypatch): + conf.cluster = 'ceph' + monkeypatch.setattr('ceph_volume.util.prepare.system.chown', lambda x: True) + stub_call(([], [], 1)) + with pytest.raises(RuntimeError) as error: + prepare.osd_mkfs_filestore('1', 'asdf-1234', 'keyring') + expected = ' '.join([ + 'ceph-osd', + '--cluster', + 'ceph', + '--osd-objectstore', 'filestore', '--mkfs', + '-i', '1', '--monmap', '/var/lib/ceph/osd/ceph-1/activate.monmap', + '--keyfile', '-', '--osd-data', '/var/lib/ceph/osd/ceph-1/', + '--osd-journal', '/var/lib/ceph/osd/ceph-1/journal', + '--osd-uuid', 'asdf-1234', + '--setuser', 'ceph', '--setgroup', 'ceph']) + assert expected in str(error.value) + + +class TestMkfsBluestore(object): + + def test_non_zero_exit_status(self, stub_call, monkeypatch): + conf.cluster = 'ceph' + monkeypatch.setattr('ceph_volume.util.prepare.system.chown', lambda x: True) + stub_call(([], [], 1)) + with pytest.raises(RuntimeError) as error: + prepare.osd_mkfs_bluestore('1', 'asdf-1234', keyring='keyring') + assert "Command failed with exit code 1" in str(error.value) + + def test_non_zero_exit_formats_command_correctly(self, stub_call, monkeypatch): + conf.cluster = 'ceph' + monkeypatch.setattr('ceph_volume.util.prepare.system.chown', lambda x: True) + stub_call(([], [], 1)) + with pytest.raises(RuntimeError) as error: + prepare.osd_mkfs_bluestore('1', 'asdf-1234', keyring='keyring') + expected = ' '.join([ + 'ceph-osd', + '--cluster', + 'ceph', + '--osd-objectstore', 'bluestore', '--mkfs', + '-i', '1', '--monmap', '/var/lib/ceph/osd/ceph-1/activate.monmap', + '--keyfile', '-', '--osd-data', '/var/lib/ceph/osd/ceph-1/', + '--osd-uuid', 'asdf-1234', + '--setuser', 'ceph', '--setgroup', 'ceph']) + assert expected in str(error.value) + + +class TestGetJournalSize(object): + + def test_undefined_size_fallbacks_formatted(self, conf_ceph_stub): + conf_ceph_stub(dedent(""" + [global] + fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f + """)) + result = prepare.get_journal_size() + assert result == '5G' + + def test_undefined_size_fallbacks_unformatted(self, conf_ceph_stub): + conf_ceph_stub(dedent(""" + [global] + fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f + """)) + result = prepare.get_journal_size(lv_format=False) + assert result.gb.as_int() == 5 + + def test_defined_size_unformatted(self, conf_ceph_stub): + conf_ceph_stub(dedent(""" + [global] + fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f + + [osd] + osd journal size = 10240 + """)) + result = prepare.get_journal_size(lv_format=False) + assert result.gb.as_int() == 10 + + def test_defined_size_formatted(self, conf_ceph_stub): + conf_ceph_stub(dedent(""" + [global] + fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f + + [osd] + osd journal size = 10240 + """)) + result = prepare.get_journal_size() + assert result == '10G' + + def test_refuse_tiny_journals(self, conf_ceph_stub): + conf_ceph_stub(dedent(""" + [global] + fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f + + [osd] + osd journal size = 1024 + """)) + with 
pytest.raises(RuntimeError) as error: + prepare.get_journal_size() + assert 'journal sizes must be larger' in str(error.value) + assert 'detected: 1024.00 MB' in str(error.value) diff --git a/src/ceph-volume/ceph_volume/tests/util/test_system.py b/src/ceph-volume/ceph_volume/tests/util/test_system.py new file mode 100644 index 00000000..74999949 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/util/test_system.py @@ -0,0 +1,279 @@ +import os +import pwd +import getpass +import pytest +from textwrap import dedent +from ceph_volume.util import system +from mock.mock import patch + + +class TestMkdirP(object): + + def test_existing_dir_does_not_raise_w_chown(self, monkeypatch, tmpdir): + user = pwd.getpwnam(getpass.getuser()) + uid, gid = user[2], user[3] + monkeypatch.setattr(system, 'get_ceph_user_ids', lambda: (uid, gid,)) + path = str(tmpdir) + system.mkdir_p(path) + assert os.path.isdir(path) + + def test_new_dir_w_chown(self, monkeypatch, tmpdir): + user = pwd.getpwnam(getpass.getuser()) + uid, gid = user[2], user[3] + monkeypatch.setattr(system, 'get_ceph_user_ids', lambda: (uid, gid,)) + path = os.path.join(str(tmpdir), 'new') + system.mkdir_p(path) + assert os.path.isdir(path) + + def test_existing_dir_does_not_raise_no_chown(self, tmpdir): + path = str(tmpdir) + system.mkdir_p(path, chown=False) + assert os.path.isdir(path) + + def test_new_dir_no_chown(self, tmpdir): + path = os.path.join(str(tmpdir), 'new') + system.mkdir_p(path, chown=False) + assert os.path.isdir(path) + + +@pytest.fixture +def fake_proc(tmpdir, monkeypatch): + PROCDIR = str(tmpdir) + proc_path = os.path.join(PROCDIR, 'mounts') + with open(proc_path, 'w') as f: + f.write(dedent("""nfsd /proc/fs/nfsd nfsd rw,relatime 0 0 + rootfs / rootfs rw 0 0 + sysfs /sys sysfs rw,seclabel,nosuid,nodev,noexec,relatime 0 0 + proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0 + devtmpfs /dev devtmpfs rw,seclabel,nosuid,size=238292k,nr_inodes=59573,mode=755 0 0 + securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0 + tmpfs /dev/shm tmpfs rw,seclabel,nosuid,nodev 0 0 + devpts /dev/pts devpts rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0 + tmpfs /run tmpfs rw,seclabel,nosuid,nodev,mode=755 0 0 + tmpfs /sys/fs/cgroup tmpfs ro,seclabel,nosuid,nodev,noexec,mode=755 0 0 + cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0 + cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0 + configfs /sys/kernel/config configfs rw,relatime 0 0 + /dev/mapper/VolGroup00-LogVol00 / xfs rw,seclabel,relatime,attr2,inode64,noquota 0 0 + selinuxfs /sys/fs/selinux selinuxfs rw,relatime 0 0 + debugfs /sys/kernel/debug debugfs rw,relatime 0 0 + hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0 + mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0 + sunrpc /far/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0 + /dev/sde4 /two/field/path + nfsd /proc/fs/nfsd nfsd rw,relatime 0 0 + /dev/sde2 /boot xfs rw,seclabel,relatime,attr2,inode64,noquota 0 0 + tmpfs /far/lib/ceph/osd/ceph-5 tmpfs rw,seclabel,relatime 0 0 + tmpfs /far/lib/ceph/osd/ceph-7 tmpfs rw,seclabel,relatime 0 0 + /dev/sda1 /far/lib/ceph/osd/ceph-0 xfs rw,seclabel,noatime,attr2,inode64,noquota 0 0 + tmpfs /run/user/1000 tmpfs rw,seclabel,nosuid,nodev,relatime,size=50040k,mode=700,uid=1000,gid=1000 0 0 + /dev/sdc2 /boot xfs rw,seclabel,relatime,attr2,inode64,noquota 0 0 + tmpfs /run/user/1000 tmpfs 
rw,seclabel,mode=700,uid=1000,gid=1000 0 0""")) + monkeypatch.setattr(system, 'PROCDIR', PROCDIR) + monkeypatch.setattr(os.path, 'exists', lambda x: True) + + +class TestPathIsMounted(object): + + def test_is_mounted(self, fake_proc): + assert system.path_is_mounted('/boot') is True + + def test_is_not_mounted(self, fake_proc): + assert system.path_is_mounted('/far/fib/feph') is False + + def test_is_not_mounted_at_destination(self, fake_proc): + assert system.path_is_mounted('/boot', destination='/dev/sda1') is False + + def test_is_mounted_at_destination(self, fake_proc): + assert system.path_is_mounted('/boot', destination='/dev/sdc2') is True + + +class TestDeviceIsMounted(object): + + def test_is_mounted(self, fake_proc): + assert system.device_is_mounted('/dev/sda1') is True + + def test_path_is_not_device(self, fake_proc): + assert system.device_is_mounted('/far/lib/ceph/osd/ceph-7') is False + + def test_is_not_mounted_at_destination(self, fake_proc): + assert system.device_is_mounted('/dev/sda1', destination='/far/lib/ceph/osd/test-1') is False + + def test_is_mounted_at_destination(self, fake_proc): + assert system.device_is_mounted('/dev/sda1', destination='/far/lib/ceph/osd/ceph-7') is False + + def test_is_realpath_dev_mounted_at_destination(self, fake_proc, monkeypatch): + monkeypatch.setattr(system.os.path, 'realpath', lambda x: '/dev/sda1' if 'foo' in x else x) + result = system.device_is_mounted('/dev/maper/foo', destination='/far/lib/ceph/osd/ceph-0') + assert result is True + + def test_is_realpath_path_mounted_at_destination(self, fake_proc, monkeypatch): + monkeypatch.setattr( + system.os.path, 'realpath', + lambda x: '/far/lib/ceph/osd/ceph-0' if 'symlink' in x else x) + result = system.device_is_mounted('/dev/sda1', destination='/symlink/lib/ceph/osd/ceph-0') + assert result is True + + +class TestGetMounts(object): + + def test_not_mounted(self, tmpdir, monkeypatch): + PROCDIR = str(tmpdir) + proc_path = os.path.join(PROCDIR, 'mounts') + with open(proc_path, 'w') as f: + f.write('') + monkeypatch.setattr(system, 'PROCDIR', PROCDIR) + assert system.get_mounts() == {} + + def test_is_mounted_(self, fake_proc): + result = system.get_mounts() + assert result['/dev/sdc2'] == ['/boot'] + + def test_ignores_two_fields(self, fake_proc): + result = system.get_mounts() + assert result.get('/dev/sde4') is None + + def test_tmpfs_is_reported(self, fake_proc): + result = system.get_mounts() + assert result['tmpfs'][0] == '/dev/shm' + + def test_non_skip_devs_arent_reported(self, fake_proc): + result = system.get_mounts() + assert result.get('cgroup') is None + + def test_multiple_mounts_are_appended(self, fake_proc): + result = system.get_mounts() + assert len(result['tmpfs']) == 7 + + def test_nonexistent_devices_are_skipped(self, tmpdir, monkeypatch): + PROCDIR = str(tmpdir) + proc_path = os.path.join(PROCDIR, 'mounts') + with open(proc_path, 'w') as f: + f.write(dedent("""nfsd /proc/fs/nfsd nfsd rw,relatime 0 0 + /dev/sda1 /far/lib/ceph/osd/ceph-0 xfs rw,attr2,inode64,noquota 0 0 + /dev/sda2 /far/lib/ceph/osd/ceph-1 xfs rw,attr2,inode64,noquota 0 0""")) + monkeypatch.setattr(system, 'PROCDIR', PROCDIR) + monkeypatch.setattr(os.path, 'exists', lambda x: False if x == '/dev/sda1' else True) + result = system.get_mounts() + assert result.get('/dev/sda1') is None + + +class TestIsBinary(object): + + def test_is_binary(self, tmpfile): + binary_path = tmpfile(contents='asd\n\nlkjh\x00') + assert system.is_binary(binary_path) + + def test_is_not_binary(self, tmpfile): + binary_path 
= tmpfile(contents='asd\n\nlkjh0') + assert system.is_binary(binary_path) is False + + +class TestGetFileContents(object): + + def test_path_does_not_exist(self, tmpdir): + filepath = os.path.join(str(tmpdir), 'doesnotexist') + assert system.get_file_contents(filepath, 'default') == 'default' + + def test_path_has_contents(self, tmpfile): + interesting_file = tmpfile(contents="1") + result = system.get_file_contents(interesting_file) + assert result == "1" + + def test_path_has_multiline_contents(self, tmpfile): + interesting_file = tmpfile(contents="0\n1") + result = system.get_file_contents(interesting_file) + assert result == "0\n1" + + def test_exception_returns_default(self, tmpfile): + interesting_file = tmpfile(contents="0") + # remove read, causes IOError + os.chmod(interesting_file, 0o000) + result = system.get_file_contents(interesting_file) + assert result == '' + + +class TestWhich(object): + + def test_executable_exists_but_is_not_file(self, monkeypatch): + monkeypatch.setattr(system.os.path, 'isfile', lambda x: False) + monkeypatch.setattr(system.os.path, 'exists', lambda x: True) + assert system.which('exedir') == 'exedir' + + def test_executable_does_not_exist(self, monkeypatch): + monkeypatch.setattr(system.os.path, 'isfile', lambda x: False) + monkeypatch.setattr(system.os.path, 'exists', lambda x: False) + assert system.which('exedir') == 'exedir' + + def test_executable_exists_as_file(self, monkeypatch): + monkeypatch.setattr(system.os, 'getenv', lambda x, y: '') + monkeypatch.setattr(system.os.path, 'isfile', lambda x: x != 'ceph') + monkeypatch.setattr(system.os.path, 'exists', lambda x: x != 'ceph') + assert system.which('ceph') == '/usr/local/bin/ceph' + + def test_warnings_when_executable_isnt_matched(self, monkeypatch, capsys): + monkeypatch.setattr(system.os.path, 'isfile', lambda x: True) + monkeypatch.setattr(system.os.path, 'exists', lambda x: False) + system.which('exedir') + cap = capsys.readouterr() + assert 'Executable exedir not in PATH' in cap.err + +@pytest.fixture +def stub_which(monkeypatch): + def apply(value='/bin/restorecon'): + monkeypatch.setattr(system, 'which', lambda x: value) + return apply + + +# python2 has no FileNotFoundError +try: + FileNotFoundError +except NameError: + FileNotFoundError = OSError + + +class TestSetContext(object): + + def setup(self): + try: + os.environ.pop('CEPH_VOLUME_SKIP_RESTORECON') + except KeyError: + pass + + @pytest.mark.parametrize('value', ['1', 'True', 'true', 'TRUE', 'yes']) + def test_set_context_skips(self, stub_call, fake_run, value): + stub_call(('', '', 0)) + os.environ['CEPH_VOLUME_SKIP_RESTORECON'] = value + system.set_context('/tmp/foo') + assert fake_run.calls == [] + + @pytest.mark.parametrize('value', ['0', 'False', 'false', 'FALSE', 'no']) + def test_set_context_doesnt_skip_with_env(self, stub_call, stub_which, fake_run, value): + stub_call(('', '', 0)) + stub_which() + os.environ['CEPH_VOLUME_SKIP_RESTORECON'] = value + system.set_context('/tmp/foo') + assert len(fake_run.calls) + + def test_set_context_skips_on_executable(self, stub_call, stub_which, fake_run): + stub_call(('', '', 0)) + stub_which('restorecon') + system.set_context('/tmp/foo') + assert fake_run.calls == [] + + def test_set_context_no_skip_on_executable(self, stub_call, stub_which, fake_run): + stub_call(('', '', 0)) + stub_which('/bin/restorecon') + system.set_context('/tmp/foo') + assert len(fake_run.calls) + + @patch('ceph_volume.process.call') + def test_selinuxenabled_doesnt_exist(self, mocked_call, fake_run): + 
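+        # emulate a system without the selinuxenabled executable: process.call raises FileNotFoundError and set_context is expected to bail out without invoking anything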
mocked_call.side_effect = FileNotFoundError() + system.set_context('/tmp/foo') + assert fake_run.calls == [] + + def test_selinuxenabled_is_not_enabled(self, stub_call, fake_run): + stub_call(('', '', 1)) + system.set_context('/tmp/foo') + assert fake_run.calls == [] diff --git a/src/ceph-volume/ceph_volume/tests/util/test_util.py b/src/ceph-volume/ceph_volume/tests/util/test_util.py new file mode 100644 index 00000000..1a094d33 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/util/test_util.py @@ -0,0 +1,116 @@ +import pytest +from ceph_volume import util + + +class TestAsBytes(object): + + def test_bytes_just_gets_returned(self): + bytes_string = "contents".encode('utf-8') + assert util.as_bytes(bytes_string) == bytes_string + + def test_string_gets_converted_to_bytes(self): + result = util.as_bytes('contents') + assert isinstance(result, bytes) + + +class TestStrToInt(object): + + def test_passing_a_float_str_comma(self): + result = util.str_to_int("1,99") + assert result == 1 + + def test_passing_a_float_does_not_round_comma(self): + result = util.str_to_int("1,99", round_down=False) + assert result == 2 + + @pytest.mark.parametrize("value", ['2', 2]) + def test_passing_an_int(self, value): + result = util.str_to_int(value) + assert result == 2 + + @pytest.mark.parametrize("value", ['1.99', 1.99]) + def test_passing_a_float(self, value): + result = util.str_to_int(value) + assert result == 1 + + @pytest.mark.parametrize("value", ['1.99', 1.99]) + def test_passing_a_float_does_not_round(self, value): + result = util.str_to_int(value, round_down=False) + assert result == 2 + + def test_text_is_not_an_integer_like(self): + with pytest.raises(RuntimeError) as error: + util.str_to_int("1.4GB") + assert str(error.value) == "Unable to convert to integer: '1.4GB'" + + def test_input_is_not_string(self): + with pytest.raises(RuntimeError) as error: + util.str_to_int(None) + assert str(error.value) == "Unable to convert to integer: 'None'" + + +def true_responses(upper_casing=False): + if upper_casing: + return ['Y', 'YES', ''] + return ['y', 'yes', ''] + + +def false_responses(upper_casing=False): + if upper_casing: + return ['N', 'NO'] + return ['n', 'no'] + + +def invalid_responses(): + return [9, 0.1, 'h', [], {}, None] + + +class TestStrToBool(object): + + @pytest.mark.parametrize('response', true_responses()) + def test_trueish(self, response): + assert util.str_to_bool(response) is True + + @pytest.mark.parametrize('response', false_responses()) + def test_falseish(self, response): + assert util.str_to_bool(response) is False + + @pytest.mark.parametrize('response', true_responses(True)) + def test_trueish_upper(self, response): + assert util.str_to_bool(response) is True + + @pytest.mark.parametrize('response', false_responses(True)) + def test_falseish_upper(self, response): + assert util.str_to_bool(response) is False + + @pytest.mark.parametrize('response', invalid_responses()) + def test_invalid(self, response): + with pytest.raises(ValueError): + util.str_to_bool(response) + + +class TestPromptBool(object): + + @pytest.mark.parametrize('response', true_responses()) + def test_trueish(self, response): + fake_input = lambda x: response + qx = 'what the what?' + assert util.prompt_bool(qx, input_=fake_input) is True + + @pytest.mark.parametrize('response', false_responses()) + def test_falseish(self, response): + fake_input = lambda x: response + qx = 'what the what?' 
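+        # prompt_bool hands the rendered question to input_ and maps the reply through str_to_bool, so every false-ish response must come back False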
+ assert util.prompt_bool(qx, input_=fake_input) is False + + def test_try_again_true(self): + responses = ['g', 'h', 'y'] + fake_input = lambda x: responses.pop(0) + qx = 'what the what?' + assert util.prompt_bool(qx, input_=fake_input) is True + + def test_try_again_false(self): + responses = ['g', 'h', 'n'] + fake_input = lambda x: responses.pop(0) + qx = 'what the what?' + assert util.prompt_bool(qx, input_=fake_input) is False diff --git a/src/ceph-volume/ceph_volume/util/__init__.py b/src/ceph-volume/ceph_volume/util/__init__.py new file mode 100644 index 00000000..1b5afe97 --- /dev/null +++ b/src/ceph-volume/ceph_volume/util/__init__.py @@ -0,0 +1,108 @@ +import logging +from math import floor +from ceph_volume import terminal + +try: + input = raw_input # pylint: disable=redefined-builtin +except NameError: + pass + +logger = logging.getLogger(__name__) + + +def as_string(string): + """ + Ensure that whatever type of string is incoming, it is returned as an + actual string, versus 'bytes' which Python 3 likes to use. + """ + if isinstance(string, bytes): + # we really ignore here if we can't properly decode with utf-8 + return string.decode('utf-8', 'ignore') + return string + + +def as_bytes(string): + """ + Ensure that whatever type of string is incoming, it is returned as bytes, + encoding to utf-8 otherwise + """ + if isinstance(string, bytes): + return string + return string.encode('utf-8', errors='ignore') + + +def str_to_int(string, round_down=True): + """ + Parses a string number into an integer, optionally converting to a float + and rounding down. + + Some LVM values may come with a comma instead of a dot to define decimals. + This function normalizes a comma into a dot + """ + error_msg = "Unable to convert to integer: '%s'" % str(string) + try: + integer = float(string.replace(',', '.')) + except AttributeError: + # this might be a integer already, so try to use it, otherwise raise + # the original exception + if isinstance(string, (int, float)): + integer = string + else: + logger.exception(error_msg) + raise RuntimeError(error_msg) + except (TypeError, ValueError): + logger.exception(error_msg) + raise RuntimeError(error_msg) + + if round_down: + integer = floor(integer) + else: + integer = round(integer) + return int(integer) + + +def str_to_bool(val): + """ + Convert a string representation of truth to True or False + + True values are 'y', 'yes', or ''; case-insensitive + False values are 'n', or 'no'; case-insensitive + Raises ValueError if 'val' is anything else. + """ + true_vals = ['yes', 'y', ''] + false_vals = ['no', 'n'] + try: + val = val.lower() + except AttributeError: + val = str(val).lower() + if val in true_vals: + return True + elif val in false_vals: + return False + else: + raise ValueError("Invalid input value: %s" % val) + + +def prompt_bool(question, input_=None): + """ + Interface to prompt a boolean (or boolean-like) response from a user. + Usually a confirmation. + """ + input_prompt = input_ or input + prompt_format = '--> {question} '.format(question=question) + response = input_prompt(prompt_format) + try: + return str_to_bool(response) + except ValueError: + terminal.error('Valid true responses are: y, yes, <Enter>') + terminal.error('Valid false responses are: n, no') + terminal.error('That response was invalid, please try again') + return prompt_bool(question, input_=input_prompt) + +def merge_dict(x, y): + """ + Return two dicts merged + """ + z = x.copy() + z.update(y) + return z
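A quick usage sketch of the helpers defined above (an illustration for the reader, not part of the patch; values follow the tests in test_util.py):

    >>> from ceph_volume import util
    >>> util.str_to_int('1,99')                    # comma normalized, rounded down
    1
    >>> util.str_to_int('1.99', round_down=False)  # rounds instead of flooring
    2
    >>> util.str_to_bool('YES')                    # case-insensitive truthiness
    True
    >>> util.merge_dict({'a': 1}, {'b': 2})
    {'a': 1, 'b': 2}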
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/util/arg_validators.py b/src/ceph-volume/ceph_volume/util/arg_validators.py new file mode 100644 index 00000000..94cb4f69 --- /dev/null +++ b/src/ceph-volume/ceph_volume/util/arg_validators.py @@ -0,0 +1,150 @@ +import argparse +import os +from ceph_volume import terminal +from ceph_volume import decorators +from ceph_volume.util import disk +from ceph_volume.util.device import Device + + +class ValidDevice(object): + + def __init__(self, as_string=False, gpt_ok=False): + self.as_string = as_string + self.gpt_ok = gpt_ok + + def __call__(self, dev_path): + device = self._is_valid_device(dev_path) + return self._format_device(device) + + def _format_device(self, device): + if self.as_string: + if device.is_lv: + # all codepaths expect an lv path to be returned in this format + return "{}/{}".format(device.vg_name, device.lv_name) + return device.path + return device + + def _is_valid_device(self, dev_path): + device = Device(dev_path) + error = None + if not device.exists: + error = "Unable to proceed with non-existing device: %s" % dev_path + # FIXME this is not a nice API, this validator was meant to catch any + # non-existing devices upfront, not check for gpt headers. Now this + # needs to optionally skip checking gpt headers which is beyond + # verifying if the device exists. The better solution would be to + # configure this with a list of checks that can be excluded/included on + # __init__ + elif device.has_gpt_headers and not self.gpt_ok: + error = "GPT headers found, they must be removed on: %s" % dev_path + + if error: + raise argparse.ArgumentError(None, error) + + return device + + +class ValidBatchDevice(ValidDevice): + + def __call__(self, dev_path): + dev = self._is_valid_device(dev_path) + if dev.is_partition: + raise argparse.ArgumentError( + None, + '{} is a partition, please pass ' + 'LVs or raw block devices'.format(dev_path)) + return self._format_device(dev) + + +class OSDPath(object): + """ + Validate path exists and it looks like an OSD directory. + """ + + @decorators.needs_root + def __call__(self, string): + if not os.path.exists(string): + error = "Path does not exist: %s" % string + raise argparse.ArgumentError(None, error) + + arg_is_partition = disk.is_partition(string) + if arg_is_partition: + return os.path.abspath(string) + absolute_path = os.path.abspath(string) + if not os.path.isdir(absolute_path): + error = "Argument is not a directory or device which is required to scan" + raise argparse.ArgumentError(None, error) + key_files = ['ceph_fsid', 'fsid', 'keyring', 'ready', 'type', 'whoami'] + dir_files = os.listdir(absolute_path) + for key_file in key_files: + if key_file not in dir_files: + terminal.error('All following files must exist in path: %s' % ' '.join(key_files)) + error = "Required file (%s) was not found in OSD dir path: %s" % ( + key_file, + absolute_path + ) + raise argparse.ArgumentError(None, error) + + return os.path.abspath(string) + + +def exclude_group_options(parser, groups, argv=None): + """ + ``argparse`` has the ability to check for mutually exclusive options, but + it only allows a basic XOR behavior: only one flag can be used from + a defined group of options. This doesn't help when two groups of options + need to be separated. For example, with filestore and bluestore, neither + set can be used in conjunction with the other set. + + This helper validator will consume the parser to inspect the group flags, + and it will group them together from ``groups``. 
This allows proper error + reporting, matching each incompatible flag with its group name. + + :param parser: The argparse object, once it has configured all flags. It is + required to contain the group names being used to validate. + :param groups: A list of group names (at least two), with the same used for + ``add_argument_group`` + :param argv: Consume the args (sys.argv) directly from this argument + + .. note: **Unfortunately** this will not be able to validate correctly when + using default flags. In the case of filestore vs. bluestore, ceph-volume + defaults to --bluestore, but we can't check that programmatically, we can + only parse the flags seen via argv + """ + # Reduce the parser groups to only the groups we need to intersect + parser_groups = [g for g in parser._action_groups if g.title in groups] + # A mapping of the group name to flags/options + group_flags = {} + flags_to_verify = [] + for group in parser_groups: + # option groups may have more than one item in ``option_strings``, this + # will loop over ``_group_actions`` which contains the + # ``option_strings``, like ``['--filestore']`` + group_flags[group.title] = [ + option for group_action in group._group_actions + for option in group_action.option_strings + ] + + # Gather all the flags present in the groups so that we only check on those. + for flags in group_flags.values(): + flags_to_verify.extend(flags) + + seen = [] + last_flag = None + last_group = None + for flag in argv: + if flag not in flags_to_verify: + continue + for group_name, flags in group_flags.items(): + if flag in flags: + seen.append(group_name) + # We are mutually excluding groups, so having more than 1 group + # in ``seen`` means we must raise an error + if len(set(seen)) == len(groups): + terminal.warning('Incompatible flags were found, some values may get ignored') + msg = 'Cannot use %s (%s) with %s (%s)' % ( + last_flag, last_group, flag, group_name + ) + terminal.warning(msg) + last_group = group_name + last_flag = flag diff --git a/src/ceph-volume/ceph_volume/util/constants.py b/src/ceph-volume/ceph_volume/util/constants.py new file mode 100644 index 00000000..3ec819ec --- /dev/null +++ b/src/ceph-volume/ceph_volume/util/constants.py @@ -0,0 +1,46 @@ + +# mount flags +mount = dict( + xfs=['rw', 'noatime' , 'inode64'] +) + + +# format flags +mkfs = dict( + xfs=[ + # force overwriting previous fs + '-f', + # set the inode size to 2kb + '-i', 'size=2048', + ], +) + +# The fantastical world of ceph-disk labels, they should give you the +# collywobbles +ceph_disk_guids = { + # luks + '45b0969e-9b03-4f30-b4c6-35865ceff106': {'type': 'journal', 'encrypted': True, 'encryption_type': 'luks'}, + 'cafecafe-9b03-4f30-b4c6-35865ceff106': {'type': 'block', 'encrypted': True, 'encryption_type': 'luks'}, + '166418da-c469-4022-adf4-b30afd37f176': {'type': 'block.db', 'encrypted': True, 'encryption_type': 'luks'}, + '86a32090-3647-40b9-bbbd-38d8c573aa86': {'type': 'block.wal', 'encrypted': True, 'encryption_type': 'luks'}, + '4fbd7e29-9d25-41b8-afd0-35865ceff05d': {'type': 'data', 'encrypted': True, 'encryption_type': 'luks'}, + # plain + '45b0969e-9b03-4f30-b4c6-5ec00ceff106': {'type': 'journal', 'encrypted': True, 'encryption_type': 'plain'}, + 'cafecafe-9b03-4f30-b4c6-5ec00ceff106': {'type': 'block', 'encrypted': True, 'encryption_type': 'plain'}, + '93b0052d-02d9-4d8a-a43b-33a3ee4dfbc3': {'type': 'block.db', 'encrypted': True, 'encryption_type': 'plain'}, + '306e8683-4fe2-4330-b7c0-00a917c16966': {'type': 'block.wal', 'encrypted': True, 
'encryption_type': 'plain'}, + '4fbd7e29-9d25-41b8-afd0-5ec00ceff05d': {'type': 'data', 'encrypted': True, 'encryption_type': 'plain'}, + # regular guids that differ from plain + 'fb3aabf9-d25f-47cc-bf5e-721d1816496b': {'type': 'lockbox', 'encrypted': False, 'encryption_type': None}, + '30cd0809-c2b2-499c-8879-2d6b78529876': {'type': 'block.db', 'encrypted': False, 'encryption_type': None}, + '5ce17fce-4087-4169-b7ff-056cc58473f9': {'type': 'block.wal', 'encrypted': False, 'encryption_type': None}, + '4fbd7e29-9d25-41b8-afd0-062c0ceff05d': {'type': 'data', 'encrypted': False, 'encryption_type': None}, + 'cafecafe-9b03-4f30-b4c6-b4b80ceff106': {'type': 'block', 'encrypted': False, 'encryption_type': None}, + # multipath + '01b41e1b-002a-453c-9f17-88793989ff8f': {'type': 'block.wal', 'encrypted': False, 'encryption_type': None}, + 'ec6d6385-e346-45dc-be91-da2a7c8b3261': {'type': 'block.wal', 'encrypted': False, 'encryption_type': None}, + '45b0969e-8ae0-4982-bf9d-5a8d867af560': {'type': 'journal', 'encrypted': False, 'encryption_type': None}, + '4fbd7e29-8ae0-4982-bf9d-5a8d867af560': {'type': 'data', 'encrypted': False, 'encryption_type': None}, + '7f4a666a-16f3-47a2-8445-152ef4d03f6c': {'type': 'lockbox', 'encrypted': False, 'encryption_type': None}, + 'cafecafe-8ae0-4982-bf9d-5a8d867af560': {'type': 'block', 'encrypted': False, 'encryption_type': None}, +} diff --git a/src/ceph-volume/ceph_volume/util/device.py b/src/ceph-volume/ceph_volume/util/device.py new file mode 100644 index 00000000..c06244dc --- /dev/null +++ b/src/ceph-volume/ceph_volume/util/device.py @@ -0,0 +1,549 @@ +# -*- coding: utf-8 -*- + +import os +from functools import total_ordering +from ceph_volume import sys_info, process +from ceph_volume.api import lvm +from ceph_volume.util import disk, system +from ceph_volume.util.constants import ceph_disk_guids + +report_template = """ +{dev:<25} {size:<12} {rot!s:<7} {available!s:<9} {model}""" + + +def encryption_status(abspath): + """ + Helper function to run ``encryption.status()``. It is done here to avoid + a circular import issue (encryption module imports from this module) and to + ease testing by allowing monkeypatching of this function. 
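+ + A test could, for example, patch this indirection rather than the ``encryption`` module itself (an illustrative sketch, not part of this module):: + + from ceph_volume.util import device as device_module + device_module.encryption_status = lambda path: {'type': 'LUKS1'}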
+ """ + from ceph_volume.util import encryption + return encryption.status(abspath) + + +class Devices(object): + """ + A container for Device instances with reporting + """ + + def __init__(self, filter_for_batch=False): + if not sys_info.devices: + sys_info.devices = disk.get_devices() + self.devices = [Device(k) for k in + sys_info.devices.keys()] + if filter_for_batch: + self.devices = [d for d in self.devices if d.available_lvm_batch] + + def pretty_report(self): + output = [ + report_template.format( + dev='Device Path', + size='Size', + rot='rotates', + model='Model name', + available='available', + )] + for device in sorted(self.devices): + output.append(device.report()) + return ''.join(output) + + def json_report(self): + output = [] + for device in sorted(self.devices): + output.append(device.json_report()) + return output + +@total_ordering +class Device(object): + + pretty_template = """ + {attr:<25} {value}""" + + report_fields = [ + 'rejected_reasons', + 'available', + 'path', + 'sys_api', + 'device_id', + ] + pretty_report_sys_fields = [ + 'human_readable_size', + 'model', + 'removable', + 'ro', + 'rotational', + 'sas_address', + 'scheduler_mode', + 'vendor', + ] + + # define some class variables; mostly to enable the use of autospec in + # unittests + lvs = [] + + def __init__(self, path): + self.path = path + # LVs can have a vg/lv path, while disks will have /dev/sda + self.abspath = path + self.lv_api = None + self.lvs = [] + self.vgs = [] + self.vg_name = None + self.lv_name = None + self.disk_api = {} + self.blkid_api = {} + self.sys_api = {} + self._exists = None + self._is_lvm_member = None + self._parse() + + self.available_lvm, self.rejected_reasons_lvm = self._check_lvm_reject_reasons() + self.available_raw, self.rejected_reasons_raw = self._check_raw_reject_reasons() + self.available = self.available_lvm and self.available_raw + self.rejected_reasons = list(set(self.rejected_reasons_lvm + + self.rejected_reasons_raw)) + + self.device_id = self._get_device_id() + + def __lt__(self, other): + ''' + Implementing this method and __eq__ allows the @total_ordering + decorator to turn the Device class into a totally ordered type. + This can slower then implementing all comparison operations. + This sorting should put available devices before unavailable devices + and sort on the path otherwise (str sorting). + ''' + if self.available == other.available: + return self.path < other.path + return self.available and not other.available + + def __eq__(self, other): + return self.path == other.path + + def __hash__(self): + return hash(self.path) + + def _parse(self): + if not sys_info.devices: + sys_info.devices = disk.get_devices() + self.sys_api = sys_info.devices.get(self.abspath, {}) + if not self.sys_api: + # if no device was found check if we are a partition + partname = self.abspath.split('/')[-1] + for device, info in sys_info.devices.items(): + part = info['partitions'].get(partname, {}) + if part: + self.sys_api = part + break + + # if the path is not absolute, we have 'vg/lv', let's use LV name + # to get the LV. 
+ if self.path[0] == '/': + lv = lvm.get_first_lv(filters={'lv_path': self.path}) + else: + vgname, lvname = self.path.split('/') + lv = lvm.get_first_lv(filters={'lv_name': lvname, + 'vg_name': vgname}) + if lv: + self.lv_api = lv + self.lvs = [lv] + self.abspath = lv.lv_path + self.vg_name = lv.vg_name + self.lv_name = lv.name + else: + dev = disk.lsblk(self.path) + self.blkid_api = disk.blkid(self.path) + self.disk_api = dev + device_type = dev.get('TYPE', '') + # always check if this is an lvm member + if device_type in ['part', 'disk']: + self._set_lvm_membership() + + self.ceph_disk = CephDiskDevice(self) + + def __repr__(self): + prefix = 'Unknown' + if self.is_lv: + prefix = 'LV' + elif self.is_partition: + prefix = 'Partition' + elif self.is_device: + prefix = 'Raw Device' + return '<%s: %s>' % (prefix, self.abspath) + + def pretty_report(self): + def format_value(v): + if isinstance(v, list): + return ', '.join(v) + else: + return v + def format_key(k): + return k.strip('_').replace('_', ' ') + output = ['\n====== Device report {} ======\n'.format(self.path)] + output.extend( + [self.pretty_template.format( + attr=format_key(k), + value=format_value(v)) for k, v in vars(self).items() if k in + self.report_fields and k != 'disk_api' and k != 'sys_api'] ) + output.extend( + [self.pretty_template.format( + attr=format_key(k), + value=format_value(v)) for k, v in self.sys_api.items() if k in + self.pretty_report_sys_fields]) + for lv in self.lvs: + output.append(""" + --- Logical Volume ---""") + output.extend( + [self.pretty_template.format( + attr=format_key(k), + value=format_value(v)) for k, v in lv.report().items()]) + return ''.join(output) + + def report(self): + return report_template.format( + dev=self.abspath, + size=self.size_human, + rot=self.rotational, + available=self.available, + model=self.model, + ) + + def json_report(self): + output = {k.strip('_'): v for k, v in vars(self).items() if k in + self.report_fields} + output['lvs'] = [lv.report() for lv in self.lvs] + return output + + def _get_device_id(self): + """ + Please keep this implementation in sync with get_device_id() in + src/common/blkdev.cc + """ + props = ['ID_VENDOR', 'ID_MODEL', 'ID_MODEL_ENC', 'ID_SERIAL_SHORT', 'ID_SERIAL', + 'ID_SCSI_SERIAL'] + p = disk.udevadm_property(self.abspath, props) + if p.get('ID_MODEL','').startswith('LVM PV '): + p['ID_MODEL'] = p.get('ID_MODEL_ENC', '').replace('\\x20', ' ').strip() + if 'ID_VENDOR' in p and 'ID_MODEL' in p and 'ID_SCSI_SERIAL' in p: + dev_id = '_'.join([p['ID_VENDOR'], p['ID_MODEL'], + p['ID_SCSI_SERIAL']]) + elif 'ID_MODEL' in p and 'ID_SERIAL_SHORT' in p: + dev_id = '_'.join([p['ID_MODEL'], p['ID_SERIAL_SHORT']]) + elif 'ID_SERIAL' in p: + dev_id = p['ID_SERIAL'] + if dev_id.startswith('MTFD'): + # Micron NVMes hide the vendor + dev_id = 'Micron_' + dev_id + else: + # the else branch should fallback to using sysfs and ioctl to + # retrieve device_id on FreeBSD. Still figuring out if/how the + # python ioctl implementation does that on FreeBSD + dev_id = '' + dev_id = dev_id.replace(' ', '_') + return dev_id + + def _set_lvm_membership(self): + if self._is_lvm_member is None: + # this is contentious, if a PV is recognized by LVM but has no + # VGs, should we consider it as part of LVM? We choose not to + # here, because most likely, we need to use VGs from this PV.
+ self._is_lvm_member = False + for path in self._get_pv_paths(): + vgs = lvm.get_device_vgs(path) + if vgs: + self.vgs.extend(vgs) + # a pv can only be in one vg, so this should be safe + # FIXME: While the above assumption holds, sda1 and sda2 + # can each host a PV and VG. I think the vg_name property is + # actually unused (not 100% sure) and can simply be removed + self.vg_name = vgs[0] + self._is_lvm_member = True + self.lvs.extend(lvm.get_device_lvs(path)) + return self._is_lvm_member + + def _get_pv_paths(self): + """ + For block devices LVM can reside on the raw block device or on a + partition. Return a list of paths to be checked for a pv. + """ + paths = [self.abspath] + path_dir = os.path.dirname(self.abspath) + for part in self.sys_api.get('partitions', {}).keys(): + paths.append(os.path.join(path_dir, part)) + return paths + + @property + def exists(self): + return os.path.exists(self.abspath) + + @property + def has_gpt_headers(self): + return self.blkid_api.get("PTTYPE") == "gpt" + + @property + def rotational(self): + rotational = self.sys_api.get('rotational') + if rotational is None: + # fall back to lsblk if not found in sys_api + # default to '1' if no value is found with lsblk either + rotational = self.disk_api.get('ROTA', '1') + return rotational == '1' + + @property + def model(self): + return self.sys_api['model'] + + @property + def size_human(self): + return self.sys_api['human_readable_size'] + + @property + def size(self): + return self.sys_api['size'] + + @property + def lvm_size(self): + """ + If this device was made into a PV it would lose 1GB in total size + due to the 1GB physical extent size we set when creating volume groups + """ + size = disk.Size(b=self.size) + lvm_size = disk.Size(gb=size.gb.as_int()) - disk.Size(gb=1) + return lvm_size + + @property + def is_lvm_member(self): + if self._is_lvm_member is None: + self._set_lvm_membership() + return self._is_lvm_member + + @property + def is_ceph_disk_member(self): + is_member = self.ceph_disk.is_member + if self.sys_api.get("partitions"): + for part in self.sys_api.get("partitions").keys(): + part = Device("/dev/%s" % part) + if part.is_ceph_disk_member: + is_member = True + break + return is_member + + @property + def has_bluestore_label(self): + out, err, ret = process.call([ + 'ceph-bluestore-tool', 'show-label', + '--dev', self.abspath], verbose_on_failure=False) + if ret: + return False + return True + + @property + def is_mapper(self): + return self.path.startswith(('/dev/mapper', '/dev/dm-')) + + @property + def is_lv(self): + return self.lv_api is not None + + @property + def is_partition(self): + if self.disk_api: + return self.disk_api['TYPE'] == 'part' + elif self.blkid_api: + return self.blkid_api['TYPE'] == 'part' + return False + + @property + def is_device(self): + api = None + if self.disk_api: + api = self.disk_api + elif self.blkid_api: + api = self.blkid_api + if api: + is_device = api['TYPE'] == 'device' + is_disk = api['TYPE'] == 'disk' + if is_device or is_disk: + return True + return False + + @property + def is_acceptable_device(self): + return self.is_device or self.is_partition + + @property + def is_encrypted(self): + """ + Only correct for LVs, device mappers, and partitions. Will report a ``None`` + for raw devices. 
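+ + Callers should treat the value as tri-state; an illustrative guard (the device path here is assumed, not from the source):: + + encrypted = Device('/dev/sda1').is_encrypted + if encrypted is None: + # raw device: encryption state is unknown, not merely False + pass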
+ """ + crypt_reports = [self.blkid_api.get('TYPE', ''), self.disk_api.get('FSTYPE', '')] + if self.is_lv: + # if disk APIs are reporting this is encrypted use that: + if 'crypto_LUKS' in crypt_reports: + return True + # if ceph-volume created this, then a tag would let us know + elif self.lv_api.encrypted: + return True + return False + elif self.is_partition: + return 'crypto_LUKS' in crypt_reports + elif self.is_mapper: + active_mapper = encryption_status(self.abspath) + if active_mapper: + # normalize a bit to ensure same values regardless of source + encryption_type = active_mapper['type'].lower().strip('12') # turn LUKS1 or LUKS2 into luks + return True if encryption_type in ['plain', 'luks'] else False + else: + return False + else: + return None + + @property + def used_by_ceph(self): + # only filter out data devices as journals could potentially be reused + osd_ids = [lv.tags.get("ceph.osd_id") is not None for lv in self.lvs + if lv.tags.get("ceph.type") in ["data", "block"]] + return any(osd_ids) + + @property + def vg_free_percent(self): + if self.vgs: + return [vg.free_percent for vg in self.vgs] + else: + return [1] + + @property + def vg_size(self): + if self.vgs: + return [vg.size for vg in self.vgs] + else: + # TODO fix this...we can probably get rid of vg_free + return self.vg_free + + @property + def vg_free(self): + ''' + Returns the free space in all VGs on this device. If no VGs are + present, returns the disk size. + ''' + if self.vgs: + return [vg.free for vg in self.vgs] + else: + # We could also query 'lvmconfig + # --typeconfig full' and use allocations -> physical_extent_size + # value to project the space for a vg + # assuming 4M extents here + extent_size = 4194304 + vg_free = int(self.size / extent_size) * extent_size + if self.size % extent_size == 0: + # If the extent size divides size exactly, deduct on extent for + # LVM metadata + vg_free -= extent_size + return [vg_free] + + def _check_generic_reject_reasons(self): + reasons = [ + ('removable', 1, 'removable'), + ('ro', 1, 'read-only'), + ('locked', 1, 'locked'), + ] + rejected = [reason for (k, v, reason) in reasons if + self.sys_api.get(k, '') == v] + if self.is_acceptable_device: + # reject disks smaller than 5GB + if int(self.sys_api.get('size', 0)) < 5368709120: + rejected.append('Insufficient space (<5GB)') + else: + rejected.append("Device type is not acceptable. It should be raw device or partition") + if self.is_ceph_disk_member: + rejected.append("Used by ceph-disk") + if self.has_bluestore_label: + rejected.append('Has BlueStore device label') + return rejected + + def _check_lvm_reject_reasons(self): + rejected = [] + if self.vgs: + available_vgs = [vg for vg in self.vgs if int(vg.vg_free_count) > 10] + if not available_vgs: + rejected.append('Insufficient space (<10 extents) on vgs') + else: + # only check generic if no vgs are present. 
# Vgs might hold lvs and + # that might cause 'locked' to trigger + rejected.extend(self._check_generic_reject_reasons()) + + return len(rejected) == 0, rejected + + def _check_raw_reject_reasons(self): + rejected = self._check_generic_reject_reasons() + if len(self.vgs) > 0: + rejected.append('LVM detected') + + return len(rejected) == 0, rejected + + @property + def available_lvm_batch(self): + if self.sys_api.get("partitions"): + return False + if system.device_is_mounted(self.path): + return False + return self.is_device or self.is_lv + + +class CephDiskDevice(object): + """ + Detect devices that have been created by ceph-disk, report their type + (journal, data, etc..). Requires a ``Device`` object as input. + """ + + def __init__(self, device): + self.device = device + self._is_ceph_disk_member = None + + @property + def partlabel(self): + """ + In containers, the 'PARTLABEL' attribute might not be detected + correctly via ``lsblk``, so we poke at the value with ``lsblk`` first, + falling back to ``blkid`` (which works correctly in containers). + """ + lsblk_partlabel = self.device.disk_api.get('PARTLABEL') + if lsblk_partlabel: + return lsblk_partlabel + return self.device.blkid_api.get('PARTLABEL', '') + + @property + def parttype(self): + """ + Seems like older versions do not detect PARTTYPE correctly (assuming the + info in util/disk.py#lsblk is still valid). + Simply resolve to using blkid since lsblk will throw an error if asked + for unknown columns + """ + return self.device.blkid_api.get('PARTTYPE', '') + + @property + def is_member(self): + if self._is_ceph_disk_member is None: + if 'ceph' in self.partlabel: + self._is_ceph_disk_member = True + return True + elif self.parttype in ceph_disk_guids.keys(): + return True + return False + return self._is_ceph_disk_member + + @property + def type(self): + types = [ + 'data', 'wal', 'db', 'lockbox', 'journal', + # ceph-disk uses 'ceph block' when placing data in bluestore, but + # keeps the regular OSD files in 'ceph data' :( :( :( :( + 'block', + ] + for t in types: + if t in self.partlabel: + return t + label = ceph_disk_guids.get(self.parttype, {}) + return label.get('type', 'unknown').split('.')[-1] diff --git a/src/ceph-volume/ceph_volume/util/disk.py b/src/ceph-volume/ceph_volume/util/disk.py new file mode 100644 index 00000000..2cf18cb5 --- /dev/null +++ b/src/ceph-volume/ceph_volume/util/disk.py @@ -0,0 +1,804 @@ +import logging +import os +import re +import stat +from ceph_volume import process +from ceph_volume.api import lvm +from ceph_volume.util.system import get_file_contents + + +logger = logging.getLogger(__name__) + + +# The blkid CLI tool has some oddities which prevent having one common call +# to extract the information instead of having separate utilities. The `udev` +# type of output is needed in older versions of blkid (v 2.23) that will not +# work correctly with just the ``-p`` flag to bypass the cache for example. +# Xenial doesn't have this problem as it uses a newer blkid version. + + +def get_partuuid(device): + """ + If a device is a partition, it will probably have a PARTUUID on it that + will persist and can be queried against `blkid` later to detect the actual + device + """ + out, err, rc = process.call( + ['blkid', '-s', 'PARTUUID', '-o', 'value', device] + ) + return ' '.join(out).strip() + + +def _blkid_parser(output): + """ + Parses the output from a system ``blkid`` call, requires output to be + produced using the ``-p`` flag which bypasses the cache, mangling the + names.
These names are corrected to what they would look like without the + ``-p`` flag. + + Normal output:: + + /dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" [...] + """ + # first space-separated item is garbage, gets tossed: + output = ' '.join(output.split()[1:]) + # split again, respecting possible whitespace in quoted values + pairs = output.split('" ') + raw = {} + processed = {} + mapping = { + 'UUID': 'UUID', + 'TYPE': 'TYPE', + 'PART_ENTRY_NAME': 'PARTLABEL', + 'PART_ENTRY_UUID': 'PARTUUID', + 'PART_ENTRY_TYPE': 'PARTTYPE', + 'PTTYPE': 'PTTYPE', + } + + for pair in pairs: + try: + column, value = pair.split('=') + except ValueError: + continue + raw[column] = value.strip().strip('"') + + for key, value in raw.items(): + new_key = mapping.get(key) + if not new_key: + continue + processed[new_key] = value + + return processed + + +def blkid(device): + """ + The blkid interface to its CLI, creating an output similar to what is + expected from ``lsblk``. In most cases, ``lsblk()`` should be the preferred + method for extracting information about a device. There are some corner + cases where it might provide information that is otherwise unavailable. + + The system call uses the ``-p`` flag which bypasses the cache, the caveat + being that the keys produced are named completely differently from the expected + names. + + For example, instead of ``PARTLABEL`` it provides a ``PART_ENTRY_NAME``. + A bit of translation between these known keys is done, which is why + ``lsblk`` should always be preferred: the output provided here is not as + rich, given that a translation of keys is required for a uniform interface + with the ``-p`` flag. + + Label name to expected output chart: + + cache bypass name expected name + + UUID UUID + TYPE TYPE + PART_ENTRY_NAME PARTLABEL + PART_ENTRY_UUID PARTUUID + """ + out, err, rc = process.call( + ['blkid', '-p', device] + ) + return _blkid_parser(' '.join(out)) + + +def get_part_entry_type(device): + """ + Parses the ``ID_PART_ENTRY_TYPE`` from the "low level" (bypasses the cache) + output that uses the ``udev`` type of output. This output is intended to be + used for udev rules, but it is useful in this case as it is the only + consistent way to retrieve the GUID used by ceph-disk to identify devices. + """ + out, err, rc = process.call(['blkid', '-p', '-o', 'udev', device]) + for line in out: + if 'ID_PART_ENTRY_TYPE=' in line: + return line.split('=')[-1].strip() + return '' + + +def get_device_from_partuuid(partuuid): + """ + If a device has a partuuid, query blkid so that it can tell us what that + device is + """ + out, err, rc = process.call( + ['blkid', '-t', 'PARTUUID="%s"' % partuuid, '-o', 'device'] + ) + return ' '.join(out).strip() + + +def remove_partition(device): + """ + Removes a partition using parted + + :param device: A ``Device()`` object + """ + parent_device = '/dev/%s' % device.disk_api['PKNAME'] + udev_info = udevadm_property(device.abspath) + partition_number = udev_info.get('ID_PART_ENTRY_NUMBER') + if not partition_number: + raise RuntimeError('Unable to detect the partition number for device: %s' % device.abspath) + + process.run( + ['parted', parent_device, '--script', '--', 'rm', partition_number] + ) + + +def _stat_is_device(stat_obj): + """ + Helper function that will interpret ``os.stat`` output directly, so that other + functions can call ``os.stat`` once and interpret that result several times + """ + return stat.S_ISBLK(stat_obj) + + +def _lsblk_parser(line): + """ + Parses lines in lsblk output.
Requires output to be in pair mode (``-P`` flag). Lines + need to be whole strings, the line gets split when processed. + + :param line: A string, with the full line from lsblk output + """ + # parse the COLUMN="value" output to construct the dictionary + pairs = line.split('" ') + parsed = {} + for pair in pairs: + try: + column, value = pair.split('=') + except ValueError: + continue + parsed[column] = value.strip().strip().strip('"') + return parsed + + +def device_family(device): + """ + Returns a list of associated devices. It assumes that ``device`` is + a parent device. It is up to the caller to ensure that the device being + used is a parent, not a partition. + """ + labels = ['NAME', 'PARTLABEL', 'TYPE'] + command = ['lsblk', '-P', '-p', '-o', ','.join(labels), device] + out, err, rc = process.call(command) + devices = [] + for line in out: + devices.append(_lsblk_parser(line)) + + return devices + + +def udevadm_property(device, properties=[]): + """ + Query udevadm for information about device properties. + Optionally pass a list of properties to return. A requested property might + not be returned if not present. + + Expected output format:: + # udevadm info --query=property --name=/dev/sda :( + DEVNAME=/dev/sda + DEVTYPE=disk + ID_ATA=1 + ID_BUS=ata + ID_MODEL=SK_hynix_SC311_SATA_512GB + ID_PART_TABLE_TYPE=gpt + ID_PART_TABLE_UUID=c8f91d57-b26c-4de1-8884-0c9541da288c + ID_PATH=pci-0000:00:17.0-ata-3 + ID_PATH_TAG=pci-0000_00_17_0-ata-3 + ID_REVISION=70000P10 + ID_SERIAL=SK_hynix_SC311_SATA_512GB_MS83N71801150416A + TAGS=:systemd: + USEC_INITIALIZED=16117769 + ... + """ + out = _udevadm_info(device) + ret = {} + for line in out: + p, v = line.split('=', 1) + if not properties or p in properties: + ret[p] = v + return ret + + +def _udevadm_info(device): + """ + Call udevadm and return the output + """ + cmd = ['udevadm', 'info', '--query=property', device] + out, _err, _rc = process.call(cmd) + return out + + +def lsblk(device, columns=None, abspath=False): + """ + Create a dictionary of identifying values for a device using ``lsblk``. + Each supported column is a key, in its *raw* format (all uppercase + usually). ``lsblk`` has support for certain "columns" (in blkid these + would be labels), and these columns vary between distributions and + ``lsblk`` versions. The newer versions support a richer set of columns, + while older ones were a bit limited. 
+ + These are a subset of lsblk columns which are known to work on both CentOS 7 and Xenial: + + NAME device name + KNAME internal kernel device name + MAJ:MIN major:minor device number + FSTYPE filesystem type + MOUNTPOINT where the device is mounted + LABEL filesystem LABEL + UUID filesystem UUID + RO read-only device + RM removable device + MODEL device identifier + SIZE size of the device + STATE state of the device + OWNER user name + GROUP group name + MODE device node permissions + ALIGNMENT alignment offset + MIN-IO minimum I/O size + OPT-IO optimal I/O size + PHY-SEC physical sector size + LOG-SEC logical sector size + ROTA rotational device + SCHED I/O scheduler name + RQ-SIZE request queue size + TYPE device type + PKNAME internal parent kernel device name + DISC-ALN discard alignment offset + DISC-GRAN discard granularity + DISC-MAX discard max bytes + DISC-ZERO discard zeroes data + + There is a bug in ``lsblk`` where using all the available (supported) + columns will result in no output (!), in order to workaround this the + following columns have been removed from the default reporting columns: + + * RQ-SIZE (request queue size) + * MIN-IO minimum I/O size + * OPT-IO optimal I/O size + + These should be available however when using `columns`. For example:: + + >>> lsblk('/dev/sda1', columns=['OPT-IO']) + {'OPT-IO': '0'} + + Normal CLI output, as filtered by the flags in this function will look like :: + + $ lsblk --nodeps -P -o NAME,KNAME,MAJ:MIN,FSTYPE,MOUNTPOINT + NAME="sda1" KNAME="sda1" MAJ:MIN="8:1" FSTYPE="ext4" MOUNTPOINT="/" + + :param columns: A list of columns to report as keys in its original form. + :param abspath: Set the flag for absolute paths on the report + """ + default_columns = [ + 'NAME', 'KNAME', 'MAJ:MIN', 'FSTYPE', 'MOUNTPOINT', 'LABEL', 'UUID', + 'RO', 'RM', 'MODEL', 'SIZE', 'STATE', 'OWNER', 'GROUP', 'MODE', + 'ALIGNMENT', 'PHY-SEC', 'LOG-SEC', 'ROTA', 'SCHED', 'TYPE', 'DISC-ALN', + 'DISC-GRAN', 'DISC-MAX', 'DISC-ZERO', 'PKNAME', 'PARTLABEL' + ] + device = device.rstrip('/') + columns = columns or default_columns + # --nodeps -> Avoid adding children/parents to the device, only give information + # on the actual device we are querying for + # -P -> Produce pairs of COLUMN="value" + # -p -> Return full paths to devices, not just the names, when ``abspath`` is set + # -o -> Use the columns specified or default ones provided by this function + base_command = ['lsblk', '--nodeps', '-P'] + if abspath: + base_command.append('-p') + base_command.append('-o') + base_command.append(','.join(columns)) + base_command.append(device) + out, err, rc = process.call(base_command) + + if rc != 0: + return {} + + return _lsblk_parser(' '.join(out)) + + +def is_device(dev): + """ + Boolean to determine if a given device is a block device (**not** + a partition!) 
+ + For example: /dev/sda would return True, but not /dev/sdc1 + """ + if not os.path.exists(dev): + return False + # use lsblk first, fall back to using stat + TYPE = lsblk(dev).get('TYPE') + if TYPE: + return TYPE == 'disk' + + # fallback to stat + return _stat_is_device(os.lstat(dev).st_mode) + + +def is_partition(dev): + """ + Boolean to determine if a given device is a partition, like /dev/sda1 + """ + if not os.path.exists(dev): + return False + # use lsblk first, fall back to using stat + TYPE = lsblk(dev).get('TYPE') + if TYPE: + return TYPE == 'part' + + # fallback to stat + stat_obj = os.stat(dev) + if _stat_is_device(stat_obj.st_mode): + return False + + major = os.major(stat_obj.st_rdev) + minor = os.minor(stat_obj.st_rdev) + if os.path.exists('/sys/dev/block/%d:%d/partition' % (major, minor)): + return True + return False + + +class BaseFloatUnit(float): + """ + Base class to support float representations of size values. Suffix is + computed on child classes by inspecting the class name + """ + + def __repr__(self): + return "<%s(%s)>" % (self.__class__.__name__, self.__float__()) + + def __str__(self): + return "{size:.2f} {suffix}".format( + size=self.__float__(), + suffix=self.__class__.__name__.split('Float')[-1] + ) + + def as_int(self): + return int(self.real) + + def as_float(self): + return self.real + + +class FloatB(BaseFloatUnit): + pass + + +class FloatMB(BaseFloatUnit): + pass + + +class FloatGB(BaseFloatUnit): + pass + + +class FloatKB(BaseFloatUnit): + pass + + +class FloatTB(BaseFloatUnit): + pass + + +class Size(object): + """ + Helper to provide an interface for different sizes given a single initial + input. Allows for comparison between different size objects, which avoids + the need to convert sizes before comparison (e.g. comparing megabytes + against gigabytes).
+ + Common comparison operators are supported:: + + >>> hd1 = Size(gb=400) + >>> hd2 = Size(gb=500) + >>> hd1 > hd2 + False + >>> hd1 < hd2 + True + >>> hd1 == hd2 + False + >>> hd1 == Size(gb=400) + True + + The Size object can also be multiplied or divided, which returns a new + Size object and leaves the original untouched:: + + >>> hd1 + <Size(400.00 GB)> + >>> hd1 * 2 + <Size(800.00 GB)> + >>> hd1 + <Size(400.00 GB)> + + Additions and subtractions are only supported between Size objects:: + + >>> Size(gb=224) - Size(gb=100) + <Size(124.00 GB)> + >>> Size(gb=1) + Size(mb=300) + <Size(1.29 GB)> + + Can also display a human-readable representation, with automatic detection + of the best suited unit, or alternatively, specific unit representation:: + + >>> s = Size(mb=2211) + >>> s + <Size(2.16 GB)> + >>> s.mb + <FloatMB(2211.0)> + >>> print "Total size: %s" % s.mb + Total size: 2211.00 MB + >>> print "Total size: %s" % s + Total size: 2.16 GB + """ + + @classmethod + def parse(cls, size): + if (len(size) > 2 and + size[-2].lower() in ['k', 'm', 'g', 't'] and + size[-1].lower() == 'b'): + return cls(**{size[-2:].lower(): float(size[0:-2])}) + elif size[-1].lower() in ['b', 'k', 'm', 'g', 't']: + return cls(**{size[-1].lower(): float(size[0:-1])}) + else: + return cls(b=float(size)) + + + def __init__(self, multiplier=1024, **kw): + self._multiplier = multiplier + # create a mapping of units-to-multiplier, skip bytes as that is + # calculated initially always and does not need to convert + aliases = [ + [('k', 'kb', 'kilobytes'), self._multiplier], + [('m', 'mb', 'megabytes'), self._multiplier ** 2], + [('g', 'gb', 'gigabytes'), self._multiplier ** 3], + [('t', 'tb', 'terabytes'), self._multiplier ** 4], + ] + # and mappings for units-to-formatters, including bytes and aliases for + # each + format_aliases = [ + [('b', 'bytes'), FloatB], + [('kb', 'kilobytes'), FloatKB], + [('mb', 'megabytes'), FloatMB], + [('gb', 'gigabytes'), FloatGB], + [('tb', 'terabytes'), FloatTB], + ] + self._formatters = {} + for key, value in format_aliases: + for alias in key: + self._formatters[alias] = value + self._factors = {} + for key, value in aliases: + for alias in key: + self._factors[alias] = value + + for k, v in kw.items(): + self._convert(v, k) + # only pursue the first occurrence + break + + def _convert(self, size, unit): + """ + Convert any size down to bytes so that other methods can rely on bytes + being available always, regardless of what they pass in, avoiding the + need for a mapping of every permutation. + """ + if unit in ['b', 'bytes']: + self._b = size + return + factor = self._factors[unit] + self._b = float(size * factor) + + def _get_best_format(self): + """ + Go through all the supported units, and use the first one that is less + than 1024.
This allows representing the size in the most readable format + available + """ + for unit in ['b', 'kb', 'mb', 'gb', 'tb']: + if getattr(self, unit) > 1024: + continue + return getattr(self, unit) + + def __repr__(self): + return "<Size(%s)>" % self._get_best_format() + + def __str__(self): + return "%s" % self._get_best_format() + + def __format__(self, spec): + return str(self._get_best_format()).__format__(spec) + + def __int__(self): + return int(self._b) + + def __float__(self): + return self._b + + def __lt__(self, other): + if isinstance(other, Size): + return self._b < other._b + else: + return self.b < other + + def __le__(self, other): + if isinstance(other, Size): + return self._b <= other._b + else: + return self.b <= other + + def __eq__(self, other): + if isinstance(other, Size): + return self._b == other._b + else: + return self.b == other + + def __ne__(self, other): + if isinstance(other, Size): + return self._b != other._b + else: + return self.b != other + + def __ge__(self, other): + if isinstance(other, Size): + return self._b >= other._b + else: + return self.b >= other + + def __gt__(self, other): + if isinstance(other, Size): + return self._b > other._b + else: + return self.b > other + + def __add__(self, other): + if isinstance(other, Size): + _b = self._b + other._b + return Size(b=_b) + raise TypeError('Cannot add "Size" object with int') + + def __sub__(self, other): + if isinstance(other, Size): + _b = self._b - other._b + return Size(b=_b) + raise TypeError('Cannot subtract "Size" object from int') + + def __mul__(self, other): + if isinstance(other, Size): + raise TypeError('Cannot multiply with "Size" object') + _b = self._b * other + return Size(b=_b) + + def __truediv__(self, other): + if isinstance(other, Size): + return self._b / other._b + _b = self._b / other + return Size(b=_b) + + def __div__(self, other): + if isinstance(other, Size): + return self._b / other._b + _b = self._b / other + return Size(b=_b) + + def __bool__(self): + return self.b != 0 + + def __nonzero__(self): + return self.__bool__() + + def __getattr__(self, unit): + """ + Calculate units on the fly, relies on the fact that ``bytes`` has been + converted at instantiation. Units that don't exist will trigger an + ``AttributeError`` + """ + try: + formatter = self._formatters[unit] + except KeyError: + raise AttributeError('Size object has no attribute "%s"' % unit) + if unit in ['b', 'bytes']: + return formatter(self._b) + try: + factor = self._factors[unit] + except KeyError: + raise AttributeError('Size object has no attribute "%s"' % unit) + return formatter(float(self._b) / factor) + + +def human_readable_size(size): + """ + Take a size in bytes, and transform it into a human readable size with up + to two decimals of precision. + """ + suffixes = ['B', 'KB', 'MB', 'GB', 'TB'] + suffix_index = 0 + while size > 1024: + suffix_index += 1 + size = size / 1024.0 + return "{size:.2f} {suffix}".format( + size=size, + suffix=suffixes[suffix_index]) + + +def size_from_human_readable(s): + """ + Takes a human readable string and converts into a Size. If no unit is + passed, bytes is assumed.
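+ + A couple of illustrative conversions (values assumed):: + + >>> size_from_human_readable('2G') + <Size(2.00 GB)> + >>> size_from_human_readable('512') + <Size(512.00 B)>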
+ """ + s = s.replace(' ', '') + if s[-1].isdigit(): + return Size(b=float(s)) + n = float(s[:-1]) + if s[-1].lower() == 't': + return Size(tb=n) + if s[-1].lower() == 'g': + return Size(gb=n) + if s[-1].lower() == 'm': + return Size(mb=n) + if s[-1].lower() == 'k': + return Size(kb=n) + return None + + +def get_partitions_facts(sys_block_path): + partition_metadata = {} + for folder in os.listdir(sys_block_path): + folder_path = os.path.join(sys_block_path, folder) + if os.path.exists(os.path.join(folder_path, 'partition')): + contents = get_file_contents(os.path.join(folder_path, 'partition')) + if contents: + part = {} + partname = folder + part_sys_block_path = os.path.join(sys_block_path, partname) + + part['start'] = get_file_contents(part_sys_block_path + "/start", 0) + part['sectors'] = get_file_contents(part_sys_block_path + "/size", 0) + + part['sectorsize'] = get_file_contents( + part_sys_block_path + "/queue/logical_block_size") + if not part['sectorsize']: + part['sectorsize'] = get_file_contents( + part_sys_block_path + "/queue/hw_sector_size", 512) + part['size'] = float(part['sectors']) * 512 + part['human_readable_size'] = human_readable_size(float(part['sectors']) * 512) + part['holders'] = [] + for holder in os.listdir(part_sys_block_path + '/holders'): + part['holders'].append(holder) + + partition_metadata[partname] = part + return partition_metadata + + +def is_mapper_device(device_name): + return device_name.startswith(('/dev/mapper', '/dev/dm-')) + + +def is_locked_raw_device(disk_path): + """ + A device can be locked by a third party software like a database. + To detect that case, the device is opened in Read/Write and exclusive mode + """ + open_flags = (os.O_RDWR | os.O_EXCL) + open_mode = 0 + fd = None + + try: + fd = os.open(disk_path, open_flags, open_mode) + except OSError: + return 1 + + try: + os.close(fd) + except OSError: + return 1 + + return 0 + + +def get_block_devs_lsblk(): + ''' + This returns a list of lists with 3 items per inner list. + KNAME - reflects the kernel device name , for example /dev/sda or /dev/dm-0 + NAME - the device name, for example /dev/sda or + /dev/mapper/<vg_name>-<lv_name> + TYPE - the block device type: disk, partition, lvm and such + + ''' + cmd = ['lsblk', '-plno', 'KNAME,NAME,TYPE'] + stdout, stderr, rc = process.call(cmd) + # lsblk returns 1 on failure + if rc == 1: + raise OSError('lsblk returned failure, stderr: {}'.format(stderr)) + return [re.split(r'\s+', line) for line in stdout] + + +def get_devices(_sys_block_path='/sys/block'): + """ + Captures all available block devices as reported by lsblk. + Additional interesting metadata like sectors, size, vendor, + solid/rotational, etc. is collected from /sys/block/<device> + + Returns a dictionary, where keys are the full paths to devices. + + ..note:: loop devices, removable media, and logical volumes are never included. 
+ """ + + device_facts = {} + + block_devs = get_block_devs_lsblk() + + for block in block_devs: + devname = os.path.basename(block[0]) + diskname = block[1] + if block[2] != 'disk': + continue + sysdir = os.path.join(_sys_block_path, devname) + metadata = {} + + # If the mapper device is a logical volume it gets excluded + if is_mapper_device(diskname): + if lvm.get_device_lvs(diskname): + continue + + # all facts that have no defaults + # (<name>, <path relative to _sys_block_path>) + facts = [('removable', 'removable'), + ('ro', 'ro'), + ('vendor', 'device/vendor'), + ('model', 'device/model'), + ('rev', 'device/rev'), + ('sas_address', 'device/sas_address'), + ('sas_device_handle', 'device/sas_device_handle'), + ('support_discard', 'queue/discard_granularity'), + ('rotational', 'queue/rotational'), + ('nr_requests', 'queue/nr_requests'), + ] + for key, file_ in facts: + metadata[key] = get_file_contents(os.path.join(sysdir, file_)) + + metadata['scheduler_mode'] = "" + scheduler = get_file_contents(sysdir + "/queue/scheduler") + if scheduler is not None: + m = re.match(r".*?(\[(.*)\])", scheduler) + if m: + metadata['scheduler_mode'] = m.group(2) + + metadata['partitions'] = get_partitions_facts(sysdir) + + size = get_file_contents(os.path.join(sysdir, 'size'), 0) + + metadata['sectors'] = get_file_contents(os.path.join(sysdir, 'sectors'), 0) + fallback_sectorsize = get_file_contents(sysdir + "/queue/hw_sector_size", 512) + metadata['sectorsize'] = get_file_contents(sysdir + + "/queue/logical_block_size", + fallback_sectorsize) + metadata['size'] = float(size) * 512 + metadata['human_readable_size'] = human_readable_size(metadata['size']) + metadata['path'] = diskname + metadata['locked'] = is_locked_raw_device(metadata['path']) + + device_facts[diskname] = metadata + return device_facts diff --git a/src/ceph-volume/ceph_volume/util/encryption.py b/src/ceph-volume/ceph_volume/util/encryption.py new file mode 100644 index 00000000..72a0ccf1 --- /dev/null +++ b/src/ceph-volume/ceph_volume/util/encryption.py @@ -0,0 +1,263 @@ +import base64 +import os +import logging +from ceph_volume import process, conf +from ceph_volume.util import constants, system +from ceph_volume.util.device import Device +from .prepare import write_keyring +from .disk import lsblk, device_family, get_part_entry_type + +logger = logging.getLogger(__name__) + + +def create_dmcrypt_key(): + """ + Create the secret dm-crypt key used to decrypt a device. 
+ """ + # get the customizable dmcrypt key size (in bits) from ceph.conf fallback + # to the default of 1024 + dmcrypt_key_size = conf.ceph.get_safe( + 'osd', + 'osd_dmcrypt_key_size', + default=1024, + ) + # The size of the key is defined in bits, so we must transform that + # value to bytes (dividing by 8) because we read in bytes, not bits + random_string = os.urandom(int(dmcrypt_key_size / 8)) + key = base64.b64encode(random_string).decode('utf-8') + return key + + +def luks_format(key, device): + """ + Decrypt (open) an encrypted device, previously prepared with cryptsetup + + :param key: dmcrypt secret key, will be used for decrypting + :param device: Absolute path to device + """ + command = [ + 'cryptsetup', + '--batch-mode', # do not prompt + '--key-file', # misnomer, should be key + '-', # because we indicate stdin for the key here + 'luksFormat', + device, + ] + process.call(command, stdin=key, terminal_verbose=True, show_command=True) + + +def plain_open(key, device, mapping): + """ + Decrypt (open) an encrypted device, previously prepared with cryptsetup in plain mode + + .. note: ceph-disk will require an additional b64decode call for this to work + + :param key: dmcrypt secret key + :param device: absolute path to device + :param mapping: mapping name used to correlate device. Usually a UUID + """ + command = [ + 'cryptsetup', + '--key-file', + '-', + '--allow-discards', # allow discards (aka TRIM) requests for device + 'open', + device, + mapping, + '--type', 'plain', + '--key-size', '256', + ] + + process.call(command, stdin=key, terminal_verbose=True, show_command=True) + + +def luks_open(key, device, mapping): + """ + Decrypt (open) an encrypted device, previously prepared with cryptsetup + + .. note: ceph-disk will require an additional b64decode call for this to work + + :param key: dmcrypt secret key + :param device: absolute path to device + :param mapping: mapping name used to correlate device. Usually a UUID + """ + command = [ + 'cryptsetup', + '--key-file', + '-', + '--allow-discards', # allow discards (aka TRIM) requests for device + 'luksOpen', + device, + mapping, + ] + process.call(command, stdin=key, terminal_verbose=True, show_command=True) + + +def dmcrypt_close(mapping): + """ + Encrypt (close) a device, previously decrypted with cryptsetup + + :param mapping: + """ + if not os.path.exists(mapping): + logger.debug('device mapper path does not exist %s' % mapping) + logger.debug('will skip cryptsetup removal') + return + # don't be strict about the remove call, but still warn on the terminal if it fails + process.run(['cryptsetup', 'remove', mapping], stop_on_error=False) + + +def get_dmcrypt_key(osd_id, osd_fsid, lockbox_keyring=None): + """ + Retrieve the dmcrypt (secret) key stored initially on the monitor. The key + is sent initially with JSON, and the Monitor then mangles the name to + ``dm-crypt/osd/<fsid>/luks`` + + The ``lockbox.keyring`` file is required for this operation, and it is + assumed it will exist on the path for the same OSD that is being activated. + To support scanning, it is optionally configurable to a custom location + (e.g. 
inside a lockbox partition mounted in a temporary location) + """ + if lockbox_keyring is None: + lockbox_keyring = '/var/lib/ceph/osd/%s-%s/lockbox.keyring' % (conf.cluster, osd_id) + name = 'client.osd-lockbox.%s' % osd_fsid + config_key = 'dm-crypt/osd/%s/luks' % osd_fsid + + stdout, stderr, returncode = process.call( + [ + 'ceph', + '--cluster', conf.cluster, + '--name', name, + '--keyring', lockbox_keyring, + 'config-key', + 'get', + config_key + ], + show_command=True + ) + if returncode != 0: + raise RuntimeError('Unable to retrieve dmcrypt secret') + return ' '.join(stdout).strip() + + +def write_lockbox_keyring(osd_id, osd_fsid, secret): + """ + Helper to write the lockbox keyring. This is needed because the bluestore OSD will + not persist the keyring, and it can't be stored in the data device for filestore because + at the time this is needed, the device is encrypted. + + For bluestore: A tmpfs filesystem is mounted, so the path can get written + to, but the files are ephemeral, which requires this file to be created + every time it is activated. + For filestore: The path for the OSD would exist at this point even if no + OSD data device is mounted, so the keyring is written to fetch the key, and + then the data device is mounted on that directory, making the keyring + "disappear". + """ + if os.path.exists('/var/lib/ceph/osd/%s-%s/lockbox.keyring' % (conf.cluster, osd_id)): + return + + name = 'client.osd-lockbox.%s' % osd_fsid + write_keyring( + osd_id, + secret, + keyring_name='lockbox.keyring', + name=name + ) + + +def status(device): + """ + Capture the metadata information of a possibly encrypted device, returning + a dictionary with all the values found (if any). + + An encrypted device will contain information about a device. Example + successful output looks like:: + + $ cryptsetup status /dev/mapper/ed6b5a26-eafe-4cd4-87e3-422ff61e26c4 + /dev/mapper/ed6b5a26-eafe-4cd4-87e3-422ff61e26c4 is active and is in use. + type: LUKS1 + cipher: aes-xts-plain64 + keysize: 256 bits + device: /dev/sdc2 + offset: 4096 sectors + size: 20740063 sectors + mode: read/write + + As long as the mapper device is in 'open' state, the ``status`` call will work. + + :param device: Absolute path or UUID of the device mapper + """ + command = [ + 'cryptsetup', + 'status', + device, + ] + out, err, code = process.call(command, show_command=True, verbose_on_failure=False) + + metadata = {} + if code != 0: + logger.warning('failed to detect device mapper information') + return metadata + for line in out: + # get rid of lines that might not be useful to construct the report: + if not line.startswith(' '): + continue + try: + column, value = line.split(': ') + except ValueError: + continue + metadata[column.strip()] = value.strip().strip('"') + return metadata + + +def legacy_encrypted(device): + """ + Detect if a device was encrypted with ceph-disk or not. In the case of + encrypted devices, include the type of encryption (LUKS, or PLAIN), and + infer what the lockbox partition is. + + This function assumes that ``device`` will be a partition. 
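+ + For an unencrypted partition the returned metadata would look like this (device path is illustrative):: + + {'encrypted': False, 'type': None, 'lockbox': '', 'device': '/dev/sdb1'}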
+ """ + if os.path.isdir(device): + mounts = system.get_mounts(paths=True) + # yes, rebind the device variable here because a directory isn't going + # to help with parsing + device = mounts.get(device, [None])[0] + if not device: + raise RuntimeError('unable to determine the device mounted at %s' % device) + metadata = {'encrypted': False, 'type': None, 'lockbox': '', 'device': device} + # check if the device is online/decrypted first + active_mapper = status(device) + if active_mapper: + # normalize a bit to ensure same values regardless of source + metadata['type'] = active_mapper['type'].lower().strip('12') # turn LUKS1 or LUKS2 into luks + metadata['encrypted'] = True if metadata['type'] in ['plain', 'luks'] else False + # The true device is now available to this function, so it gets + # re-assigned here for the lockbox checks to succeed (it is not + # possible to guess partitions from a device mapper device otherwise + device = active_mapper.get('device', device) + metadata['device'] = device + else: + uuid = get_part_entry_type(device) + guid_match = constants.ceph_disk_guids.get(uuid, {}) + encrypted_guid = guid_match.get('encrypted', False) + if encrypted_guid: + metadata['encrypted'] = True + metadata['type'] = guid_match['encryption_type'] + + # Lets find the lockbox location now, to do this, we need to find out the + # parent device name for the device so that we can query all of its + # associated devices and *then* look for one that has the 'lockbox' label + # on it. Thanks for being awesome ceph-disk + disk_meta = lsblk(device, abspath=True) + if not disk_meta: + return metadata + parent_device = disk_meta['PKNAME'] + # With the parent device set, we can now look for the lockbox listing associated devices + devices = [Device(i['NAME']) for i in device_family(parent_device)] + for d in devices: + if d.ceph_disk.type == 'lockbox': + metadata['lockbox'] = d.abspath + break + return metadata diff --git a/src/ceph-volume/ceph_volume/util/prepare.py b/src/ceph-volume/ceph_volume/util/prepare.py new file mode 100644 index 00000000..85b7033c --- /dev/null +++ b/src/ceph-volume/ceph_volume/util/prepare.py @@ -0,0 +1,531 @@ +""" +These utilities for prepare provide all the pieces needed to prepare a device +but also a compounded ("single call") helper to do them in order. Some plugins +may want to change some part of the process, while others might want to consume +the single-call helper +""" +import errno +import os +import logging +import json +import time +from ceph_volume import process, conf, __release__, terminal +from ceph_volume.util import system, constants, str_to_int, disk + +logger = logging.getLogger(__name__) +mlogger = terminal.MultiLogger(__name__) + + +def create_key(): + stdout, stderr, returncode = process.call( + ['ceph-authtool', '--gen-print-key'], + show_command=True) + if returncode != 0: + raise RuntimeError('Unable to generate a new auth key') + return ' '.join(stdout).strip() + + +def write_keyring(osd_id, secret, keyring_name='keyring', name=None): + """ + Create a keyring file with the ``ceph-authtool`` utility. Constructs the + path over well-known conventions for the OSD, and allows any other custom + ``name`` to be set. 
+ + :param osd_id: The ID for the OSD to be used + :param secret: The key to be added (as a string) + :param name: Defaults to 'osd.{ID}' but can be used to add other client + names, specifically for 'lockbox' type of keys + :param keyring_name: Alternative keyring name, for supporting other + types of keys like for lockbox + """ + osd_keyring = '/var/lib/ceph/osd/%s-%s/%s' % (conf.cluster, osd_id, keyring_name) + name = name or 'osd.%s' % str(osd_id) + process.run( + [ + 'ceph-authtool', osd_keyring, + '--create-keyring', + '--name', name, + '--add-key', secret + ]) + system.chown(osd_keyring) + + +def get_journal_size(lv_format=True): + """ + Helper to retrieve the size (defined in megabytes in ceph.conf) to create + the journal logical volume, it "translates" the string into a float value, + then converts that into gigabytes, and finally (optionally) it formats it + back as a string so that it can be used for creating the LV. + + :param lv_format: Return a string to be used for ``lv_create``. A 5 GB size + would result in '5G', otherwise it will return a ``Size`` object. + """ + conf_journal_size = conf.ceph.get_safe('osd', 'osd_journal_size', '5120') + logger.debug('osd_journal_size set to %s' % conf_journal_size) + journal_size = disk.Size(mb=str_to_int(conf_journal_size)) + + if journal_size < disk.Size(gb=2): + mlogger.error('Refusing to continue with configured size for journal') + raise RuntimeError('journal sizes must be larger than 2GB, detected: %s' % journal_size) + if lv_format: + return '%sG' % journal_size.gb.as_int() + return journal_size + + +def get_block_db_size(lv_format=True): + """ + Helper to retrieve the size (defined in bytes in ceph.conf) to create + the block.db logical volume, it "translates" the string into a float value, + then converts that into gigabytes, and finally (optionally) it formats it + back as a string so that it can be used for creating the LV. + + :param lv_format: Return a string to be used for ``lv_create``. A 5 GB size + would result in '5G', otherwise it will return a ``Size`` object. + + .. note: Configuration values are in bytes, unlike journals which + are defined in megabytes + """ + conf_db_size = None + try: + conf_db_size = conf.ceph.get_safe('osd', 'bluestore_block_db_size', None) + except RuntimeError: + logger.exception("failed to load ceph configuration, will use defaults") + + if not conf_db_size: + logger.debug( + 'block.db has no size configuration, will fallback to using as much as possible' + ) + # TODO better to return disk.Size(b=0) here + return None + logger.debug('bluestore_block_db_size set to %s' % conf_db_size) + db_size = disk.Size(b=str_to_int(conf_db_size)) + + if db_size < disk.Size(gb=2): + mlogger.error('Refusing to continue with configured size for block.db') + raise RuntimeError('block.db sizes must be larger than 2GB, detected: %s' % db_size) + if lv_format: + return '%sG' % db_size.gb.as_int() + return db_size + +def get_block_wal_size(lv_format=True): + """ + Helper to retrieve the size (defined in bytes in ceph.conf) to create + the block.wal logical volume, it "translates" the string into a float value, + then converts that into gigabytes, and finally (optionally) it formats it + back as a string so that it can be used for creating the LV. + + :param lv_format: Return a string to be used for ``lv_create``. A 5 GB size + would result in '5G', otherwise it will return a ``Size`` object. + + ..
note: Configuration values are in bytes, unlike journals which + are defined in megabytes + """ + conf_wal_size = None + try: + conf_wal_size = conf.ceph.get_safe('osd', 'bluestore_block_wal_size', None) + except RuntimeError: + logger.exception("failed to load ceph configuration, will use defaults") + + if not conf_wal_size: + logger.debug( + 'block.wal has no size configuration, will fallback to using as much as possible' + ) + return None + logger.debug('bluestore_block_wal_size set to %s' % conf_wal_size) + wal_size = disk.Size(b=str_to_int(conf_wal_size)) + + if wal_size < disk.Size(gb=2): + mlogger.error('Refusing to continue with configured size for block.wal') + raise RuntimeError('block.wal sizes must be larger than 2GB, detected: %s' % wal_size) + if lv_format: + return '%sG' % wal_size.gb.as_int() + return wal_size + + +def create_id(fsid, json_secrets, osd_id=None): + """ + :param fsid: The osd fsid to create, always required + :param json_secrets: a json-ready object with whatever secrets are wanted + to be passed to the monitor + :param osd_id: Reuse an existing ID from an OSD that's been destroyed; the + ID must exist in the cluster and be in the destroyed state, otherwise + an error is raised + """ + bootstrap_keyring = '/var/lib/ceph/bootstrap-osd/%s.keyring' % conf.cluster + cmd = [ + 'ceph', + '--cluster', conf.cluster, + '--name', 'client.bootstrap-osd', + '--keyring', bootstrap_keyring, + '-i', '-', + 'osd', 'new', fsid + ] + if osd_id is not None: + if osd_id_available(osd_id): + cmd.append(osd_id) + else: + raise RuntimeError("The osd ID {} is already in use or does not exist.".format(osd_id)) + stdout, stderr, returncode = process.call( + cmd, + stdin=json_secrets, + show_command=True + ) + if returncode != 0: + raise RuntimeError('Unable to create a new OSD id') + return ' '.join(stdout).strip() + + +def osd_id_available(osd_id): + """ + Checks to see if an osd ID exists and if it's available for + reuse. Returns True if it is, False if it isn't.
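+ + Only an ID whose node reports ``"status": "destroyed"`` in the ``osd tree -f json`` output is considered reusable; an illustrative node entry:: + + {"id": 3, "status": "destroyed", ...}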
+def osd_id_available(osd_id):
+    """
+    Checks to see if an osd ID exists and if it's available for
+    reuse. Returns True if it is, False if it isn't.
+
+    :param osd_id: The osd ID to check
+    """
+    if osd_id is None:
+        return False
+    bootstrap_keyring = '/var/lib/ceph/bootstrap-osd/%s.keyring' % conf.cluster
+    stdout, stderr, returncode = process.call(
+        [
+            'ceph',
+            '--cluster', conf.cluster,
+            '--name', 'client.bootstrap-osd',
+            '--keyring', bootstrap_keyring,
+            'osd',
+            'tree',
+            '-f', 'json',
+        ],
+        show_command=True
+    )
+    if returncode != 0:
+        raise RuntimeError('Unable to check if OSD id exists: %s' % osd_id)
+
+    output = json.loads(''.join(stdout).strip())
+    osds = output['nodes']
+    osd = [osd for osd in osds if str(osd['id']) == str(osd_id)]
+    if osd and osd[0].get('status') == "destroyed":
+        return True
+    return False
+
+
+def mount_tmpfs(path):
+    process.run([
+        'mount',
+        '-t',
+        'tmpfs', 'tmpfs',
+        path
+    ])
+
+    # Restore SELinux context
+    system.set_context(path)
+
+
+def create_osd_path(osd_id, tmpfs=False):
+    path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
+    system.mkdir_p(path)
+    if tmpfs:
+        mount_tmpfs(path)
+
+
+def format_device(device):
+    # only supports xfs
+    command = ['mkfs', '-t', 'xfs']
+
+    # get the mkfs options if any for xfs,
+    # fallback to the default options defined in constants.mkfs
+    flags = conf.ceph.get_list(
+        'osd',
+        'osd_mkfs_options_xfs',
+        default=constants.mkfs.get('xfs'),
+        split=' ',
+    )
+
+    # always force
+    if '-f' not in flags:
+        flags.insert(0, '-f')
+
+    command.extend(flags)
+    command.append(device)
+    process.run(command)
+
+
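How the configured mkfs flags and the forced ``-f`` combine into the final command is easiest to see with concrete values. A minimal sketch; the flag values stand in for what ``conf.ceph.get_list`` would return and are assumptions:

    # e.g. osd_mkfs_options_xfs from ceph.conf, or the constants.mkfs default
    flags = ['-i', 'size=2048']

    if '-f' not in flags:
        flags.insert(0, '-f')     # force, so mkfs won't prompt on a reused device

    command = ['mkfs', '-t', 'xfs'] + flags + ['/dev/sdb1']
    print(' '.join(command))      # mkfs -t xfs -f -i size=2048 /dev/sdb1
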
+def _normalize_mount_flags(flags, extras=None):
+    """
+    Mount flag options have to be a single string, separated by a comma. If the
+    flags are separated by spaces, or with commas and spaces in ceph.conf, the
+    mount options will be passed incorrectly.
+
+    This will help when parsing ceph.conf values return something like::
+
+        ["rw,", "exec,"]
+
+    Or::
+
+        [" rw ,", "exec"]
+
+    :param flags: A list of flags, or a single string of mount flags
+    :param extras: Extra set of mount flags, useful when custom devices like VDO need
+                   ad-hoc mount configurations
+    """
+    # Instead of using set(), we append to this new list here, because set()
+    # will create an arbitrary order on the items that is made worse when
+    # testing with tools like tox that include a randomizer seed. By
+    # controlling the order, it is easier to correctly assert the expectation
+    unique_flags = []
+    if isinstance(flags, list):
+        if extras:
+            flags.extend(extras)
+
+        # ensure that spaces and commas are removed so that they can join
+        # correctly, remove duplicates
+        for f in flags:
+            f = f.strip(', ')
+            if f and f not in unique_flags:
+                unique_flags.append(f)
+        return ','.join(unique_flags)
+
+    # split them, clean them, and join them back again
+    flags = flags.strip().split(' ')
+    if extras:
+        flags.extend(extras)
+
+    # remove possible duplicates
+    for f in flags:
+        f = f.strip(', ')
+        if f and f not in unique_flags:
+            unique_flags.append(f)
+    flags = ','.join(unique_flags)
+    # Before returning, split them again, since strings can be mashed up
+    # together, preventing removal of duplicate entries
+    return ','.join(set(flags.split(',')))
+
+
+def mount_osd(device, osd_id, **kw):
+    extras = []
+    is_vdo = kw.get('is_vdo', '0')
+    if is_vdo == '1':
+        extras = ['discard']
+    destination = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
+    command = ['mount', '-t', 'xfs', '-o']
+    flags = conf.ceph.get_list(
+        'osd',
+        'osd_mount_options_xfs',
+        default=constants.mount.get('xfs'),
+        split=' ',
+    )
+    command.append(
+        _normalize_mount_flags(flags, extras=extras)
+    )
+    command.append(device)
+    command.append(destination)
+    process.run(command)
+
+    # Restore SELinux context
+    system.set_context(destination)
+
+
+def _link_device(device, device_type, osd_id):
+    """
+    Allow linking any device type in an OSD directory. ``device`` must be the
+    source, with an absolute path, and ``device_type`` will be the destination
+    name, like 'journal' or 'block'
+    """
+    device_path = '/var/lib/ceph/osd/%s-%s/%s' % (
+        conf.cluster,
+        osd_id,
+        device_type
+    )
+    command = ['ln', '-s', device, device_path]
+    system.chown(device)
+
+    process.run(command)
+
+
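The normalization is easiest to see with concrete inputs. A standalone sketch of the list branch (the function name and sample values are invented; cleanup is done with ``strip(', ')`` so stray inner spaces are removed too):

    def normalize(flags, extras=None):
        # simplified copy of the list branch of _normalize_mount_flags
        unique = []
        if extras:
            flags = list(flags) + extras
        for f in flags:
            cleaned = f.strip(', ')
            if cleaned and cleaned not in unique:
                unique.append(cleaned)
        return ','.join(unique)

    print(normalize(['rw,', 'exec,']))                       # rw,exec
    print(normalize([' rw ,', 'exec'], extras=['discard']))  # rw,exec,discard
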
+def _validate_bluestore_device(device, expected_device_type, osd_uuid):
+    """
+    Validate whether the given device is truly what it is supposed to be
+    """
+
+    out, err, ret = process.call(['ceph-bluestore-tool', 'show-label', '--dev', device])
+    if err:
+        terminal.error('ceph-bluestore-tool failed to run. %s' % err)
+        raise SystemExit(1)
+    if ret:
+        terminal.error('no label on %s' % device)
+        raise SystemExit(1)
+    oj = json.loads(''.join(out))
+    if device not in oj:
+        terminal.error('%s not in the output of ceph-bluestore-tool, buggy?' % device)
+        raise SystemExit(1)
+    current_device_type = oj[device]['description']
+    if current_device_type != expected_device_type:
+        terminal.error('%s is not a %s device but %s' % (device, expected_device_type, current_device_type))
+        raise SystemExit(1)
+    current_osd_uuid = oj[device]['osd_uuid']
+    if current_osd_uuid != osd_uuid:
+        terminal.error('device %s is used by another osd %s as %s, should be %s' % (device, current_osd_uuid, current_device_type, osd_uuid))
+        raise SystemExit(1)
+
+
+def link_journal(journal_device, osd_id):
+    _link_device(journal_device, 'journal', osd_id)
+
+
+def link_block(block_device, osd_id):
+    _link_device(block_device, 'block', osd_id)
+
+
+def link_wal(wal_device, osd_id, osd_uuid=None):
+    _validate_bluestore_device(wal_device, 'bluefs wal', osd_uuid)
+    _link_device(wal_device, 'block.wal', osd_id)
+
+
+def link_db(db_device, osd_id, osd_uuid=None):
+    _validate_bluestore_device(db_device, 'bluefs db', osd_uuid)
+    _link_device(db_device, 'block.db', osd_id)
+
+
+def get_monmap(osd_id):
+    """
+    Before creating the OSD files, a monmap needs to be retrieved so that it
+    can be used to tell the monitor(s) about the new OSD. A call will look like::
+
+        ceph --cluster ceph --name client.bootstrap-osd \
+            --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring \
+            mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap
+    """
+    path = '/var/lib/ceph/osd/%s-%s/' % (conf.cluster, osd_id)
+    bootstrap_keyring = '/var/lib/ceph/bootstrap-osd/%s.keyring' % conf.cluster
+    monmap_destination = os.path.join(path, 'activate.monmap')
+
+    process.run([
+        'ceph',
+        '--cluster', conf.cluster,
+        '--name', 'client.bootstrap-osd',
+        '--keyring', bootstrap_keyring,
+        'mon', 'getmap', '-o', monmap_destination
+    ])
+
+
+def get_osdspec_affinity():
+    return os.environ.get('CEPH_VOLUME_OSDSPEC_AFFINITY', '')
+
+
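``ceph-bluestore-tool show-label`` emits a JSON object keyed by device path; the validation above only inspects two of its fields. An abridged, made-up payload and the same two checks against it, for illustration:

    import json

    # abridged, invented show-label output
    label = json.loads('''{
        "/dev/vg/wal": {
            "osd_uuid": "8d208665-89ae-4733-8888-5d3bfbeeec6c",
            "description": "bluefs wal"
        }
    }''')

    device = '/dev/vg/wal'
    entry = label[device]
    assert entry['description'] == 'bluefs wal'    # correct device type
    assert entry['osd_uuid'] == '8d208665-89ae-4733-8888-5d3bfbeeec6c'
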
+def osd_mkfs_bluestore(osd_id, fsid, keyring=None, wal=False, db=False):
+    """
+    Create the files for the OSD to function. A normal call will look like:
+
+        ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 \
+            --monmap /var/lib/ceph/osd/ceph-0/activate.monmap \
+            --osd-data /var/lib/ceph/osd/ceph-0 \
+            --osd-uuid 8d208665-89ae-4733-8888-5d3bfbeeec6c \
+            --setuser ceph --setgroup ceph
+
+    In some cases it is required to use the keyring; when it is passed in as
+    a keyword argument it is fed to the ceph-osd command on stdin (via
+    ``--keyfile -``)
+    """
+    path = '/var/lib/ceph/osd/%s-%s/' % (conf.cluster, osd_id)
+    monmap = os.path.join(path, 'activate.monmap')
+
+    system.chown(path)
+
+    base_command = [
+        'ceph-osd',
+        '--cluster', conf.cluster,
+        '--osd-objectstore', 'bluestore',
+        '--mkfs',
+        '-i', osd_id,
+        '--monmap', monmap,
+    ]
+
+    supplementary_command = [
+        '--osd-data', path,
+        '--osd-uuid', fsid,
+        '--setuser', 'ceph',
+        '--setgroup', 'ceph'
+    ]
+
+    if keyring is not None:
+        base_command.extend(['--keyfile', '-'])
+
+    if wal:
+        base_command.extend(
+            ['--bluestore-block-wal-path', wal]
+        )
+        system.chown(wal)
+
+    if db:
+        base_command.extend(
+            ['--bluestore-block-db-path', db]
+        )
+        system.chown(db)
+
+    if get_osdspec_affinity():
+        base_command.extend(['--osdspec-affinity', get_osdspec_affinity()])
+
+    command = base_command + supplementary_command
+
+    # When running in containers, --mkfs on a raw device sometimes fails to
+    # acquire a lock through flock() on the device because systemd-udevd holds
+    # one temporarily. See KernelDevice.cc and _lock() to understand how
+    # ceph-osd acquires the lock. Because this is really transient, we retry up
+    # to 5 times and wait for 1 sec in-between
+    for retry in range(5):
+        _, _, returncode = process.call(command, stdin=keyring, terminal_verbose=True, show_command=True)
+        if returncode == 0:
+            break
+        else:
+            if returncode == errno.EWOULDBLOCK:
+                time.sleep(1)
+                logger.info('disk is held by another process, trying to mkfs again... (%s/5 attempt)' % (retry + 1))
+                continue
+            else:
+                raise RuntimeError('Command failed with exit code %s: %s' % (returncode, ' '.join(command)))
+
+
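The retry loop above is a generic pattern for transient flock() contention. A standalone sketch of the same shape; the helper name is invented, and unlike the loop above it also fails loudly when every attempt hits the lock:

    import errno
    import subprocess
    import time

    def run_with_retry(command, attempts=5, delay=1):
        # retry only when the tool reports EWOULDBLOCK (lock held elsewhere);
        # any other failure is treated as permanent and raises immediately
        for attempt in range(attempts):
            returncode = subprocess.call(command)
            if returncode == 0:
                return
            if returncode == errno.EWOULDBLOCK:
                time.sleep(delay)
                continue
            raise RuntimeError('failed with exit code %s' % returncode)
        raise RuntimeError('gave up after %s attempts' % attempts)
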
+def osd_mkfs_filestore(osd_id, fsid, keyring):
+    """
+    Create the files for the OSD to function. A normal call will look like:
+
+        ceph-osd --cluster ceph --osd-objectstore filestore --mkfs -i 0 \
+            --monmap /var/lib/ceph/osd/ceph-0/activate.monmap \
+            --osd-data /var/lib/ceph/osd/ceph-0 \
+            --osd-journal /var/lib/ceph/osd/ceph-0/journal \
+            --osd-uuid 8d208665-89ae-4733-8888-5d3bfbeeec6c \
+            --setuser ceph --setgroup ceph
+
+    """
+    path = '/var/lib/ceph/osd/%s-%s/' % (conf.cluster, osd_id)
+    monmap = os.path.join(path, 'activate.monmap')
+    journal = os.path.join(path, 'journal')
+
+    system.chown(journal)
+    system.chown(path)
+
+    command = [
+        'ceph-osd',
+        '--cluster', conf.cluster,
+        '--osd-objectstore', 'filestore',
+        '--mkfs',
+        '-i', osd_id,
+        '--monmap', monmap,
+    ]
+
+    if get_osdspec_affinity():
+        command.extend(['--osdspec-affinity', get_osdspec_affinity()])
+
+    if __release__ != 'luminous':
+        # goes through stdin
+        command.extend(['--keyfile', '-'])
+
+    command.extend([
+        '--osd-data', path,
+        '--osd-journal', journal,
+        '--osd-uuid', fsid,
+        '--setuser', 'ceph',
+        '--setgroup', 'ceph'
+    ])
+
+    _, _, returncode = process.call(
+        command, stdin=keyring, terminal_verbose=True, show_command=True
+    )
+    if returncode != 0:
+        raise RuntimeError('Command failed with exit code %s: %s' % (returncode, ' '.join(command)))
diff --git a/src/ceph-volume/ceph_volume/util/system.py b/src/ceph-volume/ceph_volume/util/system.py
new file mode 100644
index 00000000..49986233
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/util/system.py
@@ -0,0 +1,346 @@
+import errno
+import logging
+import os
+import pwd
+import platform
+import tempfile
+import uuid
+from ceph_volume import process, terminal
+from . import as_string
+
+# python2 has no FileNotFoundError
+try:
+    FileNotFoundError
+except NameError:
+    FileNotFoundError = OSError
+
+logger = logging.getLogger(__name__)
+mlogger = terminal.MultiLogger(__name__)
+
+# TODO: get these out of here and into a common area for others to consume
+if platform.system() == 'FreeBSD':
+    FREEBSD = True
+    DEFAULT_FS_TYPE = 'zfs'
+    PROCDIR = '/compat/linux/proc'
+    # FreeBSD does not have blockdevices anymore
+    BLOCKDIR = '/dev'
+    ROOTGROUP = 'wheel'
+else:
+    FREEBSD = False
+    DEFAULT_FS_TYPE = 'xfs'
+    PROCDIR = '/proc'
+    BLOCKDIR = '/sys/block'
+    ROOTGROUP = 'root'
+
+
+def generate_uuid():
+    return str(uuid.uuid4())
+
+
+def which(executable):
+    """find the location of an executable"""
+    def _get_path(executable, locations):
+        for location in locations:
+            executable_path = os.path.join(location, executable)
+            if os.path.exists(executable_path) and os.path.isfile(executable_path):
+                return executable_path
+        return None
+
+    path = os.getenv('PATH', '')
+    path_locations = path.split(':')
+    exec_in_path = _get_path(executable, path_locations)
+    if exec_in_path:
+        return exec_in_path
+    mlogger.warning('Executable {} not in PATH: {}'.format(executable, path))
+
+    static_locations = (
+        '/usr/local/bin',
+        '/bin',
+        '/usr/bin',
+        '/usr/local/sbin',
+        '/usr/sbin',
+        '/sbin',
+    )
+    exec_in_static_locations = _get_path(executable, static_locations)
+    if exec_in_static_locations:
+        mlogger.warning('Found executable under {}, please ensure $PATH is set correctly!'.format(exec_in_static_locations))
+        return exec_in_static_locations
+    # fallback to just returning the argument as-is, to prevent a hard fail,
+    # and hoping that the system might have the executable somewhere custom
+    return executable
+
+
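The three-stage fallback in ``which`` (``$PATH``, then well-known directories, then the bare name) can be exercised standalone. A minimal sketch; the helper name is invented:

    import os

    def find_executable(name, locations):
        # return the first existing file named `name` under `locations`
        for location in locations:
            candidate = os.path.join(location, name)
            if os.path.isfile(candidate):
                return candidate
        return None

    path_dirs = os.getenv('PATH', '').split(':')
    static_dirs = ['/usr/local/bin', '/bin', '/usr/bin',
                   '/usr/local/sbin', '/usr/sbin', '/sbin']

    # try $PATH first, then the static locations, then give back the name
    found = (find_executable('restorecon', path_dirs)
             or find_executable('restorecon', static_dirs)
             or 'restorecon')
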
+def get_ceph_user_ids():
+    """
+    Return the uid and gid of the ceph user
+    """
+    try:
+        user = pwd.getpwnam('ceph')
+    except KeyError:
+        # is this even possible?
+        raise RuntimeError('"ceph" user is not available in the current system')
+    return user[2], user[3]
+
+
+def get_file_contents(path, default=''):
+    contents = default
+    if not os.path.exists(path):
+        return contents
+    try:
+        with open(path, 'r') as open_file:
+            contents = open_file.read().strip()
+    except Exception:
+        logger.exception('Failed to read contents from: %s' % path)
+
+    return contents
+
+
+def mkdir_p(path, chown=True):
+    """
+    A `mkdir -p`-like helper that tolerates an existing directory and defaults
+    to chowning the path to the ceph user. Note that, unlike `mkdir -p`, it
+    does not create missing parent directories.
+    """
+    try:
+        os.mkdir(path)
+    except OSError as e:
+        if e.errno == errno.EEXIST:
+            pass
+        else:
+            raise
+    if chown:
+        uid, gid = get_ceph_user_ids()
+        os.chown(path, uid, gid)
+
+
+def chown(path, recursive=True):
+    """
+    ``chown`` a path to the ceph user (uid and gid fetched at runtime)
+    """
+    uid, gid = get_ceph_user_ids()
+    if os.path.islink(path):
+        process.run(['chown', '-h', 'ceph:ceph', path])
+        path = os.path.realpath(path)
+    if recursive:
+        process.run(['chown', '-R', 'ceph:ceph', path])
+    else:
+        os.chown(path, uid, gid)
+
+
+def is_binary(path):
+    """
+    Detect if a file path is a binary or not. Will falsely report as binary
+    when utf-16 encoded. In the ceph universe there is no such risk (yet)
+    """
+    with open(path, 'rb') as fp:
+        contents = fp.read(8192)
+    if b'\x00' in contents:  # a null byte may signal binary
+        return True
+    return False
+
+
+class tmp_mount(object):
+    """
+    Temporarily mount a device on a temporary directory,
+    and unmount it upon exit
+
+    When ``encrypted`` is set to ``True``, the exit method will call out to
+    close the device so that it doesn't remain open after mounting. It is
+    assumed that it will be open because otherwise it wouldn't be possible to
+    mount in the first place
+    """
+
+    def __init__(self, device, encrypted=False):
+        self.device = device
+        self.path = None
+        self.encrypted = encrypted
+
+    def __enter__(self):
+        self.path = tempfile.mkdtemp()
+        process.run([
+            'mount',
+            '-v',
+            self.device,
+            self.path
+        ])
+        return self.path
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        process.run([
+            'umount',
+            '-v',
+            self.path
+        ])
+        if self.encrypted:
+            # avoid a circular import from the encryption module
+            from ceph_volume.util import encryption
+            encryption.dmcrypt_close(self.device)
+
+
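A hypothetical use of the context manager, assuming this module is importable as ``ceph_volume.util.system``; the device path and the file read are made up:

    import os
    from ceph_volume.util.system import tmp_mount, get_file_contents

    # hypothetical: /dev/ceph-vg/data is an invented device path
    with tmp_mount('/dev/ceph-vg/data') as mount_point:
        # the device stays mounted only inside this block
        whoami = get_file_contents(os.path.join(mount_point, 'whoami'))
    # on exit the device is unmounted (and dmcrypt-closed when encrypted=True)
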
+def unmount_tmpfs(path):
+    """
+    Removes the mount at the given path, but only if the path is a tmpfs
+    mount point. Otherwise no action is taken.
+    """
+    _out, _err, rc = process.call(['findmnt', '-t', 'tmpfs', '-M', path])
+    if rc != 0:
+        logger.info('{} does not appear to be a tmpfs mount'.format(path))
+    else:
+        logger.info('Unmounting tmpfs path at {}'.format(path))
+        unmount(path)
+
+
+def unmount(path):
+    """
+    Removes mounts at the given path
+    """
+    process.run([
+        'umount',
+        '-v',
+        path,
+    ])
+
+
+def path_is_mounted(path, destination=None):
+    """
+    Check if the given path is mounted
+    """
+    mounts = get_mounts(paths=True)
+    realpath = os.path.realpath(path)
+    mounted_locations = mounts.get(realpath, [])
+
+    if destination:
+        return destination in mounted_locations
+    return mounted_locations != []
+
+
+def device_is_mounted(dev, destination=None):
+    """
+    Check if the given device is mounted, optionally validating that a
+    destination exists
+    """
+    plain_mounts = get_mounts(devices=True)
+    realpath_mounts = get_mounts(devices=True, realpath=True)
+    realpath_dev = os.path.realpath(dev) if dev.startswith('/') else dev
+    destination = os.path.realpath(destination) if destination else None
+    # plain mounts
+    plain_dev_mounts = plain_mounts.get(dev, [])
+    realpath_dev_mounts = plain_mounts.get(realpath_dev, [])
+    # realpath mounts
+    plain_dev_real_mounts = realpath_mounts.get(dev, [])
+    realpath_dev_real_mounts = realpath_mounts.get(realpath_dev, [])
+
+    mount_locations = [
+        plain_dev_mounts,
+        realpath_dev_mounts,
+        plain_dev_real_mounts,
+        realpath_dev_real_mounts
+    ]
+
+    for mounts in mount_locations:
+        if mounts:  # we have a matching mount
+            if destination:
+                if destination in mounts:
+                    logger.info(
+                        '%s detected as mounted, exists at destination: %s', dev, destination
+                    )
+                    return True
+            else:
+                logger.info('%s was found as mounted', dev)
+                return True
+    logger.info('%s was not found as mounted', dev)
+    return False
+
+
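The four lookups in ``device_is_mounted`` exist because an LVM device is reachable under several names (symlink, mapper path, dm node) and /proc/mounts may record any of them. An illustrative sketch with invented paths showing why the plain/realpath matrix is needed:

    # /proc/mounts recorded the mapper name; realpath resolution maps
    # everything down to the dm node. All values here are made up.
    plain_mounts = {'/dev/mapper/vg-lv': ['/var/lib/ceph/osd/ceph-0']}
    realpath_mounts = {'/dev/dm-1': ['/var/lib/ceph/osd/ceph-0']}

    dev = '/dev/vg/lv'           # symlink to /dev/dm-1
    realpath_dev = '/dev/dm-1'   # what os.path.realpath(dev) would return

    # the four lookups device_is_mounted performs:
    candidates = [
        plain_mounts.get(dev, []),               # as given, raw mounts
        plain_mounts.get(realpath_dev, []),      # resolved, raw mounts
        realpath_mounts.get(dev, []),            # as given, resolved mounts
        realpath_mounts.get(realpath_dev, []),   # resolved, resolved mounts
    ]
    assert any(candidates)   # only the fully resolved lookup matches
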
+def get_mounts(devices=False, paths=False, realpath=False):
+    """
+    Create a mapping of all available system mounts so that other helpers can
+    detect nicely what path or device is mounted
+
+    It ignores (most of) non-existing devices, but since some setups might need
+    some extra device information, it will make an exception for:
+
+    - tmpfs
+    - devtmpfs
+
+    If ``devices`` is set to ``True`` the mapping will be a device-to-path(s),
+    if ``paths`` is set to ``True`` then the mapping will be
+    a path-to-device(s)
+
+    :param realpath: Resolve devices to use their realpaths. This is useful for
+    paths like LVM where more than one path can point to the same device
+    """
+    devices_mounted = {}
+    paths_mounted = {}
+    do_not_skip = ['tmpfs', 'devtmpfs']
+    default_to_devices = devices is False and paths is False
+
+    with open(PROCDIR + '/mounts', 'rb') as mounts:
+        proc_mounts = mounts.readlines()
+
+    for line in proc_mounts:
+        fields = [as_string(f) for f in line.split()]
+        if len(fields) < 3:
+            continue
+        if realpath:
+            device = os.path.realpath(fields[0]) if fields[0].startswith('/') else fields[0]
+        else:
+            device = fields[0]
+        path = os.path.realpath(fields[1])
+        # only care about actual existing devices
+        if not os.path.exists(device) or not device.startswith('/'):
+            if device not in do_not_skip:
+                continue
+        if device in devices_mounted.keys():
+            devices_mounted[device].append(path)
+        else:
+            devices_mounted[device] = [path]
+        if path in paths_mounted.keys():
+            paths_mounted[path].append(device)
+        else:
+            paths_mounted[path] = [device]
+
+    # Default to returning information for devices if no mapping type was
+    # explicitly requested
+    if devices is True or default_to_devices:
+        return devices_mounted
+    else:
+        return paths_mounted
+
+
+def set_context(path, recursive=False):
+    """
+    Calls ``restorecon`` to set the proper context on SELinux systems. Only if
+    the ``restorecon`` executable is found anywhere in the path it will get
+    called.
+
+    If the ``CEPH_VOLUME_SKIP_RESTORECON`` environment variable is set to
+    any of: "1", "true", "yes" the call will be skipped as well.
+
+    Finally, if SELinux is not enabled, or not available in the system,
+    ``restorecon`` will not be called. This is checked by calling out to the
+    ``selinuxenabled`` executable. If that tool is not installed or returns
+    a non-zero exit status then no further action is taken and this function
+    will return.
+    """
+    skip = os.environ.get('CEPH_VOLUME_SKIP_RESTORECON', '')
+    if skip.lower() in ['1', 'true', 'yes']:
+        logger.info(
+            'CEPH_VOLUME_SKIP_RESTORECON environ is set, will not call restorecon'
+        )
+        return
+
+    try:
+        stdout, stderr, code = process.call(['selinuxenabled'],
+                                            verbose_on_failure=False)
+    except FileNotFoundError:
+        logger.info('No SELinux found, skipping call to restorecon')
+        return
+
+    if code != 0:
+        logger.info('SELinux is not enabled, will not call restorecon')
+        return
+
+    # restore selinux context to default policy values
+    if which('restorecon').startswith('/'):
+        if recursive:
+            process.run(['restorecon', '-R', path])
+        else:
+            process.run(['restorecon', path])
diff --git a/src/ceph-volume/ceph_volume/util/templates.py b/src/ceph-volume/ceph_volume/util/templates.py
new file mode 100644
index 00000000..85a366d2
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/util/templates.py
@@ -0,0 +1,49 @@
+
+osd_header = """
+{:-^100}""".format('')
+
+
+osd_component_titles = """
+  Type            Path                                                    LV Size         % of device"""
+
+
+osd_reused_id = """
+  OSD id          {id_: <55}"""
+
+
+osd_component = """
+  {_type: <15} {path: <55} {size: <15} {percent:.2%}"""
+
+
+osd_encryption = """
+  encryption:     {enc: <15}"""
+
+
+total_osds = """
+Total OSDs: {total_osds}
+"""
+
+
+def filtered_devices(devices):
+    string = """
+Filtered Devices:"""
+    for device, info in devices.items():
+        string += """
+  %s""" % device
+
+        for reason in info['reasons']:
+            string += """
+    %s""" % reason
+
+    string += "\n"
+    return string
+
+
+ssd_volume_group = """
+Solid State VG:
+  Targets:   {target: <25} Total size: {total_lv_size: <25}
+  Total LVs: {total_lvs: <25} Size per LV: {lv_size: <25}
+  Devices:   {block_db_devices}
+"""
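These templates are plain ``str.format`` strings. A short illustration of how one report line could be rendered; the LV path and size values are made up:

    osd_component = """
      {_type: <15} {path: <55} {size: <15} {percent:.2%}"""

    print(osd_component.format(
        _type='block',
        path='/dev/ceph-vg/osd-block-0',   # invented LV path
        size='445.00 GB',
        percent=1.0,                       # rendered as 100.00%
    ))
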