Diffstat (limited to 'src/python-common')
-rw-r--r--  src/python-common/ceph/deployment/drive_selection/selector.py |  26
-rw-r--r--  src/python-common/ceph/deployment/hostspec.py                 |  18
-rw-r--r--  src/python-common/ceph/deployment/service_spec.py             | 203
-rw-r--r--  src/python-common/ceph/deployment/translate.py                |  64
-rw-r--r--  src/python-common/ceph/tests/test_disk_selector.py            |  41
-rw-r--r--  src/python-common/ceph/tests/test_drive_group.py              |  16
-rw-r--r--  src/python-common/ceph/tests/test_service_spec.py             |  26
-rw-r--r--  src/python-common/ceph/utils.py                               |  48
8 files changed, 348 insertions, 94 deletions
diff --git a/src/python-common/ceph/deployment/drive_selection/selector.py b/src/python-common/ceph/deployment/drive_selection/selector.py
index 1b3bfbb4e..041f1ed30 100644
--- a/src/python-common/ceph/deployment/drive_selection/selector.py
+++ b/src/python-common/ceph/deployment/drive_selection/selector.py
@@ -53,9 +53,12 @@ class DriveSelection(object):
# type: () -> List[Device]
return self._journal
- def _limit_reached(self, device_filter, len_devices,
- disk_path):
- # type: (DeviceSelection, int, str) -> bool
+ def _limit_reached(
+ self,
+ device_filter: DeviceSelection,
+ devices: List[Device],
+ disk_path: str
+ ) -> bool:
""" Check for the <limit> property and apply logic
If a limit is set in 'device_attrs' we have to stop adding
@@ -63,14 +66,21 @@ class DriveSelection(object):
If limit is set (>0) and len(devices) >= limit
- :param int len_devices: Length of the already populated device set/list
+ :param List[Device] devices: Already populated device set/list
:param str disk_path: The disk identifier (for logging purposes)
:return: True/False if the device should be added to the list of devices
:rtype: bool
"""
limit = device_filter.limit or 0
-
- if limit > 0 and (len_devices + self.existing_daemons >= limit):
+        # If a device is already being used for an OSD, it can still
+        # match the filter (this is necessary as we still want the
+        # device in the resulting ceph-volume lvm batch command).
+        # In that case, we don't want to count the device towards
+        # the limit, as it is already counted through the existing
+        # daemons.
+ non_ceph_devices = [d for d in devices if not d.ceph_device]
+
+ if limit > 0 and (len(non_ceph_devices) + self.existing_daemons >= limit):
logger.debug("Refuse to add {} due to limit policy of <{}>".format(
disk_path, limit))
return True
@@ -132,7 +142,7 @@ class DriveSelection(object):
other_osdspec_affinity = ''
for lv in disk.lvs:
if 'osdspec_affinity' in lv.keys():
- if lv['osdspec_affinity'] != self.spec.service_id:
+ if lv['osdspec_affinity'] != str(self.spec.service_id):
other_osdspec_affinity = lv['osdspec_affinity']
break
if other_osdspec_affinity:
@@ -147,7 +157,7 @@ class DriveSelection(object):
continue
# break on this condition.
- if self._limit_reached(device_filter, len(devices), disk.path):
+ if self._limit_reached(device_filter, devices, disk.path):
logger.debug("Ignoring disk {}. Limit reached".format(
disk.path))
break
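
Illustration (not part of the patch): the new accounting in _limit_reached only counts devices that are not already consumed by an OSD, since those are represented by existing_daemons. A minimal self-contained sketch, using a hypothetical FakeDevice stand-in rather than the real ceph.deployment.inventory.Device:

    from dataclasses import dataclass
    from typing import List

    @dataclass
    class FakeDevice:                 # hypothetical stand-in for Device
        path: str
        ceph_device: bool = False     # True if an OSD already lives here

    def limit_reached(limit: int, devices: List[FakeDevice],
                      existing_daemons: int) -> bool:
        # devices already backing an OSD are excluded: they are counted
        # once via existing_daemons instead of twice
        non_ceph = [d for d in devices if not d.ceph_device]
        return limit > 0 and len(non_ceph) + existing_daemons >= limit

    # /dev/sda already backs an OSD (the 1 existing daemon); with limit=2
    # one fresh device still fits, a second one is refused:
    picked = [FakeDevice('/dev/sda', ceph_device=True)]
    assert not limit_reached(2, picked, existing_daemons=1)
    picked.append(FakeDevice('/dev/sdb'))
    assert limit_reached(2, picked, existing_daemons=1)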
diff --git a/src/python-common/ceph/deployment/hostspec.py b/src/python-common/ceph/deployment/hostspec.py
index 0c448bf13..f17ba81cf 100644
--- a/src/python-common/ceph/deployment/hostspec.py
+++ b/src/python-common/ceph/deployment/hostspec.py
@@ -16,6 +16,15 @@ def assert_valid_host(name: str) -> None:
raise SpecValidationError(str(e) + f'. Got "{name}"')
+def assert_valid_oob(oob: Dict[str, str]) -> None:
+ fields = ['username', 'password']
+ try:
+ for field in fields:
+            assert field in oob.keys(), f'oob must contain a "{field}" field'
+ except AssertionError as e:
+ raise SpecValidationError(str(e))
+
+
class SpecValidationError(Exception):
"""
Defining an exception here is a bit problematic, cause you cannot properly catch it,
@@ -38,6 +47,7 @@ class HostSpec(object):
labels: Optional[List[str]] = None,
status: Optional[str] = None,
location: Optional[Dict[str, str]] = None,
+ oob: Optional[Dict[str, str]] = None,
):
self.service_type = 'host'
@@ -55,8 +65,13 @@ class HostSpec(object):
self.location = location
+ #: oob details, if provided
+ self.oob = oob
+
def validate(self) -> None:
assert_valid_host(self.hostname)
+ if self.oob:
+ assert_valid_oob(self.oob)
def to_json(self) -> Dict[str, Any]:
r: Dict[str, Any] = {
@@ -67,6 +82,8 @@ class HostSpec(object):
}
if self.location:
r['location'] = self.location
+ if self.oob:
+ r['oob'] = self.oob
return r
@classmethod
@@ -79,6 +96,7 @@ class HostSpec(object):
host_spec['labels'])) if 'labels' in host_spec else None,
host_spec['status'] if 'status' in host_spec else None,
host_spec.get('location'),
+ host_spec['oob'] if 'oob' in host_spec else None,
)
return _cls
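
Illustration (not part of the patch): how the new oob field flows through construction, validation and JSON serialization. A sketch assuming the in-tree import path; only the presence of the username/password keys is validated:

    from ceph.deployment.hostspec import HostSpec, SpecValidationError

    spec = HostSpec('node1', oob={'username': 'admin', 'password': 'secret'})
    spec.validate()                          # passes: both required keys present
    assert spec.to_json()['oob']['password'] == 'secret'
    assert HostSpec.from_json(spec.to_json()).oob == spec.oob

    try:
        HostSpec('node2', oob={'username': 'admin'}).validate()
    except SpecValidationError:
        pass                                 # 'password' key is missing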
diff --git a/src/python-common/ceph/deployment/service_spec.py b/src/python-common/ceph/deployment/service_spec.py
index be9f3e8ea..704dfe6f0 100644
--- a/src/python-common/ceph/deployment/service_spec.py
+++ b/src/python-common/ceph/deployment/service_spec.py
@@ -127,17 +127,120 @@ class HostPlacementSpec(NamedTuple):
assert_valid_host(self.hostname)
+HostPatternType = Union[str, None, Dict[str, Union[str, bool, None]], "HostPattern"]
+
+
+class PatternType(enum.Enum):
+ fnmatch = 'fnmatch'
+ regex = 'regex'
+
+
+class HostPattern():
+ def __init__(self,
+ pattern: Optional[str] = None,
+ pattern_type: PatternType = PatternType.fnmatch) -> None:
+ self.pattern: Optional[str] = pattern
+ self.pattern_type: PatternType = pattern_type
+ self.compiled_regex = None
+ if self.pattern_type == PatternType.regex and self.pattern:
+ self.compiled_regex = re.compile(self.pattern)
+
+ def filter_hosts(self, hosts: List[str]) -> List[str]:
+ if not self.pattern:
+ return []
+ if not self.pattern_type or self.pattern_type == PatternType.fnmatch:
+ return fnmatch.filter(hosts, self.pattern)
+ elif self.pattern_type == PatternType.regex:
+ if not self.compiled_regex:
+ self.compiled_regex = re.compile(self.pattern)
+ return [h for h in hosts if re.match(self.compiled_regex, h)]
+ raise SpecValidationError(f'Got unexpected pattern_type: {self.pattern_type}')
+
+ @classmethod
+ def to_host_pattern(cls, arg: HostPatternType) -> "HostPattern":
+ if arg is None:
+ return cls()
+ elif isinstance(arg, str):
+ return cls(arg)
+ elif isinstance(arg, cls):
+ return arg
+ elif isinstance(arg, dict):
+ if 'pattern' not in arg:
+ raise SpecValidationError("Got dict for host pattern "
+ f"with no pattern field: {arg}")
+ pattern = arg['pattern']
+ if not pattern:
+                raise SpecValidationError("Got dict for host pattern "
+                                          f"with empty pattern: {arg}")
+ assert isinstance(pattern, str)
+ if 'pattern_type' in arg:
+ pattern_type = arg['pattern_type']
+ if not pattern_type or pattern_type == 'fnmatch':
+ return cls(pattern, pattern_type=PatternType.fnmatch)
+ elif pattern_type == 'regex':
+ return cls(pattern, pattern_type=PatternType.regex)
+ else:
+ raise SpecValidationError("Got dict for host pattern "
+ f"with unknown pattern type: {arg}")
+ return cls(pattern)
+ raise SpecValidationError(f"Cannot convert {type(arg)} object to HostPattern")
+
+ def __eq__(self, other: Any) -> bool:
+ try:
+ other_hp = self.to_host_pattern(other)
+ except SpecValidationError:
+ return False
+ return self.pattern == other_hp.pattern and self.pattern_type == other_hp.pattern_type
+
+ def pretty_str(self) -> str:
+ # Placement specs must be able to be converted between the Python object
+ # representation and a pretty str both ways. So we need a corresponding
+ # function for HostPattern to convert it to a pretty str that we can
+ # convert back later.
+ res = self.pattern if self.pattern else ''
+ if self.pattern_type == PatternType.regex:
+ res = 'regex:' + res
+ return res
+
+ @classmethod
+ def from_pretty_str(cls, val: str) -> "HostPattern":
+        if val.startswith('regex:'):
+ return cls(val[6:], pattern_type=PatternType.regex)
+ else:
+ return cls(val)
+
+ def __repr__(self) -> str:
+ return f'HostPattern(pattern=\'{self.pattern}\', pattern_type={str(self.pattern_type)})'
+
+ def to_json(self) -> Union[str, Dict[str, Any], None]:
+ if self.pattern_type and self.pattern_type != PatternType.fnmatch:
+ return {
+ 'pattern': self.pattern,
+ 'pattern_type': self.pattern_type.name
+ }
+ return self.pattern
+
+ @classmethod
+    def from_json(cls, val: Dict[str, Any]) -> "HostPattern":
+        return cls.to_host_pattern(val)
+
+ def __bool__(self) -> bool:
+ if self.pattern:
+ return True
+ return False
+
+
class PlacementSpec(object):
"""
For APIs that need to specify a host subset
"""
def __init__(self,
- label=None, # type: Optional[str]
- hosts=None, # type: Union[List[str],List[HostPlacementSpec], None]
- count=None, # type: Optional[int]
- count_per_host=None, # type: Optional[int]
- host_pattern=None, # type: Optional[str]
+ label: Optional[str] = None,
+ hosts: Union[List[str], List[HostPlacementSpec], None] = None,
+ count: Optional[int] = None,
+ count_per_host: Optional[int] = None,
+ host_pattern: HostPatternType = None,
):
# type: (...) -> None
self.label = label
@@ -150,7 +253,7 @@ class PlacementSpec(object):
self.count_per_host = count_per_host # type: Optional[int]
#: fnmatch patterns to select hosts. Can also be a single host.
- self.host_pattern = host_pattern # type: Optional[str]
+ self.host_pattern: HostPattern = HostPattern.to_host_pattern(host_pattern)
self.validate()
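
Illustration (not part of the patch): the practical difference between the two pattern types, assuming the in-tree import path. fnmatch must cover the whole hostname, while the regex path uses re.match and therefore anchors only at the start:

    from ceph.deployment.service_spec import HostPattern, PatternType

    hosts = ['data1', 'data10', 'mon0']

    fn = HostPattern('data[0-9]')            # default PatternType.fnmatch
    assert fn.filter_hosts(hosts) == ['data1']              # whole-name match

    rx = HostPattern('data[0-9]', pattern_type=PatternType.regex)
    assert rx.filter_hosts(hosts) == ['data1', 'data10']    # prefix match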
@@ -190,10 +293,11 @@ class PlacementSpec(object):
all_hosts = [hs.hostname for hs in hostspecs]
return [h.hostname for h in self.hosts if h.hostname in all_hosts]
if self.label:
- return [hs.hostname for hs in hostspecs if self.label in hs.labels]
- all_hosts = [hs.hostname for hs in hostspecs]
+ all_hosts = [hs.hostname for hs in hostspecs if self.label in hs.labels]
+ else:
+ all_hosts = [hs.hostname for hs in hostspecs]
if self.host_pattern:
- return fnmatch.filter(all_hosts, self.host_pattern)
+ return self.host_pattern.filter_hosts(all_hosts)
return all_hosts
def get_target_count(self, hostspecs: Iterable[HostSpec]) -> int:
@@ -217,7 +321,7 @@ class PlacementSpec(object):
if self.label:
kv.append('label:%s' % self.label)
if self.host_pattern:
- kv.append(self.host_pattern)
+ kv.append(self.host_pattern.pretty_str())
return ';'.join(kv)
def __repr__(self) -> str:
@@ -258,7 +362,7 @@ class PlacementSpec(object):
if self.count_per_host:
r['count_per_host'] = self.count_per_host
if self.host_pattern:
- r['host_pattern'] = self.host_pattern
+ r['host_pattern'] = self.host_pattern.to_json()
return r
def validate(self) -> None:
@@ -302,8 +406,9 @@ class PlacementSpec(object):
"count-per-host cannot be combined explicit placement with names or networks"
)
if self.host_pattern:
- if not isinstance(self.host_pattern, str):
- raise SpecValidationError('host_pattern must be of type string')
+            # if we got an invalid type for the host_pattern, it would have
+            # triggered a SpecValidationError when attempting to convert it
+            # to a HostPattern type, so no type checking is needed here.
if self.hosts:
raise SpecValidationError('cannot combine host patterns and hosts')
@@ -341,10 +446,17 @@ tPlacementSpec(hostname='host2', network='', name='')])
>>> PlacementSpec.from_string('3 label:mon')
PlacementSpec(count=3, label='mon')
- fnmatch is also supported:
+ You can specify a regex to match with `regex:<regex>`
+
+ >>> PlacementSpec.from_string('regex:Foo[0-9]|Bar[0-9]')
+ PlacementSpec(host_pattern=HostPattern(pattern='Foo[0-9]|Bar[0-9]', \
+pattern_type=PatternType.regex))
+
+ fnmatch is the default for a single string if "regex:" is not provided:
>>> PlacementSpec.from_string('data[1-3]')
- PlacementSpec(host_pattern='data[1-3]')
+ PlacementSpec(host_pattern=HostPattern(pattern='data[1-3]', \
+pattern_type=PatternType.fnmatch))
>>> PlacementSpec.from_string(None)
PlacementSpec()
@@ -394,7 +506,8 @@ tPlacementSpec(hostname='host2', network='', name='')])
advanced_hostspecs = [h for h in strings if
(':' in h or '=' in h or not any(c in '[]?*:=' for c in h)) and
- 'label:' not in h]
+ 'label:' not in h and
+ 'regex:' not in h]
for a_h in advanced_hostspecs:
strings.remove(a_h)
@@ -406,15 +519,20 @@ tPlacementSpec(hostname='host2', network='', name='')])
label = labels[0][6:] if labels else None
host_patterns = strings
+ host_pattern: Optional[HostPattern] = None
if len(host_patterns) > 1:
raise SpecValidationError(
'more than one host pattern provided: {}'.format(host_patterns))
+ if host_patterns:
+            # host_patterns is non-empty and, per the check above, has at
+            # most one element, so we are guaranteed a single string here
+ host_pattern = HostPattern.from_pretty_str(host_patterns[0])
ps = PlacementSpec(count=count,
count_per_host=count_per_host,
hosts=advanced_hostspecs,
label=label,
- host_pattern=host_patterns[0] if host_patterns else None)
+ host_pattern=host_pattern)
return ps
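
Illustration (not part of the patch): round-tripping a regex placement through the pretty-str and JSON forms, assuming the in-tree import path. Plain fnmatch patterns keep serializing as a bare string, so existing specs stay compatible; a regex pattern becomes a dict so its type survives the cycle:

    from ceph.deployment.service_spec import HostPattern, PlacementSpec

    ps = PlacementSpec.from_string('3 regex:Foo[0-9]|Bar[0-9]')
    assert ps.count == 3
    assert ps.host_pattern.pretty_str() == 'regex:Foo[0-9]|Bar[0-9]'
    assert ps.host_pattern.to_json() == {'pattern': 'Foo[0-9]|Bar[0-9]',
                                         'pattern_type': 'regex'}
    assert HostPattern('data*').to_json() == 'data*'
    assert HostPattern.from_json(ps.host_pattern.to_json()) == ps.host_pattern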
@@ -625,7 +743,8 @@ class ServiceSpec(object):
KNOWN_SERVICE_TYPES = 'alertmanager crash grafana iscsi nvmeof loki promtail mds mgr mon nfs ' \
'node-exporter osd prometheus rbd-mirror rgw agent ceph-exporter ' \
'container ingress cephfs-mirror snmp-gateway jaeger-tracing ' \
- 'elasticsearch jaeger-agent jaeger-collector jaeger-query'.split()
+ 'elasticsearch jaeger-agent jaeger-collector jaeger-query ' \
+ 'node-proxy'.split()
REQUIRES_SERVICE_ID = 'iscsi nvmeof mds nfs rgw container ingress '.split()
MANAGED_CONFIG_OPTIONS = [
'mds_join_fs',
@@ -951,6 +1070,7 @@ class NFSServiceSpec(ServiceSpec):
extra_container_args: Optional[GeneralArgList] = None,
extra_entrypoint_args: Optional[GeneralArgList] = None,
enable_haproxy_protocol: bool = False,
+ idmap_conf: Optional[Dict[str, Dict[str, str]]] = None,
custom_configs: Optional[List[CustomConfig]] = None,
):
assert service_type == 'nfs'
@@ -963,6 +1083,7 @@ class NFSServiceSpec(ServiceSpec):
self.port = port
self.virtual_ip = virtual_ip
self.enable_haproxy_protocol = enable_haproxy_protocol
+ self.idmap_conf = idmap_conf
def get_port_start(self) -> List[int]:
if self.port:
@@ -1294,6 +1415,7 @@ class IngressSpec(ServiceSpec):
extra_entrypoint_args: Optional[GeneralArgList] = None,
enable_haproxy_protocol: bool = False,
custom_configs: Optional[List[CustomConfig]] = None,
+ health_check_interval: Optional[str] = None,
):
assert service_type == 'ingress'
@@ -1326,6 +1448,8 @@ class IngressSpec(ServiceSpec):
self.ssl = ssl
self.keepalive_only = keepalive_only
self.enable_haproxy_protocol = enable_haproxy_protocol
+        self.health_check_interval = (
+            health_check_interval.strip() if health_check_interval else None
+        )
def get_port_start(self) -> List[int]:
ports = []
@@ -1356,6 +1480,13 @@ class IngressSpec(ServiceSpec):
if self.virtual_ip is not None and self.virtual_ips_list is not None:
raise SpecValidationError(
'Cannot add ingress: Single and multiple virtual IPs specified')
+ if self.health_check_interval:
+ valid_units = ['s', 'm', 'h']
+ m = re.search(rf"^(\d+)({'|'.join(valid_units)})$", self.health_check_interval)
+ if not m:
+ raise SpecValidationError(
+ f'Cannot add ingress: Invalid health_check_interval specified. '
+ f'Valid units are: {valid_units}')
yaml.add_representer(IngressSpec, ServiceSpec.yaml_representer)
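
Illustration (not part of the patch): values the new health_check_interval validator accepts or rejects, using the same regex as validate():

    import re

    valid_units = ['s', 'm', 'h']
    pat = rf"^(\d+)({'|'.join(valid_units)})$"       # i.e. ^(\d+)(s|m|h)$
    assert re.search(pat, '30s') and re.search(pat, '5m') and re.search(pat, '1h')
    assert not re.search(pat, '30')                  # unit is mandatory
    assert not re.search(pat, '1.5s')                # no fractional values
    assert not re.search(pat, '30 s')                # only leading/trailing whitespace is stripped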
@@ -1372,7 +1503,6 @@ class CustomContainerSpec(ServiceSpec):
preview_only: bool = False,
image: Optional[str] = None,
entrypoint: Optional[str] = None,
- extra_entrypoint_args: Optional[GeneralArgList] = None,
uid: Optional[int] = None,
gid: Optional[int] = None,
volume_mounts: Optional[Dict[str, str]] = {},
@@ -1384,6 +1514,9 @@ class CustomContainerSpec(ServiceSpec):
ports: Optional[List[int]] = [],
dirs: Optional[List[str]] = [],
files: Optional[Dict[str, Any]] = {},
+ extra_container_args: Optional[GeneralArgList] = None,
+ extra_entrypoint_args: Optional[GeneralArgList] = None,
+ custom_configs: Optional[List[CustomConfig]] = None,
):
assert service_type == 'container'
assert service_id is not None
@@ -1393,7 +1526,9 @@ class CustomContainerSpec(ServiceSpec):
service_type, service_id,
placement=placement, unmanaged=unmanaged,
preview_only=preview_only, config=config,
- networks=networks, extra_entrypoint_args=extra_entrypoint_args)
+ networks=networks, extra_container_args=extra_container_args,
+ extra_entrypoint_args=extra_entrypoint_args,
+ custom_configs=custom_configs)
self.image = image
self.entrypoint = entrypoint
@@ -1426,6 +1561,19 @@ class CustomContainerSpec(ServiceSpec):
config_json[prop] = value
return config_json
+ def validate(self) -> None:
+ super(CustomContainerSpec, self).validate()
+
+ if self.args and self.extra_container_args:
+ raise SpecValidationError(
+ '"args" and "extra_container_args" are mutually exclusive '
+ '(and both serve the same purpose)')
+
+ if self.files and self.custom_configs:
+ raise SpecValidationError(
+ '"files" and "custom_configs" are mutually exclusive '
+ '(and both serve the same purpose)')
+
yaml.add_representer(CustomContainerSpec, ServiceSpec.yaml_representer)
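
Illustration (not part of the patch): the new mutual-exclusion checks in validate(). A sketch assuming the in-tree import paths; args and files are pre-existing CustomContainerSpec fields not shown in this hunk:

    from ceph.deployment.hostspec import SpecValidationError
    from ceph.deployment.service_spec import CustomContainerSpec

    spec = CustomContainerSpec(service_type='container', service_id='foo',
                               args=['--verbose'],
                               extra_container_args=['--verbose'])
    try:
        spec.validate()
    except SpecValidationError:
        pass    # "args" and "extra_container_args" are mutually exclusive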
@@ -1540,6 +1688,7 @@ class GrafanaSpec(MonitoringSpec):
preview_only: bool = False,
config: Optional[Dict[str, str]] = None,
networks: Optional[List[str]] = None,
+ only_bind_port_on_networks: bool = False,
port: Optional[int] = None,
protocol: Optional[str] = 'https',
initial_admin_password: Optional[str] = None,
@@ -1560,6 +1709,12 @@ class GrafanaSpec(MonitoringSpec):
self.anonymous_access = anonymous_access
self.protocol = protocol
+        # Whether the ports this service's daemons bind to should be
+        # bound only to the networks listed in the networks param, or
+        # to all networks. Defaults to False, i.e. bind on all networks.
+ self.only_bind_port_on_networks = only_bind_port_on_networks
+
def validate(self) -> None:
super(GrafanaSpec, self).validate()
if self.protocol not in ['http', 'https']:
@@ -1585,6 +1740,7 @@ class PrometheusSpec(MonitoringSpec):
preview_only: bool = False,
config: Optional[Dict[str, str]] = None,
networks: Optional[List[str]] = None,
+ only_bind_port_on_networks: bool = False,
port: Optional[int] = None,
retention_time: Optional[str] = None,
retention_size: Optional[str] = None,
@@ -1602,6 +1758,7 @@ class PrometheusSpec(MonitoringSpec):
self.retention_time = retention_time.strip() if retention_time else None
self.retention_size = retention_size.strip() if retention_size else None
+ self.only_bind_port_on_networks = only_bind_port_on_networks
def validate(self) -> None:
super(PrometheusSpec, self).validate()
@@ -1820,6 +1977,7 @@ class MONSpec(ServiceSpec):
preview_only: bool = False,
networks: Optional[List[str]] = None,
extra_container_args: Optional[GeneralArgList] = None,
+ extra_entrypoint_args: Optional[GeneralArgList] = None,
custom_configs: Optional[List[CustomConfig]] = None,
crush_locations: Optional[Dict[str, List[str]]] = None,
):
@@ -1832,6 +1990,7 @@ class MONSpec(ServiceSpec):
preview_only=preview_only,
networks=networks,
extra_container_args=extra_container_args,
+ extra_entrypoint_args=extra_entrypoint_args,
custom_configs=custom_configs)
self.crush_locations = crush_locations
@@ -1980,6 +2139,7 @@ class CephExporterSpec(ServiceSpec):
unmanaged: bool = False,
preview_only: bool = False,
extra_container_args: Optional[GeneralArgList] = None,
+ extra_entrypoint_args: Optional[GeneralArgList] = None,
):
assert service_type == 'ceph-exporter'
@@ -1988,7 +2148,8 @@ class CephExporterSpec(ServiceSpec):
placement=placement,
unmanaged=unmanaged,
preview_only=preview_only,
- extra_container_args=extra_container_args)
+ extra_container_args=extra_container_args,
+ extra_entrypoint_args=extra_entrypoint_args)
self.service_type = service_type
self.sock_dir = sock_dir
diff --git a/src/python-common/ceph/deployment/translate.py b/src/python-common/ceph/deployment/translate.py
index 86243b8ae..dd91b33e9 100644
--- a/src/python-common/ceph/deployment/translate.py
+++ b/src/python-common/ceph/deployment/translate.py
@@ -13,9 +13,7 @@ logger = logging.getLogger(__name__)
# TODO refactor this to a DriveSelection method
class to_ceph_volume(object):
- _supported_device_classes = [
- "hdd", "ssd", "nvme"
- ]
+ NO_CRUSH = '_NO_CRUSH'
def __init__(self,
selection, # type: DriveSelection
@@ -35,20 +33,6 @@ class to_ceph_volume(object):
lvcount: Dict[str, List[str]] = dict()
"""
- Default entry for the global crush_device_class definition;
- if there's no global definition at spec level, we do not want
- to apply anything to the provided devices, hence we need to run
- a ceph-volume command without that option, otherwise we init an
- entry for the globally defined crush_device_class.
- """
- if self.spec.crush_device_class:
- lvcount[self.spec.crush_device_class] = []
-
- # entry where the drives that don't require a crush_device_class
- # option are collected
- lvcount["no_crush"] = []
-
- """
for each device, check if it's just a path or it has a crush_device
class definition, and append an entry to the right crush_device_
class group
@@ -57,35 +41,16 @@ class to_ceph_volume(object):
# iterate on List[Device], containing both path and
# crush_device_class
path = device.path
- crush_device_class = device.crush_device_class
+ crush_device_class = (
+ device.crush_device_class
+ or self.spec.crush_device_class
+ or self.NO_CRUSH
+ )
if path is None:
raise ValueError("Device path can't be empty")
- """
- if crush_device_class is specified for the current Device path
- we should either init the array for this group or append the
- drive path to the existing entry
- """
- if crush_device_class:
- if crush_device_class in lvcount.keys():
- lvcount[crush_device_class].append(path)
- else:
- lvcount[crush_device_class] = [path]
- continue
-
- """
- if no crush_device_class is specified for the current path
- but a global definition is present in the spec, so we group
- the drives together
- """
- if crush_device_class is None and self.spec.crush_device_class:
- lvcount[self.spec.crush_device_class].append(path)
- continue
- else:
- # default use case
- lvcount["no_crush"].append(path)
- continue
+ lvcount.setdefault(crush_device_class, []).append(path)
return lvcount
@@ -136,7 +101,7 @@ class to_ceph_volume(object):
cmd += " --block.db {}".format(db_devices.pop())
if wal_devices:
cmd += " --block.wal {}".format(wal_devices.pop())
- if d in self._supported_device_classes:
+ if d != self.NO_CRUSH:
cmd += " --crush-device-class {}".format(d)
cmds.append(cmd)
@@ -159,7 +124,7 @@ class to_ceph_volume(object):
if self.spec.block_db_size:
cmd += " --block-db-size {}".format(self.spec.block_db_size)
- if d in self._supported_device_classes:
+ if d != self.NO_CRUSH:
cmd += " --crush-device-class {}".format(d)
cmds.append(cmd)
@@ -180,17 +145,6 @@ class to_ceph_volume(object):
cmds[i] += " --yes"
cmds[i] += " --no-systemd"
- # set the --crush-device-class option when:
- # - crush_device_class is specified at spec level (global for all the osds) # noqa E501
- # - crush_device_class is allowed
- # - there's no override at osd level
- if (
- self.spec.crush_device_class and
- self.spec.crush_device_class in self._supported_device_classes and # noqa E501
- "crush-device-class" not in cmds[i]
- ):
- cmds[i] += " --crush-device-class {}".format(self.spec.crush_device_class) # noqa E501
-
if self.preview:
cmds[i] += " --report"
cmds[i] += " --format json"
diff --git a/src/python-common/ceph/tests/test_disk_selector.py b/src/python-common/ceph/tests/test_disk_selector.py
index b08236130..03bfcbe16 100644
--- a/src/python-common/ceph/tests/test_disk_selector.py
+++ b/src/python-common/ceph/tests/test_disk_selector.py
@@ -557,4 +557,43 @@ class TestDriveSelection(object):
inventory = _mk_inventory(_mk_device(rotational=True)*2)
m = 'Failed to validate OSD spec "foobar.data_devices": No filters applied'
with pytest.raises(DriveGroupValidationError, match=m):
-        drive_selection.DriveSelection(spec, inventory)
\ No newline at end of file
+ drive_selection.DriveSelection(spec, inventory)
+
+
+class TestDeviceSelectionLimit:
+
+ def test_limit_existing_devices(self):
+ # Initial setup for this test is meant to be that /dev/sda
+ # is already being used for an OSD, hence it being marked
+ # as a ceph_device. /dev/sdb and /dev/sdc are not being used
+ # for OSDs yet. The limit will be set to 2 and the DriveSelection
+ # is set to have 1 pre-existing device (corresponding to /dev/sda)
+ dev_a = Device('/dev/sda', ceph_device=True, available=False)
+ dev_b = Device('/dev/sdb', ceph_device=False, available=True)
+ dev_c = Device('/dev/sdc', ceph_device=False, available=True)
+ all_devices: List[Device] = [dev_a, dev_b, dev_c]
+ processed_devices: List[Device] = []
+ filter = DeviceSelection(all=True, limit=2)
+ dgs = DriveGroupSpec(data_devices=filter)
+ ds = drive_selection.DriveSelection(dgs, all_devices, existing_daemons=1)
+
+ # Check /dev/sda. It's already being used for an OSD and will
+ # be counted in existing_daemons. This check should return False
+ # as we are not over the limit.
+ assert not ds._limit_reached(filter, processed_devices, '/dev/sda')
+ processed_devices.append(dev_a)
+
+        # We should still not be over the limit here with /dev/sdb since
+        # we will have only the one pre-existing daemon and /dev/sdb itself.
+        # This case previously failed as devices that contributed to
+        # existing_daemons were double counted, both as devices and daemons.
+ assert not ds._limit_reached(filter, processed_devices, '/dev/sdb')
+ processed_devices.append(dev_b)
+
+        # Now, with /dev/sdb and the pre-existing daemon taking up the two
+        # slots, /dev/sdc should be rejected because we are over the limit.
+ assert ds._limit_reached(filter, processed_devices, '/dev/sdc')
+
+ # DriveSelection does device assignment on initialization. Let's check
+ # it picked up the expected devices
+ assert ds._data == [dev_a, dev_b]
diff --git a/src/python-common/ceph/tests/test_drive_group.py b/src/python-common/ceph/tests/test_drive_group.py
index 77e9b4083..cd4a238af 100644
--- a/src/python-common/ceph/tests/test_drive_group.py
+++ b/src/python-common/ceph/tests/test_drive_group.py
@@ -392,8 +392,12 @@ def test_ceph_volume_command_12(test_input2):
drive = drive_selection.DriveSelection(spec, spec.data_devices.paths)
cmds = translate.to_ceph_volume(drive, []).run()
- assert (cmds[0] == 'lvm batch --no-auto /dev/sdb --crush-device-class ssd --yes --no-systemd') # noqa E501
- assert (cmds[1] == 'lvm batch --no-auto /dev/sda --crush-device-class hdd --yes --no-systemd') # noqa E501
+ expected_cmds = [
+ 'lvm batch --no-auto /dev/sdb --crush-device-class ssd --yes --no-systemd',
+ 'lvm batch --no-auto /dev/sda --crush-device-class hdd --yes --no-systemd',
+ ]
+ assert len(cmds) == len(expected_cmds), f"Expected {expected_cmds} got {cmds}"
+ assert all(cmd in cmds for cmd in expected_cmds), f'Expected {expected_cmds} got {cmds}'
@pytest.mark.parametrize("test_input3",
@@ -418,8 +422,12 @@ def test_ceph_volume_command_13(test_input3):
drive = drive_selection.DriveSelection(spec, spec.data_devices.paths)
cmds = translate.to_ceph_volume(drive, []).run()
- assert (cmds[0] == 'lvm batch --no-auto /dev/sdb --yes --no-systemd') # noqa E501
- assert (cmds[1] == 'lvm batch --no-auto /dev/sda --crush-device-class hdd --yes --no-systemd') # noqa E501
+ expected_cmds = [
+ 'lvm batch --no-auto /dev/sdb --yes --no-systemd',
+ 'lvm batch --no-auto /dev/sda --crush-device-class hdd --yes --no-systemd',
+ ]
+ assert len(cmds) == len(expected_cmds), f"Expected {expected_cmds} got {cmds}"
+ assert all(cmd in cmds for cmd in expected_cmds), f'Expected {expected_cmds} got {cmds}'
@pytest.mark.parametrize("test_input4",
diff --git a/src/python-common/ceph/tests/test_service_spec.py b/src/python-common/ceph/tests/test_service_spec.py
index 502057f5c..538ee97ed 100644
--- a/src/python-common/ceph/tests/test_service_spec.py
+++ b/src/python-common/ceph/tests/test_service_spec.py
@@ -144,11 +144,13 @@ def test_apply_prometheus(spec: PrometheusSpec, raise_exception: bool, msg: str)
('2 host1 host2', "PlacementSpec(count=2, hosts=[HostPlacementSpec(hostname='host1', network='', name=''), HostPlacementSpec(hostname='host2', network='', name='')])"),
('label:foo', "PlacementSpec(label='foo')"),
('3 label:foo', "PlacementSpec(count=3, label='foo')"),
- ('*', "PlacementSpec(host_pattern='*')"),
- ('3 data[1-3]', "PlacementSpec(count=3, host_pattern='data[1-3]')"),
- ('3 data?', "PlacementSpec(count=3, host_pattern='data?')"),
- ('3 data*', "PlacementSpec(count=3, host_pattern='data*')"),
+ ('*', "PlacementSpec(host_pattern=HostPattern(pattern='*', pattern_type=PatternType.fnmatch))"),
+ ('3 data[1-3]', "PlacementSpec(count=3, host_pattern=HostPattern(pattern='data[1-3]', pattern_type=PatternType.fnmatch))"),
+ ('3 data?', "PlacementSpec(count=3, host_pattern=HostPattern(pattern='data?', pattern_type=PatternType.fnmatch))"),
+ ('3 data*', "PlacementSpec(count=3, host_pattern=HostPattern(pattern='data*', pattern_type=PatternType.fnmatch))"),
("count-per-host:4 label:foo", "PlacementSpec(count_per_host=4, label='foo')"),
+ ('regex:Foo[0-9]|Bar[0-9]', "PlacementSpec(host_pattern=HostPattern(pattern='Foo[0-9]|Bar[0-9]', pattern_type=PatternType.regex))"),
+ ('3 regex:Foo[0-9]|Bar[0-9]', "PlacementSpec(count=3, host_pattern=HostPattern(pattern='Foo[0-9]|Bar[0-9]', pattern_type=PatternType.regex))"),
])
def test_parse_placement_specs(test_input, expected):
ret = PlacementSpec.from_string(test_input)
@@ -161,6 +163,9 @@ def test_parse_placement_specs(test_input, expected):
("host=a host*"),
("host=a label:wrong"),
("host? host*"),
+ ("host? regex:host*"),
+ ("regex:host? host*"),
+ ("regex:host? regex:host*"),
('host=a count-per-host:0'),
('host=a count-per-host:-10'),
('count:2 count-per-host:1'),
@@ -313,6 +318,13 @@ placement:
host_pattern: '*'
unmanaged: true
---
+service_type: crash
+service_name: crash
+placement:
+ host_pattern:
+ pattern: Foo[0-9]|Bar[0-9]
+ pattern_type: regex
+---
service_type: rgw
service_id: default-rgw-realm.eu-central-1.1
service_name: rgw.default-rgw-realm.eu-central-1.1
@@ -384,6 +396,12 @@ service_type: nfs
service_id: mynfs
service_name: nfs.mynfs
spec:
+ idmap_conf:
+ general:
+ local-realms: domain.org
+ mapping:
+ nobody-group: nfsnobody
+ nobody-user: nfsnobody
port: 1234
---
service_type: iscsi
diff --git a/src/python-common/ceph/utils.py b/src/python-common/ceph/utils.py
index 643be0658..e92a2d1de 100644
--- a/src/python-common/ceph/utils.py
+++ b/src/python-common/ceph/utils.py
@@ -1,8 +1,15 @@
import datetime
import re
import string
+import ssl
-from typing import Optional
+from typing import Optional, MutableMapping, Tuple, Any
+from urllib.error import HTTPError, URLError
+from urllib.request import urlopen, Request
+
+import logging
+
+log = logging.getLogger(__name__)
def datetime_now() -> datetime.datetime:
@@ -121,3 +128,42 @@ def is_hex(s: str, strict: bool = True) -> bool:
return False
return True
+
+
+def http_req(hostname: str = '',
+ port: str = '443',
+ method: Optional[str] = None,
+             headers: Optional[MutableMapping[str, str]] = None,
+ data: Optional[str] = None,
+ endpoint: str = '/',
+ scheme: str = 'https',
+ ssl_verify: bool = False,
+ timeout: Optional[int] = None,
+ ssl_ctx: Optional[Any] = None) -> Tuple[Any, Any, Any]:
+
+ if not ssl_ctx:
+ ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
+ if not ssl_verify:
+ ssl_ctx.check_hostname = False
+ ssl_ctx.verify_mode = ssl.CERT_NONE
+ else:
+ ssl_ctx.verify_mode = ssl.CERT_REQUIRED
+
+ url: str = f'{scheme}://{hostname}:{port}{endpoint}'
+ _data = bytes(data, 'ascii') if data else None
+    _headers = dict(headers) if headers else {}  # copy; never mutate the caller's dict
+ if data and not method:
+ method = 'POST'
+ if not _headers.get('Content-Type') and method in ['POST', 'PATCH']:
+ _headers['Content-Type'] = 'application/json'
+ try:
+ req = Request(url, _data, _headers, method=method)
+ with urlopen(req, context=ssl_ctx, timeout=timeout) as response:
+ response_str = response.read()
+ response_headers = response.headers
+ response_code = response.code
+ return response_headers, response_str.decode(), response_code
+ except (HTTPError, URLError) as e:
+ log.error(e)
+        # re-raise so callers can handle the failure themselves
+ raise
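
Illustration (not part of the patch): a typical call to the new http_req helper, assuming the in-tree import path. Host and endpoint are hypothetical; passing data without a method implies POST, and a JSON Content-Type header is filled in automatically:

    import json
    from ceph.utils import http_req

    headers, body, status = http_req(
        hostname='mgr.example.local',        # hypothetical target
        port='8443',
        endpoint='/api/v1/do',
        data=json.dumps({'op': 'status'}),   # implies method='POST'
        ssl_verify=False,                    # tolerate self-signed certs
        timeout=10,
    )
    if status == 200:
        print(body)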