author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
commit    e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree      64f88b554b444a49f656b6c656111a145cbbaa28  /qa/tasks/util
parent    Initial commit. (diff)
Adding upstream version 18.2.2. (tag: upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'qa/tasks/util')
-rw-r--r--  qa/tasks/util/__init__.py            26
-rw-r--r--  qa/tasks/util/chacra.py             186
-rw-r--r--  qa/tasks/util/rados.py               87
-rw-r--r--  qa/tasks/util/rgw.py                 99
-rw-r--r--  qa/tasks/util/test/__init__.py        0
-rw-r--r--  qa/tasks/util/test/test_rados.py     40
-rw-r--r--  qa/tasks/util/workunit.py            78
7 files changed, 516 insertions, 0 deletions
diff --git a/qa/tasks/util/__init__.py b/qa/tasks/util/__init__.py
new file mode 100644
index 000000000..5b8575ed9
--- /dev/null
+++ b/qa/tasks/util/__init__.py
@@ -0,0 +1,26 @@
+from teuthology import misc
+
+def get_remote(ctx, cluster, service_type, service_id):
+ """
+ Get the Remote for the host where a particular role runs.
+
+ :param cluster: name of the cluster the service is part of
+ :param service_type: e.g. 'mds', 'osd', 'client'
+ :param service_id: The third part of a role, e.g. '0' for
+ the role 'ceph.client.0'
+ :return: a Remote instance for the host where the
+ requested role is placed
+ """
+ def _is_instance(role):
+ role_tuple = misc.split_role(role)
+ return role_tuple == (cluster, service_type, str(service_id))
+ try:
+ (remote,) = ctx.cluster.only(_is_instance).remotes.keys()
+ except ValueError:
+ raise KeyError("Service {0}.{1}.{2} not found".format(cluster,
+ service_type,
+ service_id))
+ return remote
+
+def get_remote_for_role(ctx, role):
+ return get_remote(ctx, *misc.split_role(role))
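For context (not part of the patch), a minimal sketch of how these helpers are typically used from a teuthology task; the task body, role name, and command are illustrative only:

    # Hypothetical task body: resolve a role such as 'ceph.client.0' to the
    # Remote it is placed on, then run a command there.
    from tasks.util import get_remote_for_role

    def task(ctx, config):
        role = config.get('role', 'ceph.client.0')
        remote = get_remote_for_role(ctx, role)   # raises KeyError if the role is not placed
        remote.run(args=['uname', '-a'])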
diff --git a/qa/tasks/util/chacra.py b/qa/tasks/util/chacra.py
new file mode 100644
index 000000000..ed9358a59
--- /dev/null
+++ b/qa/tasks/util/chacra.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python3
+
+import argparse
+import logging
+import requests
+import sys
+
+from pathlib import Path
+from urllib.parse import urlparse
+
+log = logging.getLogger(__name__)
+
+SHAMAN_SEARCH_URL = 'https://shaman.ceph.com/api/search'
+
+PROJECT = 'ceph'
+DISTRO = 'ubuntu'
+RELEASE = 'focal'
+ARCH='x86_64'
+BRANCH = 'main'
+SHA1 = 'latest'
+FLAVOR = 'default'
+FILENAME = 'cephadm'
+
+
+def search(*args, **kwargs):
+    '''
+    Query shaman for a build result
+    '''
+    resp = requests.get(SHAMAN_SEARCH_URL, params=kwargs)
+    resp.raise_for_status()
+    return resp
+
+def _get_distros(distro, release, arch=None):
+    ret = f'{distro}/{release}'
+    if arch:
+        ret = f'{ret}/{arch}'
+    return ret
+
+def _get_binary_url(host, project, ref, sha1, distro, release, arch, flavor, filename):
+    return f'https://{host}/binaries/{project}/{ref}/{sha1}/{distro}/{release}/{arch}/flavors/{flavor}/{filename}'
+
+def get_binary_url(
+    filename,
+    project=None,
+    distro=None,
+    release=None,
+    arch=None,
+    flavor=None,
+    branch=None,
+    sha1=None
+):
+    '''
+    Return the chacra url for a build result
+    '''
+    # query shaman for the built binary
+    s = {}
+    if project:
+        s['project'] = project
+    if distro:
+        s['distros'] = _get_distros(distro, release, arch)
+    if flavor:
+        s['flavor'] = flavor
+    if branch:
+        s['ref'] = branch
+    if sha1:
+        s['sha1'] = sha1
+
+    resp = search(**s)
+    result = resp.json()
+
+    if len(result) == 0:
+        raise RuntimeError(f'no results found at {resp.url}')
+
+    # TODO: filter the result down to the correct arch etc.?
+    result = result[0]
+
+    status = result['status']
+    if status != 'ready':
+        raise RuntimeError(f'cannot pull file with status: {status}')
+
+    # build the chacra url
+    chacra_host = urlparse(result['url']).netloc
+    chacra_ref = result['ref']
+    chacra_sha1 = result['sha1']
+    log.info(f'got chacra host {chacra_host}, ref {chacra_ref}, sha1 {chacra_sha1} from {resp.url}')
+
+    # prefer the codename (or version) reported by shaman; only fall back to the caller-supplied release if shaman reports neither
+    if result.get('distro_codename'):
+        release = result.get('distro_codename')
+    elif result.get('distro_version'):
+        release = result.get('distro_version')
+    elif not release:
+        raise RuntimeError('cannot determine distro release!')
+
+    if not arch:
+        if ARCH in result['archs']:
+            arch = ARCH
+        elif len(result['archs']) > 0:
+            arch = result['archs'][0]
+        else:
+            raise RuntimeError('cannot determine the arch type!')
+
+    # build the url to the binary
+    url = _get_binary_url(
+        chacra_host,
+        result['project'],
+        chacra_ref,
+        chacra_sha1,
+        result['distro'],
+        release,
+        arch,
+        result['flavor'],
+        filename,
+    )
+
+    return url
+
+def pull(
+    filename,
+    project=None,
+    distro=None,
+    release=None,
+    arch=None,
+    flavor=None,
+    branch=None,
+    sha1=None
+):
+    '''
+    Pull a build result from chacra
+    '''
+    url = get_binary_url(
+        filename,
+        project=project,
+        distro=distro,
+        release=release,
+        arch=arch,
+        flavor=flavor,
+        branch=branch,
+        sha1=sha1
+    )
+    resp = requests.get(url, stream=True)
+    resp.raise_for_status()
+    log.info(f'got file from {resp.url}')
+
+    return resp
+
+def main():
+    handler = logging.StreamHandler(sys.stdout)
+    log.addHandler(handler)
+    log.setLevel(logging.INFO)
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--project', default=PROJECT)
+    parser.add_argument('--distro', default=DISTRO)
+    parser.add_argument('--release', default=RELEASE)
+    parser.add_argument('--arch', default=ARCH)
+    parser.add_argument('--branch', default=BRANCH)
+    parser.add_argument('--sha1', default=SHA1)
+    parser.add_argument('--flavor', default=FLAVOR)
+    parser.add_argument('--src', default=FILENAME)
+    parser.add_argument('--dest', default=FILENAME)
+    args = parser.parse_args()
+
+    resp = pull(
+        args.src,
+        project=args.project,
+        distro=args.distro,
+        release=args.release,
+        arch=args.arch,
+        flavor=args.flavor,
+        branch=args.branch,
+        sha1=args.sha1
+    )
+
+    dest = Path(args.dest).absolute()
+    with open(dest, 'wb') as f:
+        for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
+            log.info('.',)
+            f.write(chunk)
+    log.info(f'wrote binary file: {dest}')
+
+    return 0
+
+
+if __name__ == '__main__':
+    sys.exit(main())
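As a usage note (not part of the patch), chacra.py is both a module and a small CLI; the equivalent of `python3 qa/tasks/util/chacra.py --branch main` can be done from Python roughly as below, with the branch and destination path being illustrative:

    # Fetch a 'cephadm' binary built for the 'main' branch and write it to
    # disk, mirroring what main() does with its defaults.
    from pathlib import Path
    from tasks.util import chacra

    resp = chacra.pull('cephadm', project='ceph', distro='ubuntu',
                       release='focal', arch='x86_64', branch='main')
    dest = Path('cephadm').absolute()
    with open(dest, 'wb') as f:
        for chunk in resp.iter_content(chunk_size=None):
            f.write(chunk)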
diff --git a/qa/tasks/util/rados.py b/qa/tasks/util/rados.py
new file mode 100644
index 000000000..a0c54ce4e
--- /dev/null
+++ b/qa/tasks/util/rados.py
@@ -0,0 +1,87 @@
+import logging
+
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+def rados(ctx, remote, cmd, wait=True, check_status=False):
+    testdir = teuthology.get_testdir(ctx)
+    log.info("rados %s" % ' '.join(cmd))
+    pre = [
+        'adjust-ulimits',
+        'ceph-coverage',
+        '{tdir}/archive/coverage'.format(tdir=testdir),
+        'rados',
+        ];
+    pre.extend(cmd)
+    proc = remote.run(
+        args=pre,
+        check_status=check_status,
+        wait=wait,
+        )
+    if wait:
+        return proc.exitstatus
+    else:
+        return proc
+
+def create_ec_pool(remote, name, profile_name, pgnum, profile={}, cluster_name="ceph", application=None):
+    remote.run(args=['sudo', 'ceph'] +
+               cmd_erasure_code_profile(profile_name, profile) + ['--cluster', cluster_name])
+    remote.run(args=[
+        'sudo', 'ceph', 'osd', 'pool', 'create', name,
+        str(pgnum), str(pgnum), 'erasure', profile_name, '--cluster', cluster_name
+        ])
+    if application:
+        remote.run(args=[
+            'sudo', 'ceph', 'osd', 'pool', 'application', 'enable', name, application, '--cluster', cluster_name
+        ], check_status=False) # may fail as EINVAL when run in jewel upgrade test
+
+def create_replicated_pool(remote, name, pgnum, cluster_name="ceph", application=None):
+    remote.run(args=[
+        'sudo', 'ceph', 'osd', 'pool', 'create', name, str(pgnum), str(pgnum), '--cluster', cluster_name
+        ])
+    if application:
+        remote.run(args=[
+            'sudo', 'ceph', 'osd', 'pool', 'application', 'enable', name, application, '--cluster', cluster_name
+        ], check_status=False)
+
+def create_cache_pool(remote, base_name, cache_name, pgnum, size, cluster_name="ceph"):
+    remote.run(args=[
+        'sudo', 'ceph', 'osd', 'pool', 'create', cache_name, str(pgnum), '--cluster', cluster_name
+        ])
+    remote.run(args=[
+        'sudo', 'ceph', 'osd', 'tier', 'add-cache', base_name, cache_name,
+        str(size), '--cluster', cluster_name
+        ])
+
+def cmd_erasure_code_profile(profile_name, profile):
+ """
+ Return the shell command to run to create the erasure code profile
+ described by the profile parameter.
+
+ :param profile_name: a string matching [A-Za-z0-9-_.]+
+ :param profile: a map whose semantic depends on the erasure code plugin
+ :returns: a shell command as an array suitable for Remote.run
+
+ If profile is {}, it is replaced with
+
+ { 'k': '2', 'm': '1', 'crush-failure-domain': 'osd'}
+
+ for backward compatibility. In previous versions of teuthology,
+ these values were hardcoded as function arguments and some yaml
+ files were designed with these implicit values. The teuthology
+ code should not know anything about the erasure code profile
+ content or semantic. The valid values and parameters are outside
+ its scope.
+ """
+
+ if profile == {}:
+ profile = {
+ 'k': '2',
+ 'm': '1',
+ 'crush-failure-domain': 'osd'
+ }
+ return [
+ 'osd', 'erasure-code-profile', 'set',
+ profile_name
+ ] + [ str(key) + '=' + str(value) for key, value in profile.items() ]
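To make the backward-compatibility behaviour of cmd_erasure_code_profile() concrete, a small sketch (not part of the patch) of the command fragments it produces:

    from tasks.util import rados

    # An empty profile falls back to the historical defaults.
    print(rados.cmd_erasure_code_profile('myprofile', {}))
    # ['osd', 'erasure-code-profile', 'set', 'myprofile',
    #  'k=2', 'm=1', 'crush-failure-domain=osd']

    # An explicit profile is passed through as key=value pairs.
    print(rados.cmd_erasure_code_profile('myprofile', {'k': '4', 'm': '2'}))
    # ['osd', 'erasure-code-profile', 'set', 'myprofile', 'k=4', 'm=2']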
diff --git a/qa/tasks/util/rgw.py b/qa/tasks/util/rgw.py
new file mode 100644
index 000000000..59c801028
--- /dev/null
+++ b/qa/tasks/util/rgw.py
@@ -0,0 +1,99 @@
+import logging
+import json
+import time
+
+from io import StringIO
+
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+def rgwadmin(ctx, client, cmd, stdin=StringIO(), check_status=False,
+             omit_sudo=False, omit_tdir=False, format='json', decode=True,
+             log_level=logging.DEBUG):
+    log.info('rgwadmin: {client} : {cmd}'.format(client=client,cmd=cmd))
+    testdir = teuthology.get_testdir(ctx)
+    cluster_name, daemon_type, client_id = teuthology.split_role(client)
+    client_with_id = daemon_type + '.' + client_id
+    pre = [
+        'adjust-ulimits',
+        'ceph-coverage']
+    if not omit_tdir:
+        pre.append(
+            '{tdir}/archive/coverage'.format(tdir=testdir))
+    pre.extend([
+        'radosgw-admin',
+        '--log-to-stderr',
+        '--format', format,
+        '-n', client_with_id,
+        '--cluster', cluster_name,
+        ])
+    pre.extend(cmd)
+    log.log(log_level, 'rgwadmin: cmd=%s' % pre)
+    (remote,) = ctx.cluster.only(client).remotes.keys()
+    proc = remote.run(
+        args=pre,
+        check_status=check_status,
+        omit_sudo=omit_sudo,
+        stdout=StringIO(),
+        stderr=StringIO(),
+        stdin=stdin,
+        )
+    r = proc.exitstatus
+    out = proc.stdout.getvalue()
+    if not decode:
+        return (r, out)
+    j = None
+    if not r and out != '':
+        try:
+            j = json.loads(out)
+            log.log(log_level, ' json result: %s' % j)
+        except ValueError:
+            j = out
+            log.log(log_level, ' raw result: %s' % j)
+    return (r, j)
+
+def get_user_summary(out, user):
+    """Extract the summary for a given user"""
+    user_summary = None
+    for summary in out['summary']:
+        if summary.get('user') == user:
+            user_summary = summary
+
+    if not user_summary:
+        raise AssertionError('No summary info found for user: %s' % user)
+
+    return user_summary
+
+def get_user_successful_ops(out, user):
+    summary = out['summary']
+    if len(summary) == 0:
+        return 0
+    return get_user_summary(out, user)['total']['successful_ops']
+
+def wait_for_radosgw(url, remote):
+    """ poll the given url until it starts accepting connections
+
+    add_daemon() doesn't wait until radosgw finishes startup, so this is used
+    to avoid racing with later tasks that expect radosgw to be up and listening
+    """
+    # TODO: use '--retry-connrefused --retry 8' when teuthology is running on
+    # Centos 8 and other OS's with an updated version of curl
+    curl_cmd = ['curl',
+                url]
+    exit_status = 0
+    num_retries = 8
+    for seconds in range(num_retries):
+        proc = remote.run(
+            args=curl_cmd,
+            check_status=False,
+            stdout=StringIO(),
+            stderr=StringIO(),
+            stdin=StringIO(),
+        )
+        exit_status = proc.exitstatus
+        if exit_status == 0:
+            break
+        time.sleep(2**seconds)
+
+    assert exit_status == 0
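For context (not part of the patch), a hedged sketch of how rgwadmin() and wait_for_radosgw() might be combined in a task; the role, endpoint, and uid below are illustrative:

    from tasks.util import get_remote_for_role
    from tasks.util.rgw import rgwadmin, wait_for_radosgw

    def check_user(ctx, client='client.0', endpoint='http://localhost:8000'):
        # Block until the gateway accepts connections, then query a user.
        remote = get_remote_for_role(ctx, client)
        wait_for_radosgw(endpoint, remote)
        err, out = rgwadmin(ctx, client,
                            ['user', 'info', '--uid', 'testid'],
                            check_status=True)
        return out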
diff --git a/qa/tasks/util/test/__init__.py b/qa/tasks/util/test/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/tasks/util/test/__init__.py
diff --git a/qa/tasks/util/test/test_rados.py b/qa/tasks/util/test/test_rados.py
new file mode 100644
index 000000000..a8f4cb02d
--- /dev/null
+++ b/qa/tasks/util/test/test_rados.py
@@ -0,0 +1,40 @@
+#
+# The MIT License
+#
+# Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
+#
+# Author: Loic Dachary <loic@dachary.org>
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation
+# files (the "Software"), to deal in the Software without
+# restriction, including without limitation the rights to use,
+# copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following
+# conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+from tasks.util import rados
+
+class TestRados(object):
+
+    def test_cmd_erasure_code_profile(self):
+        name = 'NAME'
+        cmd = rados.cmd_erasure_code_profile(name, {})
+        assert 'k=2' in cmd
+        assert name in cmd
+        cmd = rados.cmd_erasure_code_profile(name, { 'k': '88' })
+        assert 'k=88' in cmd
+        assert name in cmd
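As a usage note (not part of the patch), these are plain pytest tests; assuming the qa directory is the working directory and its requirements are installed, something like the following should collect and run them:

    python3 -m pytest tasks/util/test/test_rados.py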
diff --git a/qa/tasks/util/workunit.py b/qa/tasks/util/workunit.py
new file mode 100644
index 000000000..1f5623af8
--- /dev/null
+++ b/qa/tasks/util/workunit.py
@@ -0,0 +1,78 @@
+import copy
+
+from teuthology import misc
+from teuthology.orchestra import run
+
+class Refspec:
+    def __init__(self, refspec):
+        self.refspec = refspec
+
+    def __str__(self):
+        return self.refspec
+
+    def _clone(self, git_url, clonedir, opts=None):
+        if opts is None:
+            opts = []
+        return (['rm', '-rf', clonedir] +
+                [run.Raw('&&')] +
+                ['git', 'clone'] + opts +
+                [git_url, clonedir])
+
+    def _cd(self, clonedir):
+        return ['cd', clonedir]
+
+    def _checkout(self):
+        return ['git', 'checkout', self.refspec]
+
+    def clone(self, git_url, clonedir):
+        return (self._clone(git_url, clonedir) +
+                [run.Raw('&&')] +
+                self._cd(clonedir) +
+                [run.Raw('&&')] +
+                self._checkout())
+
+
+class Branch(Refspec):
+    def __init__(self, tag):
+        Refspec.__init__(self, tag)
+
+    def clone(self, git_url, clonedir):
+        opts = ['--depth', '1',
+                '--branch', self.refspec]
+        return (self._clone(git_url, clonedir, opts) +
+                [run.Raw('&&')] +
+                self._cd(clonedir))
+
+
+class Head(Refspec):
+    def __init__(self):
+        Refspec.__init__(self, 'HEAD')
+
+    def clone(self, git_url, clonedir):
+        opts = ['--depth', '1']
+        return (self._clone(git_url, clonedir, opts) +
+                [run.Raw('&&')] +
+                self._cd(clonedir))
+
+
+def get_refspec_after_overrides(config, overrides):
+    # Mimic the behavior of the "install" task, where the "overrides" are
+    # actually the defaults of that task. In other words, if none of "sha1",
+    # "tag", or "branch" is specified by a "workunit" task, update the config
+    # with the information in the "workunit" sub-task nested in "overrides".
+    overrides = copy.deepcopy(overrides.get('workunit', {}))
+    refspecs = {'suite_sha1': Refspec, 'suite_branch': Branch,
+                'sha1': Refspec, 'tag': Refspec, 'branch': Branch}
+    if any(map(lambda i: i in config, refspecs.keys())):
+        for i in refspecs.keys():
+            overrides.pop(i, None)
+    misc.deep_merge(config, overrides)
+
+    for spec, cls in refspecs.items():
+        refspec = config.get(spec)
+        if refspec:
+            refspec = cls(refspec)
+            break
+    if refspec is None:
+        refspec = Head()
+    return refspec
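To illustrate the precedence implemented above (not part of the patch), a short sketch; the sha1 and branch values are placeholders:

    from tasks.util.workunit import get_refspec_after_overrides

    overrides = {'workunit': {'sha1': 'e6918187568d'}}

    # No refspec in the task config: the override's sha1 wins.
    print(get_refspec_after_overrides({}, overrides))                  # e6918187568d

    # The task config already pins a branch: the overrides are dropped.
    print(get_refspec_after_overrides({'branch': 'main'}, overrides))  # main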