author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
commit    e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree      64f88b554b444a49f656b6c656111a145cbbaa28 /src/script
parent    Initial commit. (diff)
Adding upstream version 18.2.2. (upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/script')
-rw-r--r--  src/script/CMakeLists.txt                           7
-rwxr-xr-x  src/script/add_header.pl                           26
-rwxr-xr-x  src/script/add_osd.sh                              35
-rwxr-xr-x  src/script/backport-create-issue                  396
-rwxr-xr-x  src/script/backport-resolve-issue                 748
-rwxr-xr-x  src/script/bdev_grep.pl                            19
-rwxr-xr-x  src/script/build-integration-branch               112
-rwxr-xr-x  src/script/ceph-backport.sh                      1818
-rwxr-xr-x  src/script/ceph-debug-docker.sh                   175
-rwxr-xr-x  src/script/ceph-release-notes                     375
-rw-r--r--  src/script/ceph_dump_log.py                        92
-rwxr-xr-x  src/script/check_commands.sh                       20
-rw-r--r--  src/script/cmake_uninstall.cmake.in                21
-rwxr-xr-x  src/script/cpatch                                 246
-rwxr-xr-x  src/script/crash_bdev.sh                           10
-rwxr-xr-x  src/script/credits.sh                              46
-rwxr-xr-x  src/script/extend_stretch_cluster.sh                8
-rwxr-xr-x  src/script/find_dups_in_pg_log.sh                  22
-rwxr-xr-x  src/script/fix_modeline.pl                         29
-rwxr-xr-x  src/script/gen-corpus.sh                          102
-rwxr-xr-x  src/script/kcon_all.sh                             10
-rwxr-xr-x  src/script/kcon_most.sh                            13
-rw-r--r--  src/script/kubejacker/Dockerfile                   15
-rw-r--r--  src/script/kubejacker/README.rst                   11
-rwxr-xr-x  src/script/kubejacker/kubejacker.sh                88
-rwxr-xr-x  src/script/ptl-tool.py                            404
-rw-r--r--  src/script/requirements.backport-create-issue.txt   1
-rwxr-xr-x  src/script/run-cbt.sh                             148
-rwxr-xr-x  src/script/run-coverity                            33
-rwxr-xr-x  src/script/run-make.sh                            204
-rwxr-xr-x  src/script/run_mypy.sh                            108
-rwxr-xr-x  src/script/run_tox.sh                             131
-rwxr-xr-x  src/script/run_uml.sh                             212
-rwxr-xr-x  src/script/set_up_stretch_mode.sh                  54
-rwxr-xr-x  src/script/smr_benchmark/linearCopy.sh             91
-rwxr-xr-x  src/script/smr_benchmark/linearSMRCopy.sh          69
-rwxr-xr-x  src/script/strip_trailing_whitespace.sh             4
-rwxr-xr-x  src/script/unhexdump-C                             18
38 files changed, 5921 insertions, 0 deletions
diff --git a/src/script/CMakeLists.txt b/src/script/CMakeLists.txt
new file mode 100644
index 000000000..fdc0e83e4
--- /dev/null
+++ b/src/script/CMakeLists.txt
@@ -0,0 +1,7 @@
+configure_file(
+ "${CMAKE_CURRENT_SOURCE_DIR}/cmake_uninstall.cmake.in"
+ "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake"
+ IMMEDIATE @ONLY)
+
+add_custom_target(uninstall
+ COMMAND ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake)
diff --git a/src/script/add_header.pl b/src/script/add_header.pl
new file mode 100755
index 000000000..023c06e45
--- /dev/null
+++ b/src/script/add_header.pl
@@ -0,0 +1,26 @@
+#!/usr/bin/perl
+
+use strict;
+my $fn = shift @ARGV;
+my $old = `cat $fn`;
+
+my $header = `cat doc/header.txt`;
+
+# strip existing header
+my $new = $old;
+if ($new =~ /^(.*)\* Ceph - scalable distributed file system/s) {
+ my ($a,@b) = split(/\*\/\n/, $new);
+ $new = join("*/\n",@b);
+}
+$new = $header . $new;
+
+if ($new ne $old) {
+ open(O, ">$fn.new");
+ print O $new;
+ close O;
+ system "diff $fn $fn.new";
+ rename "$fn.new", $fn;
+ #unlink "$fn.new";
+
+}
+
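The Perl script above drops everything up to and including the first `*/` line when the old Ceph license banner is present, then prepends doc/header.txt. A rough Python sketch of the same swap, for illustration only:

    # Rough equivalent of add_header.pl's header replacement (banner
    # marker string taken from the Perl regex above).
    def add_header(text, header):
        if '* Ceph - scalable distributed file system' in text:
            # drop the first C-style comment block, keep everything after it
            text = text.split('*/\n', 1)[1]
        return header + text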
diff --git a/src/script/add_osd.sh b/src/script/add_osd.sh
new file mode 100755
index 000000000..a8dff6b3d
--- /dev/null
+++ b/src/script/add_osd.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+set -ex
+
+CEPH_DEV_DIR=dev
+CEPH_BIN=bin
+ceph_adm=$CEPH_BIN/ceph
+osd=$1
+location=$2
+weight=.0990
+
+# DANGEROUS
+rm -rf $CEPH_DEV_DIR/osd$osd
+mkdir -p $CEPH_DEV_DIR/osd$osd
+
+uuid=`uuidgen`
+echo "add osd$osd $uuid"
+OSD_SECRET=$($CEPH_BIN/ceph-authtool --gen-print-key)
+echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $CEPH_DEV_DIR/osd$osd/new.json
+$CEPH_BIN/ceph osd new $uuid -i $CEPH_DEV_DIR/osd$osd/new.json
+rm $CEPH_DEV_DIR/osd$osd/new.json
+$CEPH_BIN/ceph-osd -i $osd $ARGS --mkfs --key $OSD_SECRET --osd-uuid $uuid
+
+key_fn=$CEPH_DEV_DIR/osd$osd/keyring
+cat > $key_fn<<EOF
+[osd.$osd]
+ key = $OSD_SECRET
+EOF
+echo adding osd$osd key to auth repository
+$CEPH_BIN/ceph -i "$key_fn" auth add osd.$osd osd "allow *" mon "allow profile osd" mgr "allow profile osd"
+
+$CEPH_BIN/ceph osd crush add osd.$osd $weight $location
+
+echo start osd.$osd
+$CEPH_BIN/ceph-osd -i $osd $ARGS $COSD_ARGS
diff --git a/src/script/backport-create-issue b/src/script/backport-create-issue
new file mode 100755
index 000000000..e2e2298f9
--- /dev/null
+++ b/src/script/backport-create-issue
@@ -0,0 +1,396 @@
+#!/usr/bin/env python3
+#
+# backport-create-issue
+#
+# Standalone version of the "backport-create-issue" subcommand of
+# "ceph-workbench" by Loic Dachary.
+#
+# This script scans Redmine (tracker.ceph.com) for issues in "Pending Backport"
+# status and creates backport issues for them, based on the contents of the
+# "Backport" field while trying to avoid creating duplicate backport issues.
+#
+# Copyright (C) 2015 <contact@redhat.com>
+# Copyright (C) 2018, SUSE LLC
+#
+# Author: Loic Dachary <loic@dachary.org>
+# Author: Nathan Cutler <ncutler@suse.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import argparse
+import logging
+import os
+import re
+import time
+from redminelib import Redmine # https://pypi.org/project/python-redmine/
+from redminelib.exceptions import ResourceAttrError
+
+redmine_endpoint = "https://tracker.ceph.com"
+project_name = "Ceph"
+release_id = 16
+custom_field_tag = 'cf_3'
+tag_separator = ' '
+tag_backport_processed = 'backport_processed'
+delay_seconds = 5
+redmine_key_file="~/.redmine_key"
+redmine_key_env="REDMINE_API_KEY"
+#
+# NOTE: release_id is hard-coded because
+# http://www.redmine.org/projects/redmine/wiki/Rest_CustomFields
+# requires administrative permissions. If and when
+# https://www.redmine.org/issues/18875
+# is resolved, it could maybe be replaced by the following code:
+#
+# for field in redmine.custom_field.all():
+# if field.name == 'Release':
+# release_id = field.id
+#
+status2status_id = {}
+project_id2project = {}
+tracker2tracker_id = {}
+version2version_id = {}
+resolve_parent = None
+
+def usage():
+ logging.error("Redmine credentials are required to perform this operation. "
+ "Please provide either a Redmine key (via %s or $%s) "
+ "or a Redmine username and password (via --user and --password). "
+ "Optionally, one or more issue numbers can be given via positional "
+ "argument(s). In the absence of positional arguments, the script "
+ "will loop through all issues in Pending Backport status.",
+ redmine_key_file, redmine_key_env)
+ exit(-1)
+
+def parse_arguments():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("issue_numbers", nargs='*', help="Issue number")
+ parser.add_argument("--user", help="Redmine user")
+ parser.add_argument("--password", help="Redmine password")
+ parser.add_argument("--resolve-parent", help="Resolve parent issue if all backports resolved/rejected",
+ action="store_true")
+ parser.add_argument("--debug", help="Show debug-level messages",
+ action="store_true")
+ parser.add_argument("--dry-run", help="Do not write anything to Redmine",
+ action="store_true")
+ parser.add_argument("--force", help="When issue numbers provided, process "
+ "them even if not in 'Pending Backport' status. "
+ "Otherwise, process all issues in 'Pending Backport' "
+ "status even if already processed "
+ f"(tag '{tag_backport_processed}' added)",
+ action="store_true")
+ return parser.parse_args()
+
+
+def set_logging_level(a):
+ if a.debug:
+ logging.basicConfig(level=logging.DEBUG)
+ else:
+ logging.basicConfig(level=logging.INFO)
+ return None
+
+def report_dry_run(a):
+ if a.dry_run:
+ logging.info("Dry run: nothing will be written to Redmine")
+ else:
+ logging.warning("Missing issues will be created in Backport tracker "
+ "of the relevant Redmine project")
+
+def process_resolve_parent_option(a):
+ global resolve_parent
+ resolve_parent = a.resolve_parent
+ if a.resolve_parent:
+ logging.warning("Parent issues with all backports resolved/rejected will be marked Resolved")
+
+def connect_to_redmine(a):
+ full_path=os.path.expanduser(redmine_key_file)
+ redmine_key=''
+ try:
+ with open(full_path, "r") as f:
+ redmine_key = f.read().strip()
+ except FileNotFoundError:
+ pass
+
+ if a.user and a.password:
+ logging.info("Redmine username and password were provided; using them")
+ return Redmine(redmine_endpoint, username=a.user, password=a.password)
+ elif redmine_key:
+ logging.info("Redmine key was read from '%s'; using it" % redmine_key_file)
+ return Redmine(redmine_endpoint, key=redmine_key)
+ elif os.getenv(redmine_key_env):
+ logging.info("Redmine key was read from '$%s'; using it", redmine_key_env)
+ return Redmine(redmine_endpoint, key=os.getenv(redmine_key_env))
+ else:
+ usage()
+
+def releases():
+ return ('argonaut', 'bobtail', 'cuttlefish', 'dumpling', 'emperor',
+ 'firefly', 'giant', 'hammer', 'infernalis', 'jewel', 'kraken',
+ 'luminous', 'mimic', 'nautilus', 'octopus', 'pacific', 'quincy')
+
+def populate_status_dict(r):
+ for status in r.issue_status.all():
+ status2status_id[status.name] = status.id
+ logging.debug("Statuses {}".format(status2status_id))
+ return None
+
+# not used currently, but might be useful
+def populate_version_dict(r, p_id):
+ versions = r.version.filter(project_id=p_id)
+ for version in versions:
+ version2version_id[version.name] = version.id
+ #logging.debug("Versions {}".format(version2version_id))
+ return None
+
+def populate_tracker_dict(r):
+ for tracker in r.tracker.all():
+ tracker2tracker_id[tracker.name] = tracker.id
+ logging.debug("Trackers {}".format(tracker2tracker_id))
+ return None
+
+def has_tracker(r, p_id, tracker_name):
+ for tracker in get_project(r, p_id).trackers:
+ if tracker['name'] == tracker_name:
+ return True
+ return False
+
+def get_project(r, p_id):
+ if p_id not in project_id2project:
+ p_obj = r.project.get(p_id, include='trackers')
+ project_id2project[p_id] = p_obj
+ return project_id2project[p_id]
+
+def url(issue):
+ return redmine_endpoint + "/issues/" + str(issue['id'])
+
+def set_backport(issue):
+ for field in issue['custom_fields']:
+ if field['name'] == 'Backport' and field['value'] != 0:
+ issue['backports'] = set(re.findall(r'\w+', field['value']))
+ logging.debug("backports for " + str(issue['id']) +
+ " is " + str(field['value']) + " " +
+ str(issue['backports']))
+ return True
+ return False
+
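The set_backport() function above tolerates arbitrary separators in the Backport field because it only extracts word tokens. A quick illustration with made-up field values:

    import re

    # Hypothetical "Backport" field values; any separator works because
    # re.findall(r'\w+', ...) pulls out the word tokens.
    for value in ('luminous, mimic', 'nautilus octopus', 'pacific;quincy'):
        print(set(re.findall(r'\w+', value)))
    # -> {'luminous', 'mimic'}, {'nautilus', 'octopus'}, {'pacific', 'quincy'}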
+def get_release(issue):
+ for field in issue.custom_fields:
+ if field['name'] == 'Release':
+ return field['value']
+
+def update_relations(r, issue, dry_run):
+ global resolve_parent
+ relations = r.issue_relation.filter(issue_id=issue['id'])
+ existing_backports = set()
+ existing_backports_dict = {}
+ for relation in relations:
+ other = r.issue.get(relation['issue_to_id'])
+ if other['tracker']['name'] != 'Backport':
+ logging.debug(url(issue) + " ignore relation to " +
+ url(other) + " because it is not in the Backport " +
+ "tracker")
+ continue
+ if relation['relation_type'] != 'copied_to':
+ logging.error(url(issue) + " unexpected relation '" +
+ relation['relation_type'] + "' to " + url(other))
+ continue
+ release = get_release(other)
+ if release in existing_backports:
+ logging.error(url(issue) + " duplicate " + release +
+ " backport issue detected")
+ continue
+ existing_backports.add(release)
+ existing_backports_dict[release] = relation['issue_to_id']
+ logging.debug(url(issue) + " backport to " + release + " is " +
+ redmine_endpoint + "/issues/" + str(relation['issue_to_id']))
+ if existing_backports == issue['backports']:
+ logging.debug(url(issue) + " has all the required backport issues")
+ if resolve_parent:
+ maybe_resolve(issue, existing_backports_dict, dry_run)
+ return None
+ if existing_backports.issuperset(issue['backports']):
+ logging.error(url(issue) + " has more backport issues (" +
+ ",".join(sorted(existing_backports)) + ") than expected (" +
+ ",".join(sorted(issue['backports'])) + ")")
+ return None
+ backport_tracker_id = tracker2tracker_id['Backport']
+ for release in issue['backports'] - existing_backports:
+ if release not in releases():
+ logging.error(url(issue) + " requires backport to " +
+ "unknown release " + release)
+ break
+ subject = (release + ": " + issue['subject'])[:255]
+ assigned_to_id = None
+ try:
+ assigned_to_id = issue.assigned_to.id
+ except ResourceAttrError: # not assigned
+ pass
+ if dry_run:
+ logging.info(url(issue) + " add backport to " + release)
+ continue
+ other = r.issue.create(project_id=issue['project']['id'],
+ tracker_id=backport_tracker_id,
+ subject=subject,
+ priority_id=issue['priority']['id'],
+ assigned_to_id=assigned_to_id,
+ target_version=None,
+ custom_fields=[{
+ "id": release_id,
+ "value": release,
+ }])
+ logging.debug("Rate-limiting to avoid seeming like a spammer")
+ time.sleep(delay_seconds)
+ r.issue_relation.create(issue_id=issue['id'],
+ issue_to_id=other['id'],
+ relation_type='copied_to')
+ logging.info(url(issue) + " added backport to " +
+ release + " " + url(other))
+ return None
+
+def maybe_resolve(issue, backports, dry_run):
+ '''
+ issue is a parent issue in Pending Backports status, and backports is a dict
+ like, e.g., { "luminous": 25345, "mimic": 32134 }.
+ If all the backport issues are Resolved/Rejected, set the parent issue to Resolved, too.
+ '''
+ global delay_seconds
+ global redmine
+ global status2status_id
+ if not backports:
+ return None
+ pending_backport_status_id = status2status_id["Pending Backport"]
+ resolved_status_id = status2status_id["Resolved"]
+ rejected_status_id = status2status_id["Rejected"]
+ logging.debug("entering maybe_resolve with parent issue ->{}<- backports ->{}<-"
+ .format(issue.id, backports))
+ assert issue.status.id == pending_backport_status_id, \
+ "Parent Redmine issue ->{}<- has status ->{}<- (expected Pending Backport)".format(issue.id, issue.status)
+ all_resolved = True
+ resolved_equiv_statuses = [resolved_status_id, rejected_status_id]
+ for backport in backports.keys():
+ tracker_issue_id = backports[backport]
+ backport_issue = redmine.issue.get(tracker_issue_id)
+ logging.debug("{} backport is in status {}".format(backport, backport_issue.status.name))
+ if backport_issue.status.id not in resolved_equiv_statuses:
+ all_resolved = False
+ break
+ if all_resolved:
+ logging.debug("Parent ->{}<- all backport issues in status Resolved".format(url(issue)))
+ note = ("While running with --resolve-parent, the script \"backport-create-issue\" "
+ "noticed that all backports of this issue are in status \"Resolved\" or \"Rejected\".")
+ if dry_run:
+ logging.info("Set status of parent ->{}<- to Resolved".format(url(issue)))
+ else:
+ redmine.issue.update(issue.id, status_id=resolved_status_id, notes=note)
+ logging.info("Parent ->{}<- status changed from Pending Backport to Resolved".format(url(issue)))
+ logging.debug("Rate-limiting to avoid seeming like a spammer")
+ time.sleep(delay_seconds)
+ else:
+ logging.debug("Some backport issues are still unresolved: leaving parent issue open")
+
+
+def mark_as_processed(r, issue):
+ """
+ Add a tag to the parent issue indicating that it has already been
+ processed for backport tracker creation.
+ """
+ custom_fields = list(issue['custom_fields'].values())
+ for i, field in enumerate(custom_fields):
+ if field['name'] == 'Tags':
+ if tag_backport_processed not in field['value']:
+ if field['value']:
+ custom_fields[i]['value'] += (tag_separator +
+ tag_backport_processed)
+ else:
+ custom_fields[i]['value'] = tag_backport_processed
+ logging.info("%s adding tag '%s'", url(issue),
+ tag_backport_processed)
+ r.issue.update(issue.id, custom_fields=custom_fields)
+ return
+
+
+def iterate_over_backports(r, issues, dry_run=False):
+ counter = 0
+ for issue in issues:
+ counter += 1
+ logging.debug("{} ({}) {}".format(issue.id, issue.project,
+ issue.subject))
+ print('Examining issue#{} ({}/{})\r'.format(issue.id, counter, len(issues)), end='', flush=True)
+ if not has_tracker(r, issue['project']['id'], 'Backport'):
+ logging.info("{} skipped because the project {} does not "
+ "have a Backport tracker".format(url(issue),
+ issue['project']['name']))
+ continue
+ if not set_backport(issue):
+ logging.error(url(issue) + " no backport field")
+ continue
+ if len(issue['backports']) == 0:
+ logging.error(url(issue) + " the backport field is empty")
+ update_relations(r, issue, dry_run)
+ if not dry_run:
+ mark_as_processed(r, issue)
+ print(' \r', end='', flush=True)
+ logging.info("Processed {} issues".format(counter))
+ return None
+
+
+if __name__ == '__main__':
+ args = parse_arguments()
+ set_logging_level(args)
+ process_resolve_parent_option(args)
+ report_dry_run(args)
+ redmine = connect_to_redmine(args)
+ project = redmine.project.get(project_name)
+ ceph_project_id = project.id
+ logging.debug("Project {} has ID {}".format(project_name, ceph_project_id))
+ populate_status_dict(redmine)
+ pending_backport_status_id = status2status_id["Pending Backport"]
+ logging.debug("Pending Backport status has ID {}"
+ .format(pending_backport_status_id))
+ populate_tracker_dict(redmine)
+ force_create = False
+ if args.issue_numbers:
+ issue_list = ','.join(args.issue_numbers)
+ logging.info("Processing issue list ->{}<-".format(issue_list))
+ if args.force:
+ force_create = True
+ logging.warn("--force option was given: ignoring issue status!")
+ issues = redmine.issue.filter(project_id=ceph_project_id,
+ issue_id=issue_list)
+
+ else:
+ issues = redmine.issue.filter(project_id=ceph_project_id,
+ issue_id=issue_list,
+ status_id=pending_backport_status_id)
+ else:
+ if args.force or args.resolve_parent:
+ if args.force:
+ logging.warn("--force option was given: ignoring '%s' tag!",
+ tag_backport_processed)
+ issues = redmine.issue.filter(project_id=ceph_project_id,
+ status_id=pending_backport_status_id)
+ else:
+ # https://python-redmine.com/resources/issue.html#filter
+ issues = redmine.issue.filter(project_id=ceph_project_id,
+ status_id=pending_backport_status_id,
+ **{
+ custom_field_tag:
+ '!~' +
+ tag_backport_processed})
+ if force_create:
+ logging.info("Processing {} issues regardless of status"
+ .format(len(issues)))
+ else:
+ logging.info("Processing {} issues with status Pending Backport"
+ .format(len(issues)))
+ iterate_over_backports(redmine, issues, dry_run=args.dry_run)
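The `**{custom_field_tag: ...}` construct in the final filter call is plain keyword unpacking. A minimal sketch of what it expands to, using the constants defined at the top of the script:

    custom_field_tag = 'cf_3'                 # the "Tags" custom field
    tag_backport_processed = 'backport_processed'
    kwargs = {custom_field_tag: '!~' + tag_backport_processed}
    print(kwargs)   # {'cf_3': '!~backport_processed'}
    # so the call is equivalent to:
    #   redmine.issue.filter(..., cf_3='!~backport_processed')
    # where '!~' is Redmine's "does not contain" filter operator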
diff --git a/src/script/backport-resolve-issue b/src/script/backport-resolve-issue
new file mode 100755
index 000000000..7d27ac2eb
--- /dev/null
+++ b/src/script/backport-resolve-issue
@@ -0,0 +1,748 @@
+#!/usr/bin/env python3
+#
+# backport-resolve-issue
+#
+# Based on "backport-create-issue", which was itself based on work by
+# Loic Dachary.
+#
+#
+# Introduction
+# ============
+#
+# This script processes GitHub backport PRs, checking for proper cross-linking
+# with a Redmine Backport tracker issue. If a PR is merged and properly
+# cross-linked, it can optionally resolve the tracker issue and correctly
+# populate the "Target version" field.
+#
+# The script takes a single positional argument, which is optional. If the
+# argument is an integer, it is assumed to be a GitHub backport PR ID (e.g. "28549").
+# In this mode ("single PR mode") the script processes a single GitHub backport
+# PR and terminates.
+#
+# If the argument is not an integer, or is missing, it is assumed to be a
+# commit (SHA1 or tag) to start from. If no positional argument is given, it
+# defaults to the tag "BRI-{release}", which might have been added by the last run of the
+# script. This mode is called "scan merge commits mode".
+#
+# In both modes, the script scans a local git repo, which is assumed to be
+# in the current working directory. In single PR mode, the script will work
+# only if the PR's merge commit is present in the current branch of the local
+# git repo. In scan merge commits mode, the script starts from the given SHA1
+# or tag, taking each merge commit in turn and attempting to obtain the GitHub
+# PR number for each.
+#
+# For each GitHub PR, the script interactively displays all relevant information
+# (NOTE: this includes displaying the GitHub PR and Redmine backport issue in
+# web browser tabs!) and prompts the user for her preferred disposition.
+#
+#
+# Assumptions
+# ===========
+#
+# Among other things, the script assumes:
+#
+# 1. it is being run in the top-level directory of a Ceph git repo
+# 2. the preferred web browser is Firefox and the command to open a browser
+# tab is "firefox"
+# 3. if Firefox is running and '--no-browser' was not given, the Firefox window
+# is visible to the user and the user desires to view GitHub PRs and Tracker
+# Issues in the browser
+# 4. if Firefox is not running, the user does not want to view PRs and issues
+# in a web browser
+#
+#
+# Dependencies
+# ============
+#
+# To run this script, first install the dependencies
+#
+# virtualenv v
+# source v/bin/activate
+# pip install gitpython python-redmine
+#
+# Then, copy the script from src/script/backport-resolve-issue (in the branch
+# "master" - the script is not maintained anywhere else) to somewhere in your
+# PATH.
+#
+# Finally, run the script with appropriate parameters. For example:
+#
+# backport-resolve-issue --key $MY_REDMINE_KEY
+# backport-resolve-issue --user $MY_REDMINE_USER --password $MY_REDMINE_PASSWORD
+#
+#
+# Copyright Notice
+# ================
+#
+# Copyright (C) 2019, SUSE LLC
+#
+# Author: Nathan Cutler <ncutler@suse.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import argparse
+import logging
+import json
+import os
+import re
+import sys
+import time
+from redminelib import Redmine # https://pypi.org/project/python-redmine/
+from redminelib.exceptions import ResourceAttrError
+from git import Repo
+from git.exc import GitCommandError
+
+github_endpoint = "https://github.com/ceph/ceph"
+redmine_endpoint = "https://tracker.ceph.com"
+project_name = "Ceph"
+status2status_id = {}
+project_id2project = {}
+tracker2tracker_id = {}
+version2version_id = {}
+delay_seconds = 5
+browser_cmd = "firefox"
+no_browser = False
+ceph_release = None
+dry_run = False
+redmine = None
+bri_tag = None
+github_token_file = "~/.github_token"
+github_token = None
+github_user = None
+redmine_key_file = "~/.redmine_key"
+redmine_key = None
+
+def browser_running():
+ global browser_cmd
+ retval = os.system("pgrep {} >/dev/null".format(browser_cmd))
+ if retval == 0:
+ return True
+ return False
+
+def ceph_version(repo, sha1=None):
+ if sha1:
+ return repo.git.describe('--match', 'v*', sha1).split('-')[0]
+ return repo.git.describe('--match', 'v*').split('-')[0]
+
+def commit_range(args):
+ global bri_tag
+ if len(args.pr_or_commit) == 0:
+ return '{}..HEAD'.format(bri_tag)
+ elif len(args.pr_or_commit) == 1:
+ pass
+ else:
+ logging.warn("Ignoring positional parameters {}".format(args.pr_or_commit[1:]))
+ commit = args.pr_or_commit[0]
+ return '{}..HEAD'.format(commit)
+
+def connect_to_redmine(a):
+ global redmine_key
+ global redmine_key_file
+ redmine_key = read_from_file(redmine_key_file)
+ if a.user and a.password:
+ logging.info("Redmine username and password were provided; using them")
+ return Redmine(redmine_endpoint, username=a.user, password=a.password)
+ elif redmine_key:
+ logging.info("Redmine key was read from '%s'; using it" % redmine_key_file)
+ return Redmine(redmine_endpoint, key=redmine_key)
+ else:
+ usage()
+
+def derive_github_user_from_token(gh_token):
+ retval = None
+ if gh_token:
+ curl_opt = "-u :{} --silent".format(gh_token)
+ cmd = "curl {} https://api.github.com/user".format(curl_opt)
+ logging.debug("Running curl command ->{}<-".format(cmd))
+ json_str = os.popen(cmd).read()
+ github_api_result = json.loads(json_str)
+ if "login" in github_api_result:
+ retval = github_api_result['login']
+ if "message" in github_api_result:
+ assert False, \
+ "GitHub API unexpectedly returned ->{}<-".format(github_api_result['message'])
+ return retval
+
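derive_github_user_from_token() shells out to curl; for comparison, a sketch of the same `GET /user` lookup using the requests library (which build-integration-branch in this directory already depends on):

    import requests

    def github_user_from_token(gh_token):
        # Same GitHub API call as the curl command above.
        r = requests.get('https://api.github.com/user',
                         headers={'Authorization': 'token ' + gh_token})
        return r.json().get('login')   # None if the token was rejected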
+def ensure_bri_tag_exists(repo, release):
+ global bri_tag
+ bri_tag = "BRI-{}".format(release)
+ bri_tag_exists = ''
+ try:
+ bri_tag_exists = repo.git.show_ref(bri_tag)
+ except GitCommandError as err:
+ logging.error(err)
+ logging.debug("git show-ref {} returned ->{}<-".format(bri_tag, bri_tag_exists))
+ if not bri_tag_exists:
+ c_v = ceph_version(repo)
+ logging.info("No {} tag found: setting it to {}".format(bri_tag, c_v))
+ repo.git.tag(bri_tag, c_v)
+
+def get_issue_release(redmine_issue):
+ for field in redmine_issue.custom_fields:
+ if field['name'] == 'Release':
+ return field['value']
+ return None
+
+def get_project(r, p_id):
+ if p_id not in project_id2project:
+ p_obj = r.project.get(p_id, include='trackers')
+ project_id2project[p_id] = p_obj
+ return project_id2project[p_id]
+
+def has_tracker(r, p_id, tracker_name):
+ for tracker in get_project(r, p_id).trackers:
+ if tracker['name'] == tracker_name:
+ return True
+ return False
+
+def parse_arguments():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--user", help="Redmine user")
+ parser.add_argument("--password", help="Redmine password")
+ parser.add_argument("--debug", help="Show debug-level messages",
+ action="store_true")
+ parser.add_argument("--dry-run", help="Do not write anything to Redmine",
+ action="store_true")
+ parser.add_argument("--no-browser", help="Do not use web browser even if it is running",
+ action="store_true")
+ parser.add_argument("pr_or_commit", nargs='*',
+ help="GitHub PR ID, or last merge commit successfully processed")
+ return parser.parse_args()
+
+def populate_ceph_release(repo):
+ global ceph_release
+ current_branch = repo.git.rev_parse('--abbrev-ref', 'HEAD')
+ release_ver_full = ceph_version(repo)
+ logging.info("Current git branch is {}, {}".format(current_branch, release_ver_full))
+ release_ver = release_ver_full.split('.')[0] + '.' + release_ver_full.split('.')[1]
+ try:
+ ceph_release = ver_to_release()[release_ver]
+ except KeyError:
+ assert False, \
+ "Release version {} does not correspond to any known stable release".format(release_ver)
+ logging.info("Ceph release is {}".format(ceph_release))
+
+def populate_status_dict(r):
+ for status in r.issue_status.all():
+ status2status_id[status.name] = status.id
+ logging.debug("Statuses {}".format(status2status_id))
+ return None
+
+def populate_tracker_dict(r):
+ for tracker in r.tracker.all():
+ tracker2tracker_id[tracker.name] = tracker.id
+ logging.debug("Trackers {}".format(tracker2tracker_id))
+ return None
+
+# not used currently, but might be useful
+def populate_version_dict(r, p_id):
+ versions = r.version.filter(project_id=p_id)
+ for version in versions:
+ version2version_id[version.name] = version.id
+ return None
+
+def print_inner_divider():
+ print("-----------------------------------------------------------------")
+
+def print_outer_divider():
+ print("=================================================================")
+
+def process_merge(repo, merge, merges_remaining):
+ backport = None
+ sha1 = merge.split(' ')[0]
+ possible_to_resolve = True
+ try:
+ backport = Backport(repo, merge_commit_string=merge)
+ except AssertionError as err:
+ logging.error("Malformed backport due to ->{}<-".format(err))
+ possible_to_resolve = False
+ if tag_merge_commits:
+ if possible_to_resolve:
+ prompt = ("[a] Abort, "
+ "[i] Ignore and advance {bri} tag, "
+ "[u] Update tracker and advance {bri} tag (default 'u') --> "
+ .format(bri=bri_tag)
+ )
+ default_input_val = "u"
+ else:
+ prompt = ("[a] Abort, "
+ "[i] Ignore and advance {bri} tag (default 'i') --> "
+ .format(bri=bri_tag)
+ )
+ default_input_val = "i"
+ else:
+ if possible_to_resolve:
+ prompt = "[a] Abort, [i] Ignore, [u] Update tracker (default 'u') --> "
+ default_input_val = "u"
+ else:
+ if merges_remaining > 1:
+ prompt = "[a] Abort, [i] Ignore --> "
+ default_input_val = "i"
+ else:
+ return False
+ input_val = input(prompt)
+ if input_val == '':
+ input_val = default_input_val
+ if input_val.lower() == "a":
+ exit(-1)
+ elif input_val.lower() == "i":
+ pass
+ else:
+ input_val = "u"
+ if input_val.lower() == "u":
+ if backport:
+ backport.resolve()
+ else:
+ logging.warn("Cannot determine which issue to resolve. Ignoring.")
+ if tag_merge_commits:
+ if backport:
+ tag_sha1(repo, backport.merge_commit_sha1)
+ else:
+ tag_sha1(repo, sha1)
+ return True
+
+def read_from_file(fs):
+ retval = None
+ full_path = os.path.expanduser(fs)
+ try:
+ with open(full_path, "r") as f:
+ retval = f.read().strip()
+ except FileNotFoundError:
+ pass
+ return retval
+
+def releases():
+ return ('argonaut', 'bobtail', 'cuttlefish', 'dumpling', 'emperor',
+ 'firefly', 'giant', 'hammer', 'infernalis', 'jewel', 'kraken',
+ 'luminous', 'mimic', 'nautilus', 'octopus', 'pacific', 'quincy')
+
+def report_params(a):
+ global dry_run
+ global no_browser
+ if a.dry_run:
+ dry_run = True
+ logging.warning("Dry run: nothing will be written to Redmine")
+ if a.no_browser:
+ no_browser = True
+ logging.warning("Web browser will not be used even if it is running")
+
+def set_logging_level(a):
+ if a.debug:
+ logging.basicConfig(level=logging.DEBUG)
+ else:
+ logging.basicConfig(level=logging.INFO)
+ return None
+
+def tag_sha1(repo, sha1):
+ global bri_tag
+ repo.git.tag('--delete', bri_tag)
+ repo.git.tag(bri_tag, sha1)
+
+def ver_to_release():
+ return {'v9.2': 'infernalis', 'v10.2': 'jewel', 'v11.2': 'kraken',
+ 'v12.2': 'luminous', 'v13.2': 'mimic', 'v14.2': 'nautilus',
+ 'v15.2': 'octopus', 'v16.0': 'pacific', 'v16.1': 'pacific',
+ 'v16.2': 'pacific', 'v17.0': 'quincy'}
+
+def usage():
+ logging.error("Redmine credentials are required to perform this operation. "
+ "Please provide either a Redmine key (via {}) "
+ "or a Redmine username and password (via --user and --password). "
+ "Optionally, one or more issue numbers can be given via positional "
+ "argument(s). In the absence of positional arguments, the script "
+ "will loop through all merge commits after the tag \"BRI-{release}\". "
+ "If there is no such tag in the local branch, one will be created "
+ "for you.".format(redmine_key_file)
+ )
+ exit(-1)
+
+
+class Backport:
+
+ def __init__(self, repo, merge_commit_string):
+ '''
+ The merge commit string should look something like this:
+ 27ff851953 Merge pull request #29678 from pdvian/wip-40948-nautilus
+ '''
+ global browser_cmd
+ global ceph_release
+ global github_token
+ global github_user
+ self.repo = repo
+ self.merge_commit_string = merge_commit_string
+ #
+ # split merge commit string on first space character
+ merge_commit_sha1_short, self.merge_commit_description = merge_commit_string.split(' ', 1)
+ #
+ # merge commit SHA1 from merge commit string
+ p = re.compile('\\S+')
+ self.merge_commit_sha1_short = p.match(merge_commit_sha1_short).group()
+ assert self.merge_commit_sha1_short == merge_commit_sha1_short, \
+ ("Failed to extract merge commit short SHA1 from merge commit string ->{}<-"
+ .format(merge_commit_string)
+ )
+ logging.debug("Short merge commit SHA1 is {}".format(self.merge_commit_sha1_short))
+ self.merge_commit_sha1 = self.repo.git.rev_list(
+ '--max-count=1',
+ self.merge_commit_sha1_short,
+ )
+ logging.debug("Full merge commit SHA1 is {}".format(self.merge_commit_sha1))
+ self.merge_commit_gd = repo.git.describe('--match', 'v*', self.merge_commit_sha1)
+ self.populate_base_version()
+ self.populate_target_version()
+ self.populate_github_url()
+ #
+ # GitHub PR description and merged status from GitHub
+ curl_opt = "--silent"
+ # if GitHub token was provided, use it to avoid throttling -
+ if github_token and github_user:
+ curl_opt = "-u {}:{} {}".format(github_user, github_token, curl_opt)
+ cmd = (
+ "curl {} https://api.github.com/repos/ceph/ceph/pulls/{}"
+ .format(curl_opt, self.github_pr_id)
+ )
+ logging.debug("Running curl command ->{}<-".format(cmd))
+ json_str = os.popen(cmd).read()
+ github_api_result = json.loads(json_str)
+ if "title" in github_api_result and "body" in github_api_result:
+ self.github_pr_title = github_api_result["title"]
+ self.github_pr_desc = github_api_result["body"]
+ else:
+ logging.error("GitHub API unexpectedly returned: {}".format(github_api_result))
+ logging.info("Curl command was: {}".format(cmd))
+ sys.exit(-1)
+ self.mogrify_github_pr_desc()
+ self.github_pr_merged = github_api_result["merged"]
+ if not no_browser:
+ if browser_running():
+ os.system("{} {}".format(browser_cmd, self.github_url))
+ pr_title_trunc = self.github_pr_title
+ if len(pr_title_trunc) > 60:
+ pr_title_trunc = pr_title_trunc[0:50] + "|TRUNCATED"
+ print('''\n\n=================================================================
+GitHub PR URL: {}
+GitHub PR title: {}
+Merge commit: {} ({})
+Merged: {}
+Ceph version: base {}, target {}'''
+ .format(self.github_url, pr_title_trunc, self.merge_commit_sha1,
+ self.merge_commit_gd, self.github_pr_merged, self.base_version,
+ self.target_version
+ )
+ )
+ if no_browser or not browser_running():
+ print('''----------------------- PR DESCRIPTION --------------------------
+{}
+-----------------------------------------------------------------'''.format(self.github_pr_desc))
+ assert self.github_pr_merged, "GitHub PR {} has not been merged!".format(self.github_pr_id)
+ #
+ # obtain backport tracker from GitHub PR description
+ self.extract_backport_trackers_from_github_pr_desc()
+ #
+ for bt in self.backport_trackers:
+ # does the Backport Tracker description link back to the GitHub PR?
+ p = re.compile('http.?://github.com/ceph/ceph/pull/\\d+')
+ bt.get_tracker_description()
+ try:
+ bt.github_url_from_tracker = p.search(bt.tracker_description).group()
+ except AttributeError:
+ pass
+ if bt.github_url_from_tracker:
+ p = re.compile('\\d+')
+ bt.github_id_from_tracker = p.search(bt.github_url_from_tracker).group()
+ logging.debug("GitHub PR from Tracker: URL is ->{}<- and ID is {}"
+ .format(bt.github_url_from_tracker, bt.github_id_from_tracker))
+ assert bt.github_id_from_tracker == self.github_pr_id, \
+ "GitHub PR ID {} does not match GitHub ID from tracker {}".format(
+ self.github_pr_id,
+ bt.github_id_from_tracker,
+ )
+ print_inner_divider()
+ if bt.github_url_from_tracker:
+ logging.info("Tracker {} links to PR {}".format(bt.issue_url(), self.github_url))
+ else:
+ logging.warning("Backport Tracker {} does not link to PR - will update"
+ .format(bt.issue_id))
+ #
+ # does the Backport Tracker's release field match the Ceph release?
+ tracker_release = get_issue_release(bt.redmine_issue)
+ assert ceph_release == tracker_release, \
+ (
+ "Backport Tracker {} is a {} backport - expected {}"
+ .format(bt.issue_id, tracker_release, ceph_release)
+ )
+ #
+ # is the Backport Tracker's "Target version" custom field populated?
+ try:
+ ttv = bt.get_tracker_target_version()
+ except ResourceAttrError:
+ logging.info("Backport Tracker {} target version not populated yet!"
+ .format(bt.issue_id))
+ bt.set_target_version = True
+ else:
+ bt.tracker_target_version = ttv
+ logging.info("Backport Tracker {} target version already populated "
+ "with correct value {}"
+ .format(bt.issue_id, bt.tracker_target_version))
+ bt.set_target_version = False
+ assert bt.tracker_target_version == self.target_version, \
+ (
+ "Tracker target version {} is wrong; should be {}"
+ .format(bt.tracker_target_version, self.target_version)
+ )
+ #
+ # is the Backport Tracker's status already set to Resolved?
+ resolved_id = status2status_id['Resolved']
+ if bt.redmine_issue.status.id == resolved_id:
+ logging.info("Backport Tracker {} status is already set to Resolved"
+ .format(bt.issue_id))
+ bt.set_tracker_status = False
+ else:
+ logging.info("Backport Tracker {} status is currently set to {}"
+ .format(bt.issue_id, bt.redmine_issue.status))
+ bt.set_tracker_status = True
+ print_outer_divider()
+
+ def populate_base_version(self):
+ self.base_version = ceph_version(self.repo, self.merge_commit_sha1)
+
+ def populate_target_version(self):
+ x, y, z = self.base_version.split('v')[1].split('.')
+ maybe_stable = "v{}.{}".format(x, y)
+ assert maybe_stable in ver_to_release(), \
+ "SHA1 {} is not based on any known stable release ({})".format(self.merge_commit_sha1, maybe_stable)
+ tv = "v{}.{}.{}".format(x, y, int(z) + 1)
+ if tv in version2version_id:
+ self.target_version = tv
+ else:
+ raise Exception("Version {} not found in Redmine".format(tv))
+
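populate_target_version() assumes the backport's "Target version" is the next point release after the merge base. The arithmetic in isolation, with a hypothetical base version:

    base_version = 'v14.2.4'    # hypothetical ceph_version() result
    x, y, z = base_version.split('v')[1].split('.')
    print('v{}.{}.{}'.format(x, y, int(z) + 1))   # v14.2.5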
+ def mogrify_github_pr_desc(self):
+ if not self.github_pr_desc:
+ self.github_pr_desc = ''
+ p = re.compile('<!--.+-->', re.DOTALL)
+ new_str = p.sub('', self.github_pr_desc)
+ if new_str == self.github_pr_desc:
+ logging.debug("GitHub PR description not mogrified")
+ else:
+ self.github_pr_desc = new_str
+
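mogrify_github_pr_desc() strips the HTML comment block that GitHub's PR template leaves in the description. A small demonstration with a made-up description:

    import re

    desc = 'backport of tracker issue\n<!--\nchecklist boilerplate\n-->\nsigned-off'
    print(re.compile('<!--.+-->', re.DOTALL).sub('', desc))
    # -> 'backport of tracker issue\n\nsigned-off'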
+ def populate_github_url(self):
+ global github_endpoint
+ # GitHub PR ID from merge commit string
+ p = re.compile('(pull request|PR) #(\\d+)')
+ try:
+ self.github_pr_id = p.search(self.merge_commit_description).group(2)
+ except AttributeError:
+ assert False, \
+ (
+ "Failed to extract GitHub PR ID from merge commit string ->{}<-"
+ .format(self.merge_commit_string)
+ )
+ logging.debug("Merge commit string: {}".format(self.merge_commit_string))
+ logging.debug("GitHub PR ID from merge commit string: {}".format(self.github_pr_id))
+ self.github_url = "{}/pull/{}".format(github_endpoint, self.github_pr_id)
+
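populate_github_url() recovers the PR number from the merge commit subject. Applied to the example string from the class docstring:

    import re

    merge_commit_string = '27ff851953 Merge pull request #29678 from pdvian/wip-40948-nautilus'
    sha1_short, description = merge_commit_string.split(' ', 1)
    pr_id = re.compile(r'(pull request|PR) #(\d+)').search(description).group(2)
    print(sha1_short, pr_id)   # 27ff851953 29678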
+ def extract_backport_trackers_from_github_pr_desc(self):
+ global redmine_endpoint
+ p = re.compile('http.?://tracker.ceph.com/issues/\\d+')
+ matching_strings = p.findall(self.github_pr_desc)
+ if not matching_strings:
+ print_outer_divider()
+ assert False, \
+ "GitHub PR description does not contain a Tracker URL"
+ self.backport_trackers = []
+ for issue_url in list(dict.fromkeys(matching_strings)):
+ p = re.compile('\\d+')
+ issue_id = p.search(issue_url).group()
+ if not issue_id:
+ print_outer_divider()
+ assert issue_id, \
+ "Failed to extract tracker ID from tracker URL {}".format(issue_url)
+ issue_url = "{}/issues/{}".format(redmine_endpoint, issue_id)
+ #
+ # we have a Tracker URL, but is it really a backport tracker?
+ backport_tracker_id = tracker2tracker_id['Backport']
+ redmine_issue = redmine.issue.get(issue_id)
+ if redmine_issue.tracker.id == backport_tracker_id:
+ self.backport_trackers.append(
+ BackportTracker(redmine_issue, issue_id, self)
+ )
+ print('''Found backport tracker: {}'''.format(issue_url))
+ if not self.backport_trackers:
+ print_outer_divider()
+ assert False, \
+ "No backport tracker found in PR description at {}".format(self.github_url)
+
+ def resolve(self):
+ for bt in self.backport_trackers:
+ bt.resolve()
+
+
+class BackportTracker(Backport):
+
+ def __init__(self, redmine_issue, issue_id, backport_obj):
+ self.redmine_issue = redmine_issue
+ self.issue_id = issue_id
+ self.parent = backport_obj
+ self.tracker_description = None
+ self.github_url_from_tracker = None
+
+ def get_tracker_description(self):
+ try:
+ self.tracker_description = self.redmine_issue.description
+ except ResourceAttrError:
+ self.tracker_description = ""
+
+ def get_tracker_target_version(self):
+ if self.redmine_issue.fixed_version:
+ logging.debug("Target version: ID {}, name {}"
+ .format(
+ self.redmine_issue.fixed_version.id,
+ self.redmine_issue.fixed_version.name
+ )
+ )
+ return self.redmine_issue.fixed_version.name
+ return None
+
+ def issue_url(self):
+ return "{}/issues/{}".format(redmine_endpoint, self.issue_id)
+
+ def resolve(self):
+ global delay_seconds
+ global dry_run
+ global redmine
+ kwargs = {}
+ if self.set_tracker_status:
+ kwargs['status_id'] = status2status_id['Resolved']
+ if self.set_target_version:
+ kwargs['fixed_version_id'] = version2version_id[self.parent.target_version]
+ if not self.github_url_from_tracker:
+ if self.tracker_description:
+ kwargs['description'] = "{}\n\n---\n\n{}".format(
+ self.parent.github_url,
+ self.tracker_description,
+ )
+ else:
+ kwargs['description'] = self.parent.github_url
+ kwargs['notes'] = (
+ "This update was made using the script \"backport-resolve-issue\".\n"
+ "backport PR {}\n"
+ "merge commit {} ({})\n".format(
+ self.parent.github_url,
+ self.parent.merge_commit_sha1,
+ self.parent.merge_commit_gd,
+ )
+ )
+ my_delay_seconds = delay_seconds
+ if dry_run:
+ logging.info("--dry-run was given: NOT updating Redmine")
+ my_delay_seconds = 0
+ else:
+ logging.debug("Updating tracker ID {}".format(self.issue_id))
+ redmine.issue.update(self.issue_id, **kwargs)
+ if not no_browser:
+ if browser_running():
+ os.system("{} {}".format(browser_cmd, self.issue_url()))
+ my_delay_seconds = 3
+ logging.debug(
+ "Delaying {} seconds to avoid seeming like a spammer"
+ .format(my_delay_seconds)
+ )
+ time.sleep(my_delay_seconds)
+
+
+if __name__ == '__main__':
+ args = parse_arguments()
+ set_logging_level(args)
+ logging.debug(args)
+ github_token = read_from_file(github_token_file)
+ if github_token:
+ logging.info("GitHub token was read from ->{}<-; using it".format(github_token_file))
+ github_user = derive_github_user_from_token(github_token)
+ if github_user:
+ logging.info(
+ "GitHub user ->{}<- was derived from the GitHub token".format(github_user)
+ )
+ report_params(args)
+ #
+ # set up Redmine variables
+ redmine = connect_to_redmine(args)
+ project = redmine.project.get(project_name)
+ ceph_project_id = project.id
+ logging.debug("Project {} has ID {}".format(project_name, ceph_project_id))
+ populate_status_dict(redmine)
+ pending_backport_status_id = status2status_id["Pending Backport"]
+ logging.debug(
+ "Pending Backport status has ID {}"
+ .format(pending_backport_status_id)
+ )
+ populate_tracker_dict(redmine)
+ populate_version_dict(redmine, ceph_project_id)
+ #
+ # construct github Repo object for the current directory
+ repo = Repo('.')
+ assert not repo.bare
+ populate_ceph_release(repo)
+ #
+ # if positional argument is an integer, assume it is a GitHub PR
+ if args.pr_or_commit:
+ pr_id = args.pr_or_commit[0]
+ try:
+ pr_id = int(pr_id)
+ logging.info("Examining PR#{}".format(pr_id))
+ tag_merge_commits = False
+ except ValueError:
+ logging.info("Starting from merge commit {}".format(args.pr_or_commit))
+ tag_merge_commits = True
+ else:
+ logging.info("Starting from BRI tag")
+ tag_merge_commits = True
+ #
+ # get list of merges
+ if tag_merge_commits:
+ ensure_bri_tag_exists(repo, ceph_release)
+ c_r = commit_range(args)
+ logging.info("Commit range is {}".format(c_r))
+ #
+ # get the list of merge commits, i.e. strings that look like:
+ # "27ff851953 Merge pull request #29678 from pdvian/wip-40948-nautilus"
+ merges_raw_str = repo.git.log(c_r, '--merges', '--oneline', '--no-decorate', '--reverse')
+ else:
+ pr_id = args.pr_or_commit[0]
+ merges_raw_str = repo.git.log(
+ '--merges',
+ '--grep=#{}'.format(pr_id),
+ '--oneline',
+ '--no-decorate',
+ '--reverse',
+ )
+ if merges_raw_str:
+ merges_raw_list = merges_raw_str.split('\n')
+ else:
+ merges_raw_list = [] # prevent ['']
+ merges_remaining = len(merges_raw_list)
+ logging.info("I see {} merge(s) to process".format(merges_remaining))
+ if not merges_remaining:
+ logging.info("Did you do \"git pull\" before running the script?")
+ if not tag_merge_commits:
+ logging.info("Or maybe GitHub PR {} has not been merged yet?".format(pr_id))
+ #
+ # loop over the merge commits
+ for merge in merges_raw_list:
+ can_go_on = process_merge(repo, merge, merges_remaining)
+ if can_go_on:
+ merges_remaining -= 1
+ print("Merges remaining to process: {}".format(merges_remaining))
+ else:
+ break
diff --git a/src/script/bdev_grep.pl b/src/script/bdev_grep.pl
new file mode 100755
index 000000000..a343aad45
--- /dev/null
+++ b/src/script/bdev_grep.pl
@@ -0,0 +1,19 @@
+#!/usr/bin/perl
+
+my $offset = shift @ARGV;
+
+while (<>) {
+ # next unless / \d\d bdev /;
+ my $rest = $_;
+ my @hit;
+ while ($rest =~ /([\da-f]+)[~\+]([\da-f]+)/) {
+ my ($o, $l) = $rest =~ /([\da-f]+)[~\+]([\da-f]+)/;
+ $rest = $';
+ if (hex($offset) >= hex($o) &&
+ hex($offset) < hex($o) + hex($l)) {
+ my $rel = hex($offset) - hex($o);
+ push(@hit, sprintf("%x",$rel));
+ }
+ }
+ print join(',',@hit) . "\t$_" if @hit;
+}
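bdev_grep.pl prints only those log lines containing an `offset~length` (or `offset+length`) extent that covers the given hex offset, along with the offset's relative position inside each matching extent. A rough Python rendering of the same logic:

    import re
    import sys

    def grep_extents(offset_hex, lines):
        offset = int(offset_hex, 16)
        extent = re.compile(r'([\da-f]+)[~+]([\da-f]+)')
        for line in lines:
            # relative position inside every extent covering the offset
            hits = ['%x' % (offset - int(o, 16))
                    for o, l in extent.findall(line)
                    if int(o, 16) <= offset < int(o, 16) + int(l, 16)]
            if hits:
                print(','.join(hits) + '\t' + line, end='')

    if __name__ == '__main__':
        grep_extents(sys.argv[1], sys.stdin)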
diff --git a/src/script/build-integration-branch b/src/script/build-integration-branch
new file mode 100755
index 000000000..b4f2a6121
--- /dev/null
+++ b/src/script/build-integration-branch
@@ -0,0 +1,112 @@
+#!/usr/bin/env python3
+
+"""
+Builds integration branches. Something similar to
+ $ git checkout -b branch-name
+ $ for b in $(get-branches-from-github) ; do
+ > git pull b
+ > done
+
+Requires `~/.github_token`.
+
+
+Usage:
+ build-integration-branch <label> [--no-date]
+ build-integration-branch -h | --help
+
+Options:
+ -h --help Show this screen.
+ --no-date Don't add `{postfix}` to the branch name.
+"""
+
+import json
+import os
+import requests
+import sys
+import time
+
+from subprocess import call, check_output
+from urllib.parse import urljoin
+
+TIME_FORMAT = '%Y-%m-%d-%H%M'
+postfix = "-" + time.strftime(TIME_FORMAT, time.localtime())
+
+current_branch = check_output('git rev-parse --abbrev-ref HEAD',
+ shell=True).strip().decode()
+if current_branch in 'mimic nautilus octopus pacific quincy'.split():
+ postfix += '-' + current_branch
+ print(f"Adding current branch name '-{current_branch}' as a postfix")
+
+repo = "ceph/ceph"
+
+try:
+ from docopt import docopt
+ arguments = docopt(__doc__.format(postfix=postfix))
+ label = arguments['<label>']
+ branch = label
+ if not arguments['--no-date']:
+ branch += postfix
+except ImportError:
+ # Fallback without docopt.
+ label = sys.argv[1]
+ assert len(sys.argv) == 2
+ branch = label + postfix
+
+
+with open(os.path.expanduser('~/.github_token')) as myfile:
+ token = myfile.readline().strip()
+
+# get prs
+baseurl = urljoin('https://api.github.com',
+ ('repos/{repo}/issues?labels={label}'
+ '&sort=created'
+ '&direction=asc'))
+url = baseurl.format(label=label,
+ repo=repo)
+r = requests.get(url,
+ headers={'Authorization': 'token %s' % token})
+assert r.ok
+j = json.loads(r.text or r.content)
+print("--- found %d issues tagged with %s" % (len(j), label))
+
+prs = []
+prtext = []
+for issue in j:
+ if 'pull_request' not in issue:
+ continue
+ r = requests.get(issue['pull_request']['url'],
+ headers={'Authorization': 'token %s' % token})
+ pr = json.loads(r.text or r.content)
+ prs.append(pr)
+ prtext.append(pr['html_url'] + ' - ' + pr['title'])
+print("--- queried %s prs" % len(prs))
+
+print("branch %s" % branch)
+
+# assemble
+print('--- creating branch %s' % branch)
+r = call(['git', 'branch', '-D', branch])
+r = call(['git', 'checkout', '-b', branch])
+assert not r
+for pr in prs:
+ pr_number = pr['number']
+ pr_url = pr['head']['repo']['clone_url']
+ pr_ref = pr['head']['ref']
+ print(f'--- pr {pr_number} --- pulling {pr_url} branch {pr_ref}')
+ while True:
+ r = call(['git', 'pull', '--no-ff', '--no-edit', pr_url, pr_ref])
+ if r == 0:
+ break
+ elif r == 1:
+ print(f'Unable to access {pr_url}, retrying...')
+ elif r == 128:
+ message = f'Unable to resolve conflict when merging PR#{pr_number}'
+ raise Exception(message)
+ else:
+ message = ('Exiting due to an unknown failure when pulling '
+ f'PR#{pr_number}')
+ raise Exception(message)
+
+print('--- done. these PRs were included:')
+print('\n'.join(prtext).encode('ascii', errors='ignore').decode())
+print('--- perhaps you want to: ./run-make-check.sh && git push ci %s' % branch)
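The issues query above is assembled with urljoin; for a hypothetical label, the resulting URL looks like this:

    from urllib.parse import urljoin

    label = 'wip-rgw-testing'   # hypothetical integration-branch label
    print(urljoin('https://api.github.com',
                  'repos/ceph/ceph/issues?labels={}&sort=created&direction=asc'
                  .format(label)))
    # https://api.github.com/repos/ceph/ceph/issues?labels=wip-rgw-testing&sort=created&direction=asc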
diff --git a/src/script/ceph-backport.sh b/src/script/ceph-backport.sh
new file mode 100755
index 000000000..ae39c40e8
--- /dev/null
+++ b/src/script/ceph-backport.sh
@@ -0,0 +1,1818 @@
+#!/usr/bin/env bash
+set -e
+#
+# ceph-backport.sh - Ceph backporting script
+#
+# Credits: This script is based on work done by Loic Dachary
+#
+#
+# This script automates the process of staging a backport starting from a
+# Backport tracker issue.
+#
+# Setup:
+#
+# ceph-backport.sh --setup
+#
+# Usage and troubleshooting:
+#
+# ceph-backport.sh --help
+# ceph-backport.sh --usage | less
+# ceph-backport.sh --troubleshooting | less
+#
+
+full_path="$0"
+
+SCRIPT_VERSION="16.0.0.6848"
+active_milestones=""
+backport_pr_labels=""
+backport_pr_number=""
+backport_pr_title=""
+backport_pr_url=""
+deprecated_backport_common="$HOME/bin/backport_common.sh"
+existing_pr_milestone_number=""
+github_token=""
+github_token_file="$HOME/.github_token"
+github_user=""
+milestone=""
+non_interactive=""
+original_issue=""
+original_issue_url=""
+original_pr=""
+original_pr_url=""
+redmine_key=""
+redmine_key_file="$HOME/.redmine_key"
+redmine_login=""
+redmine_user_id=""
+setup_ok=""
+this_script=$(basename "$full_path")
+
+if [[ $* == *--debug* ]]; then
+ set -x
+fi
+
+# associative array keyed on "component" strings from PR titles, mapping them to
+# GitHub PR labels that make sense in backports
+declare -A comp_hash=(
+["auth"]="core"
+["bluestore"]="bluestore"
+["build/ops"]="build/ops"
+["ceph.spec"]="build/ops"
+["ceph-volume"]="ceph-volume"
+["cephadm"]="cephadm"
+["cephfs"]="cephfs"
+["cmake"]="build/ops"
+["config"]="config"
+["client"]="cephfs"
+["common"]="common"
+["core"]="core"
+["dashboard"]="dashboard"
+["deb"]="build/ops"
+["doc"]="documentation"
+["grafana"]="monitoring"
+["mds"]="cephfs"
+["messenger"]="core"
+["mon"]="core"
+["msg"]="core"
+["mgr/cephadm"]="cephadm"
+["mgr/dashboard"]="dashboard"
+["mgr/prometheus"]="monitoring"
+["mgr"]="core"
+["monitoring"]="monitoring"
+["orch"]="orchestrator"
+["osd"]="core"
+["perf"]="performance"
+["prometheus"]="monitoring"
+["pybind"]="pybind"
+["py3"]="python3"
+["python3"]="python3"
+["qa"]="tests"
+["rbd"]="rbd"
+["rgw"]="rgw"
+["rpm"]="build/ops"
+["tests"]="tests"
+["tool"]="tools"
+)
+
+declare -A flagged_pr_hash=()
+
+function abort_due_to_setup_problem {
+ error "problem detected in your setup"
+ info "Run \"${this_script} --setup\" to fix"
+ false
+}
+
+function assert_fail {
+ local message="$1"
+ error "(internal error) $message"
+ info "This could be reported as a bug!"
+ false
+}
+
+function backport_pr_needs_label {
+ local check_label="$1"
+ local label
+ local needs_label="yes"
+ while read -r label ; do
+ if [ "$label" = "$check_label" ] ; then
+ needs_label=""
+ fi
+ done <<< "$backport_pr_labels"
+ echo "$needs_label"
+}
+
+function backport_pr_needs_milestone {
+ if [ "$existing_pr_milestone_number" ] ; then
+ echo ""
+ else
+ echo "yes"
+ fi
+}
+
+function bail_out_github_api {
+ local api_said="$1"
+ local hint="$2"
+ info "GitHub API said:"
+ log bare "$api_said"
+ if [ "$hint" ] ; then
+ info "(hint) $hint"
+ fi
+ abort_due_to_setup_problem
+}
+
+function blindly_set_pr_metadata {
+ local pr_number="$1"
+ local json_blob="$2"
+ curl -u ${github_user}:${github_token} --silent --data-binary "$json_blob" "https://api.github.com/repos/ceph/ceph/issues/${pr_number}" >/dev/null 2>&1 || true
+}
+
+function check_milestones {
+ local milestones_to_check
+ milestones_to_check="$(echo "$1" | tr '\n' ' ' | xargs)"
+ info "Active milestones: $milestones_to_check"
+ for m in $milestones_to_check ; do
+ info "Examining all PRs targeting base branch \"$m\""
+ vet_prs_for_milestone "$m"
+ done
+ dump_flagged_prs
+}
+
+function check_tracker_status {
+ local -a ok_statuses=("new" "need more info")
+ local ts="$1"
+ local error_msg
+ local tslc="${ts,,}"
+ local tslc_is_ok=
+ for oks in "${ok_statuses[@]}"; do
+ if [ "$tslc" = "$oks" ] ; then
+ debug "Tracker status $ts is OK for backport to proceed"
+ tslc_is_ok="yes"
+ break
+ fi
+ done
+ if [ "$tslc_is_ok" ] ; then
+ true
+ else
+ if [ "$tslc" = "in progress" ] ; then
+ error_msg="backport $redmine_url is already in progress"
+ else
+ error_msg="backport $redmine_url is closed (status: ${ts})"
+ fi
+ if [ "$FORCE" ] || [ "$EXISTING_PR" ] ; then
+ warning "$error_msg"
+ else
+ error "$error_msg"
+ fi
+ fi
+ echo "$tslc_is_ok"
+}
+
+function cherry_pick_phase {
+ local base_branch
+ local default_val
+ local i
+ local merged
+ local number_of_commits
+ local offset
+ local sha1_to_cherry_pick
+ local singular_or_plural_commit
+ local yes_or_no_answer
+ populate_original_issue
+ if [ -z "$original_issue" ] ; then
+ error "Could not find original issue"
+ info "Does ${redmine_url} have a \"Copied from\" relation?"
+ false
+ fi
+ info "Parent issue: ${original_issue_url}"
+
+ populate_original_pr
+ if [ -z "$original_pr" ]; then
+ error "Could not find original PR"
+ info "Is the \"Pull request ID\" field of ${original_issue_url} populated?"
+ false
+ fi
+ info "Parent issue ostensibly fixed by: ${original_pr_url}"
+
+ verbose "Examining ${original_pr_url}"
+ remote_api_output=$(curl -u ${github_user}:${github_token} --silent "https://api.github.com/repos/ceph/ceph/pulls/${original_pr}")
+ base_branch=$(echo "${remote_api_output}" | jq -r '.base.label')
+ if [ "$base_branch" = "ceph:master" -o "$base_branch" = "ceph:main" ] ; then
+ true
+ else
+ if [ "$FORCE" ] ; then
+ warning "base_branch ->$base_branch<- is something other than \"ceph:master\" or \"ceph:main\""
+ info "--force was given, so continuing anyway"
+ else
+ error "${original_pr_url} is targeting ${base_branch}: cowardly refusing to perform automated cherry-pick"
+ info "Out of an abundance of caution, the script only automates cherry-picking of commits from PRs targeting \"ceph:master\" or \"ceph:main\"."
+ info "You can still use the script to stage the backport, though. Just prepare the local branch \"${local_branch}\" manually and re-run the script."
+ false
+ fi
+ fi
+ merged=$(echo "${remote_api_output}" | jq -r '.merged')
+ if [ "$merged" = "true" ] ; then
+ true
+ else
+ error "${original_pr_url} is not merged yet"
+ info "Cowardly refusing to perform automated cherry-pick"
+ false
+ fi
+ number_of_commits=$(echo "${remote_api_output}" | jq '.commits')
+ if [ "$number_of_commits" -eq "$number_of_commits" ] 2>/dev/null ; then
+ # \$number_of_commits is set, and is an integer
+ if [ "$number_of_commits" -eq "1" ] ; then
+ singular_or_plural_commit="commit"
+ else
+ singular_or_plural_commit="commits"
+ fi
+ else
+ error "Could not determine the number of commits in ${original_pr_url}"
+ bail_out_github_api "$remote_api_output"
+ fi
+ info "Found $number_of_commits $singular_or_plural_commit in $original_pr_url"
+
+ set -x
+ git fetch "$upstream_remote"
+
+ if git show-ref --verify --quiet "refs/heads/$local_branch" ; then
+ if [ "$FORCE" ] ; then
+ if [ "$non_interactive" ] ; then
+ git checkout "$local_branch"
+ git reset --hard "${upstream_remote}/${milestone}"
+ else
+ echo
+ echo "A local branch $local_branch already exists and the --force option was given."
+ echo "If you continue, any local changes in $local_branch will be lost!"
+ echo
+ default_val="y"
+ echo -n "Do you really want to overwrite ${local_branch}? (default: ${default_val}) "
+ yes_or_no_answer="$(get_user_input "$default_val")"
+ [ "$yes_or_no_answer" ] && yes_or_no_answer="${yes_or_no_answer:0:1}"
+ if [ "$yes_or_no_answer" = "y" ] ; then
+ git checkout "$local_branch"
+ git reset --hard "${upstream_remote}/${milestone}"
+ else
+ info "OK, bailing out!"
+ false
+ fi
+ fi
+ else
+ set +x
+ maybe_restore_set_x
+ error "Cannot initialize $local_branch - local branch already exists"
+ false
+ fi
+ else
+ git checkout "${upstream_remote}/${milestone}" -b "$local_branch"
+ fi
+
+ git fetch "$upstream_remote" "pull/$original_pr/head:pr-$original_pr"
+
+ set +x
+ maybe_restore_set_x
+ info "Attempting to cherry pick $number_of_commits commits from ${original_pr_url} into local branch $local_branch"
+ offset="$((number_of_commits - 1))" || true
+ for ((i=offset; i>=0; i--)) ; do
+ info "Running \"git cherry-pick -x\" on $(git log --oneline --max-count=1 --no-decorate "pr-${original_pr}~${i}")"
+ sha1_to_cherry_pick=$(git rev-parse --verify "pr-${original_pr}~${i}")
+ set -x
+ if git cherry-pick -x "$sha1_to_cherry_pick" ; then
+ set +x
+ maybe_restore_set_x
+ else
+ set +x
+ maybe_restore_set_x
+ [ "$VERBOSE" ] && git status
+ error "Cherry pick failed"
+ info "Next, manually fix conflicts and complete the current cherry-pick"
+ if [ "$i" -gt "0" ] >/dev/null 2>&1 ; then
+ info "Then, cherry-pick the remaining commits from ${original_pr_url}, i.e.:"
+ for ((j=i-1; j>=0; j--)) ; do
+ info "-> missing commit: $(git log --oneline --max-count=1 --no-decorate "pr-${original_pr}~${j}")"
+ done
+ info "Finally, re-run the script"
+ else
+ info "Then re-run the script"
+ fi
+ false
+ fi
+ done
+ info "Cherry picking completed without conflicts"
+}
+
+function clear_line {
+ log overwrite " \r"
+}
+
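+# clip_pr_body: strip everything from the first HTML comment ("<!--") onward
+# and collapse runs of blank lines; the result uses literal \n escapes, ready
+# for embedding in a JSON string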
+function clip_pr_body {
+ local pr_body="$*"
+ local clipped=""
+ local last_line_was_blank=""
+ local line=""
+ local pr_json_tempfile=$(mktemp)
+ echo "$pr_body" | sed -n '/<!--.*/q;p' > "$pr_json_tempfile"
+ while IFS= read -r line; do
+ if [ "$(trim_whitespace "$line")" ] ; then
+ last_line_was_blank=""
+ clipped="${clipped}${line}\n"
+ else
+ if [ "$last_line_was_blank" ] ; then
+ true
+ else
+ clipped="${clipped}\n"
+ fi
+ fi
+ done < "$pr_json_tempfile"
+ rm "$pr_json_tempfile"
+ echo "$clipped"
+}
+
+function debug {
+ log debug "$@"
+}
+
+function display_version_message_and_exit {
+ echo "$this_script: Ceph backporting script, version $SCRIPT_VERSION"
+ exit 0
+}
+
+function dump_flagged_prs {
+ local url=
+ clear_line
+ if [ "${#flagged_pr_hash[@]}" -eq "0" ] ; then
+ info "All backport PRs appear to have milestone set correctly"
+ else
+ warning "Some backport PRs had problematic milestone settings"
+ log bare "==========="
+ log bare "Flagged PRs"
+ log bare "-----------"
+ for url in "${!flagged_pr_hash[@]}" ; do
+ log bare "$url - ${flagged_pr_hash[$url]}"
+ done
+ log bare "==========="
+ fi
+}
+
+function eol {
+ local mtt="$1"
+ error "$mtt is EOL"
+ false
+}
+
+function error {
+ log error "$@"
+}
+
+function existing_pr_routine {
+ local base_branch
+ local clipped_pr_body
+ local new_pr_body
+ local new_pr_title
+ local pr_body
+ local pr_json_tempfile
+ local remote_api_output
+ local update_pr_body
+ remote_api_output="$(curl -u ${github_user}:${github_token} --silent "https://api.github.com/repos/ceph/ceph/pulls/${backport_pr_number}")"
+ backport_pr_title="$(echo "$remote_api_output" | jq -r '.title')"
+ if [ "$backport_pr_title" = "null" ] ; then
+ error "could not get PR title of existing PR ${backport_pr_number}"
+ bail_out_github_api "$remote_api_output"
+ fi
+ existing_pr_milestone_number="$(echo "$remote_api_output" | jq -r '.milestone.number')"
+ if [ "$existing_pr_milestone_number" = "null" ] ; then
+ existing_pr_milestone_number=""
+ fi
+ backport_pr_labels="$(echo "$remote_api_output" | jq -r '.labels[].name')"
+ pr_body="$(echo "$remote_api_output" | jq -r '.body')"
+ if [ "$pr_body" = "null" ] ; then
+ error "could not get PR body of existing PR ${backport_pr_number}"
+ bail_out_github_api "$remote_api_output"
+ fi
+ base_branch=$(echo "${remote_api_output}" | jq -r '.base.label')
+ base_branch="${base_branch#ceph:}"
+ if [ -z "$(is_active_milestone "$base_branch")" ] ; then
+ error "existing PR $backport_pr_url is targeting $base_branch which is not an active milestone"
+ info "Cowardly refusing to work on a backport to $base_branch"
+ false
+ fi
+ clipped_pr_body="$(clip_pr_body "$pr_body")"
+ verbose_en "Clipped body of existing PR ${backport_pr_number}:\n${clipped_pr_body}"
+ if [[ "$backport_pr_title" =~ ^${milestone}: ]] ; then
+ verbose "Existing backport PR ${backport_pr_number} title has ${milestone} prepended"
+ else
+ warning "Existing backport PR ${backport_pr_number} title does NOT have ${milestone} prepended"
+ new_pr_title="${milestone}: $backport_pr_title"
+ if [[ "$new_pr_title" =~ \" ]] ; then
+ new_pr_title="${new_pr_title//\"/\\\"}"
+ fi
+ verbose "New PR title: ${new_pr_title}"
+ fi
+ redmine_url_without_scheme="${redmine_url//http?:\/\//}"
+ verbose "Redmine URL without scheme: $redmine_url_without_scheme"
+ if [[ "$clipped_pr_body" =~ $redmine_url_without_scheme ]] ; then
+ info "Existing backport PR ${backport_pr_number} already mentions $redmine_url"
+ if [ "$FORCE" ] ; then
+ warning "--force was given, so updating the PR body anyway"
+ update_pr_body="yes"
+ fi
+ else
+ warning "Existing backport PR ${backport_pr_number} does NOT mention $redmine_url - adding it"
+ update_pr_body="yes"
+ fi
+ if [ "$update_pr_body" ] ; then
+ new_pr_body="backport tracker: ${redmine_url}"
+ if [ "${original_pr_url}" ] ; then
+ new_pr_body="${new_pr_body}
+possibly a backport of ${original_pr_url}"
+ fi
+ if [ "${original_issue_url}" ] ; then
+ new_pr_body="${new_pr_body}
+parent tracker: ${original_issue_url}"
+ fi
+ new_pr_body="${new_pr_body}
+
+---
+
+original PR body:
+
+$clipped_pr_body
+
+---
+
+updated using ceph-backport.sh version ${SCRIPT_VERSION}"
+ fi
+ maybe_update_pr_title_body "${new_pr_title}" "${new_pr_body}"
+}
+
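+# failed_mandatory_var_check: note (in verbose mode) which mandatory variable
+# is missing or invalid, and mark the setup as not OK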
+function failed_mandatory_var_check {
+ local varname="$1"
+ local error="$2"
+ verbose "$varname $error"
+ setup_ok=""
+}
+
+function flag_pr {
+ local pr_num="$1"
+ local pr_url="$2"
+ local flag_reason="$3"
+ warning "flagging PR#${pr_num} because $flag_reason"
+ flagged_pr_hash["${pr_url}"]="$flag_reason"
+}
+
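+# from_file: read a whitespace-trimmed value (e.g. a token) from ~/.<what>;
+# echoes nothing if the file does not exist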
+function from_file {
+ local what="$1"
+ xargs 2>/dev/null < "$HOME/.${what}" || true
+}
+
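+# get_user_input: read one line from stdin; if the user just presses Enter,
+# fall back to the supplied default value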
+function get_user_input {
+ local default_val="$1"
+ local user_input=
+ read -r user_input
+ if [ "$user_input" ] ; then
+ echo "$user_input"
+ else
+ echo "$default_val"
+ fi
+}
+
+# takes a string and a substring - returns position of substring within string,
+# or -1 if not found
+# NOTE: position of first character in string is 0
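+# Example: grep_for_substr "Foobar" "bar" prints "3" (the string is lowercased first)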
+function grep_for_substr {
+ local str="$1"
+ local look_for_in_str="$2"
+ str="${str,,}"
+ munged="${str%%${look_for_in_str}*}"
+ if [ "$munged" = "$str" ] ; then
+ echo "-1"
+ else
+ echo "${#munged}"
+ fi
+}
+
+# takes PR title, attempts to guess component
+function guess_component {
+ local comp=
+ local pos="0"
+ local pr_title="$1"
+ local winning_comp=
+ local winning_comp_pos="9999"
+ for comp in "${!comp_hash[@]}" ; do
+ pos=$(grep_for_substr "$pr_title" "$comp")
+ # echo "$comp: $pos"
+ [ "$pos" = "-1" ] && continue
+ if [ "$pos" -lt "$winning_comp_pos" ] ; then
+ winning_comp_pos="$pos"
+ winning_comp="$comp"
+ fi
+ done
+ [ "$winning_comp" ] && echo "${comp_hash["$winning_comp"]}" || echo ""
+}
+
+function info {
+ log info "$@"
+}
+
+function init_endpoints {
+ verbose "Initializing remote API endpoints"
+ redmine_endpoint="${redmine_endpoint:-"https://tracker.ceph.com"}"
+ github_endpoint="${github_endpoint:-"https://github.com/ceph/ceph"}"
+}
+
+function init_fork_remote {
+ [ "$github_user" ] || assert_fail "github_user not set"
+ [ "$EXPLICIT_FORK" ] && info "Using explicit fork ->$EXPLICIT_FORK<- instead of personal fork."
+ fork_remote="${fork_remote:-$(maybe_deduce_remote fork)}"
+}
+
+function init_github_token {
+ github_token="$(from_file github_token)"
+ if [ "$github_token" ] ; then
+ true
+ else
+ warning "$github_token_file not populated: initiating interactive setup routine"
+ INTERACTIVE_SETUP_ROUTINE="yes"
+ fi
+}
+
+function init_redmine_key {
+ redmine_key="$(from_file redmine_key)"
+ if [ "$redmine_key" ] ; then
+ true
+ else
+ warning "$redmine_key_file not populated: initiating interactive setup routine"
+ INTERACTIVE_SETUP_ROUTINE="yes"
+ fi
+}
+
+function init_upstream_remote {
+ upstream_remote="${upstream_remote:-$(maybe_deduce_remote upstream)}"
+}
+
+function interactive_setup_routine {
+ local default_val
+ local original_github_token
+ local original_redmine_key
+ local total_steps
+ local yes_or_no_answer
+ original_github_token="$github_token"
+ original_redmine_key="$redmine_key"
+ total_steps="4"
+ if [ -e "$deprecated_backport_common" ] ; then
+ github_token=""
+ redmine_key=""
+ # shellcheck disable=SC1090
+ source "$deprecated_backport_common" 2>/dev/null || true
+ total_steps="$((total_steps+1))"
+ fi
+ echo
+ echo "Welcome to the ${this_script} interactive setup routine!"
+ echo
+ echo "---------------------------------------------------------------------"
+ echo "Setup step 1 of $total_steps - GitHub token"
+ echo "---------------------------------------------------------------------"
+ echo "For information on how to generate a GitHub personal access token"
+ echo "to use with this script, go to https://github.com/settings/tokens"
+ echo "then click on \"Generate new token\" and make sure the token has"
+ echo "\"Full control of private repositories\" scope."
+ echo
+ echo "For more details, see:"
+ echo "https://help.github.com/en/articles/creating-a-personal-access-token-for-the-command-line"
+ echo
+ echo -n "What is your GitHub token? "
+ default_val="$github_token"
+ [ "$github_token" ] && echo "(default: ${default_val})"
+ github_token="$(get_user_input "$default_val")"
+ if [ "$github_token" ] ; then
+ true
+ else
+ error "You must provide a valid GitHub personal access token"
+ abort_due_to_setup_problem
+ fi
+ [ "$github_token" ] || assert_fail "github_token not set, even after completing Step 1 of interactive setup"
+ echo
+ echo "---------------------------------------------------------------------"
+ echo "Setup step 2 of $total_steps - GitHub user"
+ echo "---------------------------------------------------------------------"
+ echo "The script will now attempt to determine your GitHub user (login)"
+ echo "from the GitHub token provided in the previous step. If this is"
+ echo "successful, there is a good chance that your GitHub token is OK."
+ echo
+ echo "Communicating with the GitHub API..."
+ set_github_user_from_github_token
+ [ "$github_user" ] || abort_due_to_setup_problem
+ echo
+ echo -n "Is the GitHub username (login) \"$github_user\" correct? "
+ default_val="y"
+ [ "$github_token" ] && echo "(default: ${default_val})"
+ yes_or_no_answer="$(get_user_input "$default_val")"
+ [ "$yes_or_no_answer" ] && yes_or_no_answer="${yes_or_no_answer:0:1}"
+ if [ "$yes_or_no_answer" = "y" ] ; then
+ if [ "$github_token" = "$original_github_token" ] ; then
+ true
+ else
+ debug "GitHub personal access token changed"
+ echo "$github_token" > "$github_token_file"
+ chmod 0600 "$github_token_file"
+ info "Wrote GitHub personal access token to $github_token_file"
+ fi
+ else
+ error "GitHub user does not look right"
+ abort_due_to_setup_problem
+ fi
+ [ "$github_token" ] || assert_fail "github_token not set, even after completing Steps 1 and 2 of interactive setup"
+ [ "$github_user" ] || assert_fail "github_user not set, even after completing Steps 1 and 2 of interactive setup"
+ echo
+ echo "---------------------------------------------------------------------"
+ echo "Setup step 3 of $total_steps - remote repos"
+ echo "---------------------------------------------------------------------"
+ echo "Searching \"git remote -v\" for remote repos"
+ echo
+ init_upstream_remote
+ init_fork_remote
+ vet_remotes
+ echo "Upstream remote is \"$upstream_remote\""
+ echo "Fork remote is \"$fork_remote\""
+ [ "$setup_ok" ] || abort_due_to_setup_problem
+ [ "$github_token" ] || assert_fail "github_token not set, even after completing Steps 1-3 of interactive setup"
+ [ "$github_user" ] || assert_fail "github_user not set, even after completing Steps 1-3 of interactive setup"
+ [ "$upstream_remote" ] || assert_fail "upstream_remote not set, even after completing Steps 1-3 of interactive setup"
+ [ "$fork_remote" ] || assert_fail "fork_remote not set, even after completing Steps 1-3 of interactive setup"
+ echo
+ echo "---------------------------------------------------------------------"
+ echo "Setup step 4 of $total_steps - Redmine key"
+ echo "---------------------------------------------------------------------"
+ echo "To generate a Redmine API access key, go to https://tracker.ceph.com"
+ echo "After signing in, click: \"My account\""
+ echo "Now, find \"API access key\"."
+ echo "Once you know the API access key, enter it below."
+ echo
+ echo -n "What is your Redmine key? "
+ default_val="$redmine_key"
+ [ "$redmine_key" ] && echo "(default: ${default_val})"
+ redmine_key="$(get_user_input "$default_val")"
+ if [ "$redmine_key" ] ; then
+ set_redmine_user_from_redmine_key
+ if [ "$setup_ok" ] ; then
+ true
+ else
+ info "You must provide a valid Redmine API access key"
+ abort_due_to_setup_problem
+ fi
+ if [ "$redmine_key" = "$original_redmine_key" ] ; then
+ true
+ else
+ debug "Redmine API access key changed"
+ echo "$redmine_key" > "$redmine_key_file"
+ chmod 0600 "$redmine_key_file"
+ info "Wrote Redmine API access key to $redmine_key_file"
+ fi
+ else
+ error "You must provide a valid Redmine API access key"
+ abort_due_to_setup_problem
+ fi
+ [ "$github_token" ] || assert_fail "github_token not set, even after completing Steps 1-4 of interactive setup"
+ [ "$github_user" ] || assert_fail "github_user not set, even after completing Steps 1-4 of interactive setup"
+ [ "$upstream_remote" ] || assert_fail "upstream_remote not set, even after completing Steps 1-4 of interactive setup"
+ [ "$fork_remote" ] || assert_fail "fork_remote not set, even after completing Steps 1-4 of interactive setup"
+ [ "$redmine_key" ] || assert_fail "redmine_key not set, even after completing Steps 1-4 of interactive setup"
+ [ "$redmine_user_id" ] || assert_fail "redmine_user_id not set, even after completing Steps 1-4 of interactive setup"
+ [ "$redmine_login" ] || assert_fail "redmine_login not set, even after completing Steps 1-4 of interactive setup"
+ if [ "$total_steps" -gt "4" ] ; then
+ echo
+ echo "---------------------------------------------------------------------"
+ echo "Step 5 of $total_steps - delete deprecated $deprecated_backport_common file"
+ echo "---------------------------------------------------------------------"
+ fi
+ maybe_delete_deprecated_backport_common
+ vet_setup --interactive
+}
+
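+# is_active_milestone: echo "yes" if the given milestone is among the active
+# milestones fetched from GitHub, empty otherwise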
+function is_active_milestone {
+ local is_active=
+ local milestone_under_test="$1"
+ for m in $active_milestones ; do
+ if [ "$milestone_under_test" = "$m" ] ; then
+ verbose "Milestone $m is active"
+ is_active="yes"
+ break
+ fi
+ done
+ echo "$is_active"
+}
+
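+# log: central logging helper; the first argument selects the level/behavior
+# (bare, debug, error, hex, info, overwrite, verbose, verbose_en, warning).
+# All output goes to stderr; debug/verbose messages are shown only when
+# verbose mode is in effect.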
+function log {
+ local level="$1"
+ local trailing_newline="yes"
+ local in_hex=""
+ shift
+ local msg="$*"
+ prefix="${this_script}: "
+ verbose_only=
+ case $level in
+ bare)
+ prefix=
+ ;;
+ debug)
+ prefix="${prefix}DEBUG: "
+ verbose_only="yes"
+ ;;
+ err*)
+ prefix="${prefix}ERROR: "
+ ;;
+ hex)
+ in_hex="yes"
+ ;;
+ info)
+ :
+ ;;
+ overwrite)
+ trailing_newline=
+ prefix=
+ ;;
+ verbose)
+ verbose_only="yes"
+ ;;
+ verbose_en)
+ verbose_only="yes"
+ trailing_newline=
+ ;;
+ warn|warning)
+ prefix="${prefix}WARNING: "
+ ;;
+ esac
+ if [ "$in_hex" ] ; then
+ print_in_hex "$msg"
+ elif [ "$verbose_only" ] && [ -z "$VERBOSE" ] ; then
+ true
+ else
+ msg="${prefix}${msg}"
+ if [ "$trailing_newline" ] ; then
+ echo "${msg}" >&2
+ else
+ echo -en "${msg}" >&2
+ fi
+ fi
+}
+
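+# maybe_deduce_remote: scan "git remote -v" for a remote whose URL points at
+# the ceph/ceph repo (type "upstream") or at the user's/explicit fork
+# (type "fork"), and echo the name of the first match, if any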
+function maybe_deduce_remote {
+ local remote_type="$1"
+ local remote=""
+ local url_component=""
+ if [ "$remote_type" = "upstream" ] ; then
+ url_component="ceph"
+ elif [ "$remote_type" = "fork" ] ; then
+ if [ "$EXPLICIT_FORK" ] ; then
+ url_component="$EXPLICIT_FORK"
+ else
+ url_component="$github_user"
+ fi
+ else
+ assert_fail "bad remote_type ->$remote_type<- in maybe_deduce_remote"
+ fi
+ remote=$(git remote -v | grep --extended-regexp --ignore-case '(://|@)github.com(/|:|:/)'${url_component}'/ceph(\s|\.|\/)' | head -n1 | cut -f 1)
+ echo "$remote"
+}
+
+function maybe_delete_deprecated_backport_common {
+ local default_val
+ local user_inp
+ if [ -e "$deprecated_backport_common" ] ; then
+ echo "You still have a $deprecated_backport_common file,"
+ echo "which was used to store configuration parameters in version"
+ echo "15.0.0.6270 and earlier versions of ${this_script}."
+ echo
+ echo "Since $deprecated_backport_common has been deprecated in favor"
+ echo "of the interactive setup routine, which has been completed"
+ echo "successfully, the file should be deleted now."
+ echo
+ echo -n "Delete it now? (default: y) "
+ default_val="y"
+ user_inp="$(get_user_input "$default_val")"
+ user_inp="$(echo "$user_inp" | tr '[:upper:]' '[:lower:]' | xargs)"
+ if [ "$user_inp" ] ; then
+ user_inp="${user_inp:0:1}"
+ if [ "$user_inp" = "y" ] ; then
+ set -x
+ rm -f "$deprecated_backport_common"
+ set +x
+ maybe_restore_set_x
+ fi
+ fi
+ if [ -e "$deprecated_backport_common" ] ; then
+ error "$deprecated_backport_common still exists. Bailing out!"
+ false
+ fi
+ fi
+}
+
+function maybe_restore_set_x {
+ if [ "$DEBUG" ] ; then
+ set -x
+ fi
+}
+
+function maybe_update_pr_milestone_labels {
+ local component
+ local data_binary
+ local label
+ local needs_milestone
+ if [ "$EXPLICIT_COMPONENT" ] ; then
+ debug "Component given on command line: using it"
+ component="$EXPLICIT_COMPONENT"
+ else
+ debug "Attempting to guess component"
+ component=$(guess_component "$backport_pr_title")
+ fi
+ data_binary="{"
+ needs_milestone="$(backport_pr_needs_milestone)"
+ if [ "$needs_milestone" ] ; then
+ debug "Attempting to set ${milestone} milestone in ${backport_pr_url}"
+ data_binary="${data_binary}\"milestone\":${milestone_number}"
+ else
+ info "Backport PR ${backport_pr_url} already has ${milestone} milestone"
+ fi
+ if [ "$(backport_pr_needs_label "$component")" ] ; then
+ debug "Attempting to add ${component} label to ${backport_pr_url}"
+ if [ "$needs_milestone" ] ; then
+ data_binary="${data_binary},"
+ fi
+ data_binary="${data_binary}\"labels\":[\"${component}\""
+ while read -r label ; do
+ if [ "$label" ] ; then
+ data_binary="${data_binary},\"${label}\""
+ fi
+ done <<< "$backport_pr_labels"
+ data_binary="${data_binary}]}"
+ else
+ info "Backport PR ${backport_pr_url} already has label ${component}"
+ data_binary="${data_binary}}"
+ fi
+ if [ "$data_binary" = "{}" ] ; then
+ true
+ else
+ blindly_set_pr_metadata "$backport_pr_number" "$data_binary"
+ fi
+}
+
+function maybe_update_pr_title_body {
+ local new_title="$1"
+ local new_body="$2"
+ local data_binary
+ if [ "$new_title" ] && [ "$new_body" ] ; then
+ data_binary="{\"title\":\"${new_title}\", \"body\":\"$(munge_body "${new_body}")\"}"
+ elif [ "$new_title" ] ; then
+ data_binary="{\"title\":\"${new_title}\"}"
+ backport_pr_title="${new_title}"
+ elif [ "$new_body" ] ; then
+ data_binary="{\"body\":\"$(munge_body "${new_body}")\"}"
+ #log hex "${data_binary}"
+ #echo -n "${data_binary}"
+ fi
+ if [ "$data_binary" ] ; then
+ blindly_set_pr_metadata "${backport_pr_number}" "$data_binary"
+ fi
+}
+
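+# milestone_number_from_remote_api: look up the GitHub milestone number for the
+# given milestone title via the GitHub API; on failure, list the valid titles
+# and bail out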
+function milestone_number_from_remote_api {
+ local mtt="$1" # milestone to try
+ local mn="" # milestone number
+ local milestones
+ remote_api_output=$(curl -u ${github_user}:${github_token} --silent -X GET "https://api.github.com/repos/ceph/ceph/milestones")
+ mn=$(echo "$remote_api_output" | jq --arg milestone "$mtt" '.[] | select(.title==$milestone) | .number')
+ if [ "$mn" -gt "0" ] >/dev/null 2>&1 ; then
+ echo "$mn"
+ else
+ error "Could not determine milestone number of ->$milestone<-"
+ verbose_en "GitHub API said:\n${remote_api_output}\n"
+ remote_api_output=$(curl -u ${github_user}:${github_token} --silent -X GET "https://api.github.com/repos/ceph/ceph/milestones")
+ milestones=$(echo "$remote_api_output" | jq '.[].title')
+ info "Valid values are ${milestones}"
+ info "(This probably means the Release field of ${redmine_url} is populated with"
+ info "an unexpected value - i.e. it does not match any of the GitHub milestones.)"
+ false
+ fi
+}
+
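+# munge_body: flatten a PR body to a single line with literal \n escapes so it
+# can be embedded in a JSON string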
+function munge_body {
+ local body="$1"
+ echo "$body" | tr '\r' '\n' | sed 's/$/\\n/' | tr -d '\n'
+}
+
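+# number_to_url: map an issue/PR number to its full Redmine or GitHub URL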
+function number_to_url {
+ local number_type="$1"
+ local number="$2"
+ if [ "$number_type" = "github" ] ; then
+ echo "${github_endpoint}/pull/${number}"
+ elif [ "$number_type" = "redmine" ] ; then
+ echo "${redmine_endpoint}/issues/${number}"
+ else
+ assert_fail "internal error in number_to_url: bad type ->$number_type<-"
+ fi
+}
+
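+# populate_original_issue: query Redmine for the Backport issue's relations and
+# extract the original (parent) issue from the "copied_to" relation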
+function populate_original_issue {
+ if [ -z "$original_issue" ] ; then
+ original_issue=$(curl --silent "${redmine_url}.json?include=relations" |
+ jq '.issue.relations[] | select(.relation_type | contains("copied_to")) | .issue_id')
+ original_issue_url="$(number_to_url "redmine" "${original_issue}")"
+ fi
+}
+
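+# populate_original_pr: read the "Pull request ID" custom field (ID 21) of the
+# original issue to determine the original GitHub PR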
+function populate_original_pr {
+ if [ "$original_issue" ] ; then
+ if [ -z "$original_pr" ] ; then
+ original_pr=$(curl --silent "${original_issue_url}.json" |
+ jq -r '.issue.custom_fields[] | select(.id | contains(21)) | .value')
+ original_pr_url="$(number_to_url "github" "${original_pr}")"
+ fi
+ fi
+}
+
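+# print_in_hex: dump a string character by character in hex (debugging aid)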
+function print_in_hex {
+ local str="$1"
+ local c
+
+ for (( i=0; i < ${#str}; i++ ))
+ do
+ c=${str:$i:1}
+ if [[ $c == ' ' ]]
+ then
+ printf "[%s] 0x%X\n" " " \'\ \' >&2
+ else
+ printf "[%s] 0x%X\n" "$c" \'"$c"\' >&2
+ fi
+ done
+}
+
+function set_github_user_from_github_token {
+ local quiet="$1"
+ local api_error
+ local curl_opts
+ setup_ok=""
+ [ "$github_token" ] || assert_fail "set_github_user_from_github_token: git_token not set"
+ curl_opts="--silent -u :${github_token} https://api.github.com/user"
+ [ "$quiet" ] || set -x
+ remote_api_output="$(curl $curl_opts)"
+ set +x
+ github_user=$(echo "${remote_api_output}" | jq -r .login 2>/dev/null | grep -v null || true)
+ api_error=$(echo "${remote_api_output}" | jq -r .message 2>/dev/null | grep -v null || true)
+ if [ "$api_error" ] ; then
+ info "GitHub API said: ->$api_error<-"
+ info "If you can't figure out what's wrong by examining the curl command and its output, above,"
+ info "please also study https://developer.github.com/v3/users/#get-the-authenticated-user"
+ github_user=""
+ else
+ [ "$github_user" ] || assert_fail "set_github_user_from_github_token: failed to set github_user"
+ info "my GitHub username is $github_user"
+ setup_ok="yes"
+ fi
+}
+
+function set_redmine_user_from_redmine_key {
+ [ "$redmine_key" ] || assert_fail "set_redmine_user_from_redmine_key was called, but redmine_key not set"
+ local api_key_from_api
+ remote_api_output="$(curl --silent "https://tracker.ceph.com/users/current.json?key=$redmine_key")"
+ redmine_login="$(echo "$remote_api_output" | jq -r '.user.login')"
+ redmine_user_id="$(echo "$remote_api_output" | jq -r '.user.id')"
+ api_key_from_api="$(echo "$remote_api_output" | jq -r '.user.api_key')"
+ if [ "$redmine_login" ] && [ "$redmine_user_id" ] && [ "$api_key_from_api" = "$redmine_key" ] ; then
+ [ "$redmine_user_id" ] || assert_fail "set_redmine_user_from_redmine_key: failed to set redmine_user_id"
+ [ "$redmine_login" ] || assert_fail "set_redmine_user_from_redmine_key: failed to set redmine_login"
+ info "my Redmine username is $redmine_login (ID $redmine_user_id)"
+ setup_ok="yes"
+ else
+ error "Redmine API access key $redmine_key is invalid"
+ redmine_login=""
+ redmine_user_id=""
+ setup_ok=""
+ fi
+}
+
+function tracker_component_is_in_desired_state {
+ local comp="$1"
+ local val_is="$2"
+ local val_should_be="$3"
+ local in_desired_state
+ if [ "$val_is" = "$val_should_be" ] ; then
+ debug "Tracker $comp is in the desired state"
+ in_desired_state="yes"
+ fi
+ echo "$in_desired_state"
+}
+
+function tracker_component_was_updated {
+ local comp="$1"
+ local val_old="$2"
+ local val_new="$3"
+ local was_updated
+ if [ "$val_old" = "$val_new" ] ; then
+ true
+ else
+ debug "Tracker $comp was updated!"
+ was_updated="yes"
+ fi
+ echo "$was_updated"
+}
+
+function trim_whitespace {
+ local var="$*"
+ # remove leading whitespace characters
+ var="${var#"${var%%[![:space:]]*}"}"
+ # remove trailing whitespace characters
+ var="${var%"${var##*[![:space:]]}"}"
+ echo -n "$var"
+}
+
+function troubleshooting_advice {
+ cat <<EOM
+Troubleshooting notes
+---------------------
+
+If the script inexplicably fails with:
+
+ error: a cherry-pick or revert is already in progress
+ hint: try "git cherry-pick (--continue | --quit | --abort)"
+ fatal: cherry-pick failed
+
+This is because HEAD is not where git expects it to be:
+
+ $ git cherry-pick --abort
+ warning: You seem to have moved HEAD. Not rewinding, check your HEAD!
+
+This can be fixed by issuing the command:
+
+ $ git cherry-pick --quit
+
+EOM
+}
+
+# to update known milestones, consult:
+# curl --verbose -X GET https://api.github.com/repos/ceph/ceph/milestones
+function try_known_milestones {
+ local mtt=$1 # milestone to try
+ local mn="" # milestone number
+ case $mtt in
+ cuttlefish) eol "$mtt" ;;
+ dumpling) eol "$mtt" ;;
+ emperor) eol "$mtt" ;;
+ firefly) eol "$mtt" ;;
+ giant) eol "$mtt" ;;
+ hammer) eol "$mtt" ;;
+ infernalis) eol "$mtt" ;;
+ jewel) mn="8" ;;
+ kraken) eol "$mtt" ;;
+ luminous) mn="10" ;;
+ mimic) mn="11" ;;
+ nautilus) mn="12" ;;
+ octopus) mn="13" ;;
+ pacific) mn="14" ;;
+ quincy) mn="15" ;;
+ esac
+ echo "$mn"
+}
+
+function update_version_number_and_exit {
+ set -x
+ local raw_version
+ local munge_first_hyphen
+ # munge_first_hyphen will look like this: 15.0.0.5774-g4c2f2eda969
+ local script_version_number
+ raw_version="$(git describe --long --match 'v*' | sed 's/^v//')" # example: "15.0.0-5774-g4c2f2eda969"
+ munge_first_hyphen="${raw_version/-/.}" # example: "15.0.0.5774-g4c2f2eda969"
+ script_version_number="${munge_first_hyphen%-*}" # example: "15.0.0.5774"
+ sed -i -e "s/^SCRIPT_VERSION=.*/SCRIPT_VERSION=\"${script_version_number}\"/" "$full_path"
+ exit 0
+}
+
+function usage {
+ cat <<EOM >&2
+Setup:
+
+ ${this_script} --setup
+
+Documentation:
+
+ ${this_script} --help
+ ${this_script} --usage | less
+ ${this_script} --troubleshooting | less
+
+Usage:
+ ${this_script} BACKPORT_TRACKER_ISSUE_NUMBER
+
+Options (not needed in normal operation):
+ --cherry-pick-only (stop after cherry-pick phase)
+ --component/-c COMPONENT
+ (explicitly set the component label; if omitted, the
+ script will try to guess the component)
+ --debug (turns on "set -x")
+ --existing-pr BACKPORT_PR_ID
+ (use this when the backport PR is already open)
+ --force (exercise caution!)
+ --fork EXPLICIT_FORK (use EXPLICIT_FORK instead of personal GitHub fork)
+ --milestones (vet all backport PRs for correct milestone setting)
+ --setup/-s (run the interactive setup routine - NOTE: this can
+ be done any number of times)
+ --setup-report (check the setup and print a report)
+ --update-version (this option exists as a convenience for the script
+ maintainer only: not intended for day-to-day usage)
+ --verbose/-v (produce more output than normal)
+ --version (display version number and exit)
+
+Example:
+ ${this_script} 31459
+ (if cherry-pick conflicts are present, finish cherry-picking phase manually
+ and then run the script again with the same argument)
+
+CAVEAT: The script must be run from inside a local git clone.
+EOM
+}
+
+function usage_advice {
+ cat <<EOM
+Usage advice
+------------
+
+Once you have completed --setup, you can run the script with the ID of
+a Backport tracker issue. For example, to stage the backport
+https://tracker.ceph.com/issues/41502, run:
+
+ ${this_script} 41502
+
+Provided the commits in the corresponding main PR cherry-pick cleanly, the
+script will automatically perform all steps required to stage the backport:
+
+Cherry-pick phase:
+
+1. fetching the latest commits from the upstream remote
+2. creating a wip branch for the backport
+3. figuring out which upstream PR contains the commits to cherry-pick
+4. cherry-picking the commits
+
+PR phase:
+
+5. pushing the wip branch to your fork
+6. opening the backport PR with compliant title and description describing
+ the backport
+7. (optionally) setting the milestone and label in the PR
+8. updating the Backport tracker issue
+
+When run with --cherry-pick-only, the script will stop after the cherry-pick
+phase.
+
+If any of the commits do not cherry-pick cleanly, the script will abort in
+step 4. In this case, you can either finish the cherry-picking manually
+or abort the cherry-pick. In any case, when and if the local wip branch is
+ready (all commits cherry-picked), if you run the script again, like so:
+
+ ${this_script} 41502
+
+the script will detect that the wip branch already exists and skip over
+steps 1-4, starting from step 5 ("PR phase"). In other words, if the wip branch
+already exists for any reason, the script will assume that the cherry-pick
+phase (steps 1-4) is complete.
+
+As this implies, you can do steps 1-4 manually. Provided the wip branch name
+is in the format wip-\$TRACKER_ID-\$STABLE_RELEASE (e.g. "wip-41502-mimic"),
+the script will detect the wip branch and start from step 5.
+
+For details on all the options the script takes, run:
+
+ ${this_script} --help
+
+For more information on Ceph backporting, see:
+
+ https://github.com/ceph/ceph/tree/main/SubmittingPatches-backports.rst
+
+EOM
+}
+
+function verbose {
+ log verbose "$@"
+}
+
+function verbose_en {
+ log verbose_en "$@"
+}
+
+function vet_pr_milestone {
+ local pr_number="$1"
+ local pr_title="$2"
+ local pr_url="$3"
+ local milestone_stanza="$4"
+ local milestone_title_should_be="$5"
+ local milestone_number_should_be
+ local milestone_number_is=
+ local milestone_title_is=
+ milestone_number_should_be="$(try_known_milestones "$milestone_title_should_be")"
+ log overwrite "Vetting milestone of PR#${pr_number}\r"
+ if [ "$milestone_stanza" = "null" ] ; then
+ blindly_set_pr_metadata "$pr_number" "{\"milestone\": $milestone_number_should_be}"
+ warning "$pr_url: set milestone to \"$milestone_title_should_be\""
+ flag_pr "$pr_number" "$pr_url" "milestone not set"
+ else
+ milestone_title_is=$(echo "$milestone_stanza" | jq -r '.title')
+ milestone_number_is=$(echo "$milestone_stanza" | jq -r '.number')
+ if [ "$milestone_number_is" -eq "$milestone_number_should_be" ] ; then
+ true
+ else
+ blindly_set_pr_metadata "$pr_number" "{\"milestone\": $milestone_number_should_be}"
+ warning "$pr_url: changed milestone from \"$milestone_title_is\" to \"$milestone_title_should_be\""
+ flag_pr "$pr_number" "$pr_url" "milestone set to wrong value \"$milestone_title_is\""
+ fi
+ fi
+}
+
+function vet_prs_for_milestone {
+ local milestone_title="$1"
+ local pages_of_output=
+ local pr_number=
+ local pr_title=
+ local pr_url=
+ # determine last page (i.e., total number of pages)
+ remote_api_output="$(curl -u ${github_user}:${github_token} --silent --head "https://api.github.com/repos/ceph/ceph/pulls?base=${milestone_title}" | grep -E '^Link' || true)"
+ if [ "$remote_api_output" ] ; then
+ # Link: <https://api.github.com/repositories/2310495/pulls?base=luminous&page=2>; rel="next", <https://api.github.com/repositories/2310495/pulls?base=luminous&page=2>; rel="last"
+ # shellcheck disable=SC2001
+ pages_of_output="$(echo "$remote_api_output" | sed 's/^.*&page\=\([0-9]\+\)>; rel=\"last\".*$/\1/g')"
+ else
+ pages_of_output="1"
+ fi
+ verbose "GitHub has $pages_of_output pages of pull request data for \"base:${milestone_title}\""
+ for ((page=1; page<=pages_of_output; page++)) ; do
+ verbose "Fetching PRs (page $page of ${pages_of_output})"
+ remote_api_output="$(curl -u ${github_user}:${github_token} --silent -X GET "https://api.github.com/repos/ceph/ceph/pulls?base=${milestone_title}&page=${page}")"
+ prs_in_page="$(echo "$remote_api_output" | jq -r '. | length')"
+ verbose "Page $page of remote API output contains information on $prs_in_page PRs"
+ for ((i=0; i<prs_in_page; i++)) ; do
+ pr_number="$(echo "$remote_api_output" | jq -r ".[${i}].number")"
+ pr_title="$(echo "$remote_api_output" | jq -r ".[${i}].title")"
+ pr_url="$(number_to_url "github" "${pr_number}")"
+ milestone_stanza="$(echo "$remote_api_output" | jq -r ".[${i}].milestone")"
+ vet_pr_milestone "$pr_number" "$pr_title" "$pr_url" "$milestone_stanza" "$milestone_title"
+ done
+ clear_line
+ done
+}
+
+function vet_remotes {
+ if [ "$upstream_remote" ] ; then
+ verbose "Upstream remote is $upstream_remote"
+ else
+ error "Cannot auto-determine upstream remote"
+ "(Could not find any upstream remote in \"git remote -v\")"
+ false
+ fi
+ if [ "$fork_remote" ] ; then
+ verbose "Fork remote is $fork_remote"
+ else
+ error "Cannot auto-determine fork remote"
+ if [ "$EXPLICIT_FORK" ] ; then
+ info "(Could not find $EXPLICIT_FORK fork of ceph/ceph in \"git remote -v\")"
+ else
+ info "(Could not find GitHub user ${github_user}'s fork of ceph/ceph in \"git remote -v\")"
+ fi
+ setup_ok=""
+ fi
+}
+
+function vet_setup {
+ local argument="$1"
+ local not_set="!!! NOT SET !!!"
+ local invalid="!!! INVALID !!!"
+ local redmine_endpoint_display
+ local redmine_user_id_display
+ local github_endpoint_display
+ local github_user_display
+ local upstream_remote_display
+ local fork_remote_display
+ local redmine_key_display
+ local github_token_display
+ debug "Entering vet_setup with argument $argument"
+ if [ "$argument" = "--report" ] || [ "$argument" = "--normal-operation" ] ; then
+ [ "$github_token" ] && [ "$setup_ok" ] && set_github_user_from_github_token quiet
+ init_upstream_remote
+ [ "$github_token" ] && [ "$setup_ok" ] && init_fork_remote
+ vet_remotes
+ [ "$redmine_key" ] && set_redmine_user_from_redmine_key
+ fi
+ if [ "$github_token" ] ; then
+ if [ "$setup_ok" ] ; then
+ github_token_display="(OK; value not shown)"
+ else
+ github_token_display="$invalid"
+ fi
+ else
+ github_token_display="$not_set"
+ fi
+ if [ "$redmine_key" ] ; then
+ if [ "$setup_ok" ] ; then
+ redmine_key_display="(OK; value not shown)"
+ else
+ redmine_key_display="$invalid"
+ fi
+ else
+ redmine_key_display="$not_set"
+ fi
+ redmine_endpoint_display="${redmine_endpoint:-$not_set}"
+ redmine_user_id_display="${redmine_user_id:-$not_set}"
+ github_endpoint_display="${github_endpoint:-$not_set}"
+ github_user_display="${github_user:-$not_set}"
+ upstream_remote_display="${upstream_remote:-$not_set}"
+ fork_remote_display="${fork_remote:-$not_set}"
+ test "$redmine_endpoint" || failed_mandatory_var_check redmine_endpoint "not set"
+ test "$redmine_user_id" || failed_mandatory_var_check redmine_user_id "could not be determined"
+ test "$redmine_key" || failed_mandatory_var_check redmine_key "not set"
+ test "$github_endpoint" || failed_mandatory_var_check github_endpoint "not set"
+ test "$github_user" || failed_mandatory_var_check github_user "could not be determined"
+ test "$github_token" || failed_mandatory_var_check github_token "not set"
+ test "$upstream_remote" || failed_mandatory_var_check upstream_remote "could not be determined"
+ test "$fork_remote" || failed_mandatory_var_check fork_remote "could not be determined"
+ if [ "$argument" = "--report" ] || [ "$argument" == "--interactive" ] ; then
+ read -r -d '' setup_summary <<EOM || true
+redmine_endpoint $redmine_endpoint
+redmine_user_id $redmine_user_id_display
+redmine_key $redmine_key_display
+github_endpoint $github_endpoint
+github_user $github_user_display
+github_token $github_token_display
+upstream_remote $upstream_remote_display
+fork_remote $fork_remote_display
+EOM
+ log bare
+ log bare "============================================="
+ log bare " ${this_script} setup report"
+ log bare "============================================="
+ log bare "variable name value"
+ log bare "---------------------------------------------"
+ log bare "$setup_summary"
+ log bare "---------------------------------------------"
+ else
+ verbose "redmine_endpoint $redmine_endpoint_display"
+ verbose "redmine_user_id $redmine_user_id_display"
+ verbose "redmine_key $redmine_key_display"
+ verbose "github_endpoint $github_endpoint_display"
+ verbose "github_user $github_user_display"
+ verbose "github_token $github_token_display"
+ verbose "upstream_remote $upstream_remote_display"
+ verbose "fork_remote $fork_remote_display"
+ fi
+ if [ "$argument" = "--report" ] || [ "$argument" = "--interactive" ] ; then
+ if [ "$setup_ok" ] ; then
+ info "setup is OK"
+ else
+ info "setup is NOT OK"
+ fi
+ log bare "=============================================="
+ log bare
+ fi
+}
+
+function warning {
+ log warning "$@"
+}
+
+
+#
+# are we in a local git clone?
+#
+
+if git status >/dev/null 2>&1 ; then
+ debug "In a local git clone. Good."
+else
+ error "This script must be run from inside a local git clone"
+ abort_due_to_setup_problem
+fi
+
+#
+# do we have jq available?
+#
+
+if command -v jq >/dev/null ; then
+ debug "jq is available. Good."
+else
+ error "This script uses jq, but it does not seem to be installed"
+ abort_due_to_setup_problem
+fi
+
+
+#
+# process command-line arguments
+#
+
+munged_options=$(getopt -o c:dhsv --long "cherry-pick-only,component:,debug,existing-pr:,force,fork:,help,milestones,prepare,setup,setup-report,troubleshooting,update-version,usage,verbose,version" -n "$this_script" -- "$@")
+eval set -- "$munged_options"
+
+ADVICE=""
+CHECK_MILESTONES=""
+CHERRY_PICK_ONLY=""
+CHERRY_PICK_PHASE="yes"
+DEBUG=""
+EXISTING_PR=""
+EXPLICIT_COMPONENT=""
+EXPLICIT_FORK=""
+FORCE=""
+HELP=""
+INTERACTIVE_SETUP_ROUTINE=""
+ISSUE=""
+PR_PHASE="yes"
+SETUP_OPTION=""
+TRACKER_PHASE="yes"
+TROUBLESHOOTING_ADVICE=""
+USAGE_ADVICE=""
+VERBOSE=""
+while true ; do
+ case "$1" in
+ --cherry-pick-only) CHERRY_PICK_PHASE="yes" ; PR_PHASE="" ; TRACKER_PHASE="" ; shift ;;
+ --component|-c) shift ; EXPLICIT_COMPONENT="$1" ; shift ;;
+ --debug|-d) DEBUG="$1" ; shift ;;
+ --existing-pr) shift ; EXISTING_PR="$1" ; CHERRY_PICK_PHASE="" ; PR_PHASE="" ; shift ;;
+ --force) FORCE="$1" ; shift ;;
+ --fork) shift ; EXPLICIT_FORK="$1" ; shift ;;
+ --help|-h) ADVICE="1" ; HELP="$1" ; shift ;;
+ --milestones) CHECK_MILESTONES="$1" ; shift ;;
+ --prepare) CHERRY_PICK_PHASE="yes" ; PR_PHASE="" ; TRACKER_PHASE="" ; shift ;;
+ --setup*|-s) SETUP_OPTION="$1" ; shift ;;
+ --troubleshooting) ADVICE="$1" ; TROUBLESHOOTING_ADVICE="$1" ; shift ;;
+ --update-version) update_version_number_and_exit ;;
+ --usage) ADVICE="$1" ; USAGE_ADVICE="$1" ; shift ;;
+ --verbose|-v) VERBOSE="$1" ; shift ;;
+ --version) display_version_message_and_exit ;;
+ --) shift ; ISSUE="$1" ; break ;;
+ *) echo "Internal error" ; false ;;
+ esac
+done
+
+if [ "$ADVICE" ] ; then
+ [ "$HELP" ] && usage
+ [ "$USAGE_ADVICE" ] && usage_advice
+ [ "$TROUBLESHOOTING_ADVICE" ] && troubleshooting_advice
+ exit 0
+fi
+
+if [ "$SETUP_OPTION" ] || [ "$CHECK_MILESTONES" ] ; then
+ ISSUE="0"
+fi
+
+if [[ $ISSUE =~ ^[0-9]+$ ]] ; then
+ issue=$ISSUE
+else
+ error "Invalid or missing argument"
+ usage
+ false
+fi
+
+if [ "$DEBUG" ]; then
+ set -x
+ VERBOSE="--verbose"
+fi
+
+if [ "$VERBOSE" ]; then
+ info "Verbose mode ON"
+ VERBOSE="--verbose"
+fi
+
+
+#
+# make sure setup has been completed
+#
+
+init_endpoints
+init_github_token
+init_redmine_key
+setup_ok="OK"
+if [ "$SETUP_OPTION" ] ; then
+ vet_setup --report
+ maybe_delete_deprecated_backport_common
+ if [ "$setup_ok" ] ; then
+ exit 0
+ else
+ default_val="y"
+ echo -n "Run the interactive setup routine now? (default: ${default_val}) "
+ yes_or_no_answer="$(get_user_input "$default_val")"
+ [ "$yes_or_no_answer" ] && yes_or_no_answer="${yes_or_no_answer:0:1}"
+ if [ "$yes_or_no_answer" = "y" ] ; then
+ INTERACTIVE_SETUP_ROUTINE="yes"
+ else
+ if [ "$FORCE" ] ; then
+ warning "--force was given; proceeding with broken setup"
+ else
+ info "Bailing out!"
+ exit 1
+ fi
+ fi
+ fi
+fi
+if [ "$INTERACTIVE_SETUP_ROUTINE" ] ; then
+ interactive_setup_routine
+else
+ vet_setup --normal-operation
+ maybe_delete_deprecated_backport_common
+fi
+if [ "$INTERACTIVE_SETUP_ROUTINE" ] || [ "$SETUP_OPTION" ] ; then
+ echo
+ if [ "$setup_ok" ] ; then
+ if [ "$ISSUE" ] && [ "$ISSUE" != "0" ] ; then
+ true
+ else
+ exit 0
+ fi
+ else
+ exit 1
+ fi
+fi
+vet_remotes
+[ "$setup_ok" ] || abort_due_to_setup_problem
+
+#
+# query remote GitHub API for active milestones
+#
+
+verbose "Querying GitHub API for active milestones"
+remote_api_output="$(curl -u ${github_user}:${github_token} --silent -X GET "https://api.github.com/repos/ceph/ceph/milestones")"
+active_milestones="$(echo "$remote_api_output" | jq -r '.[] | .title')"
+if [ "$active_milestones" = "null" ] ; then
+ error "Could not determine the active milestones"
+ bail_out_github_api "$remote_api_output"
+fi
+
+if [ "$CHECK_MILESTONES" ] ; then
+ check_milestones "$active_milestones"
+ exit 0
+fi
+
+#
+# query remote Redmine API for information about the Backport tracker issue
+#
+
+redmine_url="$(number_to_url "redmine" "${issue}")"
+debug "Considering Redmine issue: $redmine_url - is it in the Backport tracker?"
+
+remote_api_output="$(curl --silent "${redmine_url}.json")"
+tracker="$(echo "$remote_api_output" | jq -r '.issue.tracker.name')"
+if [ "$tracker" = "Backport" ]; then
+ debug "Yes, $redmine_url is a Backport issue"
+else
+ error "Issue $redmine_url is not a Backport"
+ info "(This script only works with Backport tracker issues.)"
+ false
+fi
+
+debug "Looking up release/milestone of $redmine_url"
+milestone="$(echo "$remote_api_output" | jq -r '.issue.custom_fields[0].value')"
+if [ "$milestone" ] ; then
+ debug "Release/milestone: $milestone"
+else
+ error "could not obtain release/milestone from ${redmine_url}"
+ false
+fi
+
+debug "Looking up status of $redmine_url"
+tracker_status_id="$(echo "$remote_api_output" | jq -r '.issue.status.id')"
+tracker_status_name="$(echo "$remote_api_output" | jq -r '.issue.status.name')"
+if [ "$tracker_status_name" ] ; then
+ debug "Tracker status: $tracker_status_name"
+ if [ "$FORCE" ] || [ "$EXISTING_PR" ] ; then
+ test "$(check_tracker_status "$tracker_status_name")" || true
+ else
+ test "$(check_tracker_status "$tracker_status_name")"
+ fi
+else
+ error "could not obtain status from ${redmine_url}"
+ false
+fi
+
+tracker_title="$(echo "$remote_api_output" | jq -r '.issue.subject')"
+debug "Title of $redmine_url is ->$tracker_title<-"
+
+tracker_description="$(echo "$remote_api_output" | jq -r '.issue.description')"
+debug "Description of $redmine_url is ->$tracker_description<-"
+
+tracker_assignee_id="$(echo "$remote_api_output" | jq -r '.issue.assigned_to.id')"
+tracker_assignee_name="$(echo "$remote_api_output" | jq -r '.issue.assigned_to.name')"
+if [ "$tracker_assignee_id" = "null" ] || [ "$tracker_assignee_id" = "$redmine_user_id" ] ; then
+ true
+else
+ error_msg_1="$redmine_url is assigned to someone else: $tracker_assignee_name (ID $tracker_assignee_id)"
+ error_msg_2="(my ID is $redmine_user_id)"
+ if [ "$FORCE" ] || [ "$EXISTING_PR" ] ; then
+ warning "$error_msg_1"
+ info "$error_msg_2"
+ info "--force and/or --existing-pr given: continuing execution"
+ else
+ error "$error_msg_1"
+ info "$error_msg_2"
+ info "Cowardly refusing to continue"
+ false
+ fi
+fi
+
+if [ -z "$(is_active_milestone "$milestone")" ] ; then
+ error "$redmine_url is a backport to $milestone which is not an active milestone"
+ info "Cowardly refusing to work on a backport to an inactive release"
+ false
+fi
+
+milestone_number=$(try_known_milestones "$milestone")
+if [ "$milestone_number" -gt "0" ] >/dev/null 2>&1 ; then
+ debug "Milestone ->$milestone<- is known to have number ->$milestone_number<-: skipping remote API call"
+else
+ warning "Milestone ->$milestone<- is unknown to the script: falling back to GitHub API"
+ milestone_number=$(milestone_number_from_remote_api "$milestone")
+fi
+target_branch="$milestone"
+info "milestone/release is $milestone"
+debug "milestone number is $milestone_number"
+
+if [ "$CHERRY_PICK_PHASE" ] ; then
+ local_branch=wip-${issue}-${target_branch}
+ if git show-ref --verify --quiet "refs/heads/$local_branch" ; then
+ if [ "$FORCE" ] ; then
+ warning "local branch $local_branch already exists"
+ info "--force was given: will clobber $local_branch and attempt automated cherry-pick"
+ cherry_pick_phase
+ elif [ "$CHERRY_PICK_ONLY" ] ; then
+ error "local branch $local_branch already exists"
+ info "Cowardly refusing to clobber $local_branch as it might contain valuable data"
+ info "(hint) run with --force to clobber it and attempt the cherry-pick"
+ false
+ fi
+ if [ "$FORCE" ] || [ "$CHERRY_PICK_ONLY" ] ; then
+ true
+ else
+ info "local branch $local_branch already exists: skipping cherry-pick phase"
+ fi
+ else
+ info "$local_branch does not exist: will create it and attempt automated cherry-pick"
+ cherry_pick_phase
+ fi
+fi
+
+if [ "$PR_PHASE" ] ; then
+ current_branch=$(git rev-parse --abbrev-ref HEAD)
+ if [ "$current_branch" = "$local_branch" ] ; then
+ true
+ else
+ set -x
+ git checkout "$local_branch"
+ set +x
+ maybe_restore_set_x
+ fi
+
+ set -x
+ git push -u "$fork_remote" "$local_branch"
+ set +x
+ maybe_restore_set_x
+
+ original_issue=""
+ original_pr=""
+ original_pr_url=""
+
+ debug "Generating backport PR description"
+ populate_original_issue
+ populate_original_pr
+ desc="backport tracker: ${redmine_url}"
+ if [ "$original_pr" ] || [ "$original_issue" ] ; then
+ desc="${desc}\n\n---\n"
+ [ "$original_pr" ] && desc="${desc}\nbackport of $(number_to_url "github" "${original_pr}")"
+ [ "$original_issue" ] && desc="${desc}\nparent tracker: $(number_to_url "redmine" "${original_issue}")"
+ fi
+ desc="${desc}\n\nthis backport was staged using ceph-backport.sh version ${SCRIPT_VERSION}\nfind the latest version at ${github_endpoint}/blob/main/src/script/ceph-backport.sh"
+
+ debug "Generating backport PR title"
+ if [ "$original_pr" ] ; then
+ backport_pr_title="${milestone}: $(curl --silent https://api.github.com/repos/ceph/ceph/pulls/${original_pr} | jq -r '.title')"
+ else
+ if [[ $tracker_title =~ ^${milestone}: ]] ; then
+ backport_pr_title="${tracker_title}"
+ else
+ backport_pr_title="${milestone}: ${tracker_title}"
+ fi
+ fi
+ if [[ "$backport_pr_title" =~ \" ]] ; then
+ backport_pr_title="${backport_pr_title//\"/\\\"}"
+ fi
+
+ debug "Opening backport PR"
+ if [ "$EXPLICIT_FORK" ] ; then
+ source_repo="$EXPLICIT_FORK"
+ else
+ source_repo="$github_user"
+ fi
+ remote_api_output=$(curl -u ${github_user}:${github_token} --silent --data-binary "{\"title\":\"${backport_pr_title}\",\"head\":\"${source_repo}:${local_branch}\",\"base\":\"${target_branch}\",\"body\":\"${desc}\"}" "https://api.github.com/repos/ceph/ceph/pulls")
+ backport_pr_number=$(echo "$remote_api_output" | jq -r .number)
+ if [ -z "$backport_pr_number" ] || [ "$backport_pr_number" = "null" ] ; then
+ error "failed to open backport PR"
+ bail_out_github_api "$remote_api_output"
+ fi
+ backport_pr_url="$(number_to_url "github" "$backport_pr_number")"
+ info "Opened backport PR ${backport_pr_url}"
+fi
+
+if [ "$EXISTING_PR" ] ; then
+ populate_original_issue
+ populate_original_pr
+ backport_pr_number="$EXISTING_PR"
+ backport_pr_url="$(number_to_url "github" "$backport_pr_number")"
+ existing_pr_routine
+fi
+
+if [ "$PR_PHASE" ] || [ "$EXISTING_PR" ] ; then
+ maybe_update_pr_milestone_labels
+ pgrep firefox >/dev/null && firefox "${backport_pr_url}"
+fi
+
+if [ "$TRACKER_PHASE" ] ; then
+ debug "Considering Backport tracker issue ${redmine_url}"
+ status_should_be=2 # In Progress
+ desc_should_be="${backport_pr_url}"
+ assignee_should_be="${redmine_user_id}"
+ if [ "$EXISTING_PR" ] ; then
+ data_binary="{\"issue\":{\"description\":\"${desc_should_be}\",\"status_id\":${status_should_be}}}"
+ else
+ data_binary="{\"issue\":{\"description\":\"${desc_should_be}\",\"status_id\":${status_should_be},\"assigned_to_id\":${assignee_should_be}}}"
+ fi
+ remote_api_status_code="$(curl --write-out '%{http_code}' --output /dev/null --silent -X PUT --header "Content-type: application/json" --data-binary "${data_binary}" "${redmine_url}.json?key=$redmine_key")"
+ if [ "$FORCE" ] || [ "$EXISTING_PR" ] ; then
+ true
+ else
+ if [ "${remote_api_status_code:0:1}" = "2" ] ; then
+ true
+ elif [ "${remote_api_status_code:0:1}" = "4" ] ; then
+ warning "remote API ${redmine_endpoint} returned status ${remote_api_status_code}"
+ info "This merely indicates that you cannot modify issue fields at ${redmine_endpoint}"
+ info "and does not limit your ability to do backports."
+ else
+ error "Remote API ${redmine_endpoint} returned unexpected response code ${remote_api_status_code}"
+ fi
+ fi
+ # check if anything actually changed on the Redmine issue
+ remote_api_output=$(curl --silent "${redmine_url}.json?include=journals")
+ status_is="$(echo "$remote_api_output" | jq -r '.issue.status.id')"
+ desc_is="$(echo "$remote_api_output" | jq -r '.issue.description')"
+ assignee_is="$(echo "$remote_api_output" | jq -r '.issue.assigned_to.id')"
+ tracker_was_updated=""
+ tracker_is_in_desired_state="yes"
+ [ "$(tracker_component_was_updated "status" "$tracker_status_id" "$status_is")" ] && tracker_was_updated="yes"
+ [ "$(tracker_component_was_updated "desc" "$tracker_description" "$desc_is")" ] && tracker_was_updated="yes"
+ if [ "$EXISTING_PR" ] ; then
+ true
+ else
+ [ "$(tracker_component_was_updated "assignee" "$tracker_assignee_id" "$assignee_is")" ] && tracker_was_updated="yes"
+ fi
+ [ "$(tracker_component_is_in_desired_state "status" "$status_is" "$status_should_be")" ] || tracker_is_in_desired_state=""
+ [ "$(tracker_component_is_in_desired_state "desc" "$desc_is" "$desc_should_be")" ] || tracker_is_in_desired_state=""
+ if [ "$EXISTING_PR" ] ; then
+ true
+ else
+ [ "$(tracker_component_is_in_desired_state "assignee" "$assignee_is" "$assignee_should_be")" ] || tracker_is_in_desired_state=""
+ fi
+ if [ "$tracker_is_in_desired_state" ] ; then
+ [ "$tracker_was_updated" ] && info "Backport tracker ${redmine_url} was updated"
+ info "Backport tracker ${redmine_url} is in the desired state"
+ pgrep firefox >/dev/null && firefox "${redmine_url}"
+ exit 0
+ fi
+ if [ "$tracker_was_updated" ] ; then
+ warning "backport tracker ${redmine_url} was updated, but is not in the desired state. Please check it."
+ pgrep firefox >/dev/null && firefox "${redmine_url}"
+ exit 1
+ else
+ data_binary="{\"issue\":{\"notes\":\"please link this Backport tracker issue with GitHub PR ${desc_should_be}\nceph-backport.sh version ${SCRIPT_VERSION}\"}}"
+ remote_api_status_code=$(curl --write-out '%{http_code}' --output /dev/null --silent -X PUT --header "Content-type: application/json" --data-binary "${data_binary}" "${redmine_url}.json?key=$redmine_key")
+ if [ "${remote_api_status_code:0:1}" = "2" ] ; then
+ info "Comment added to ${redmine_url}"
+ fi
+ exit 0
+ fi
+fi
diff --git a/src/script/ceph-debug-docker.sh b/src/script/ceph-debug-docker.sh
new file mode 100755
index 000000000..22afec8a0
--- /dev/null
+++ b/src/script/ceph-debug-docker.sh
@@ -0,0 +1,175 @@
+#!/usr/bin/env bash
+
+# This can be run from, e.g., the senta machines, which have docker available.
+# You may need to run this script with sudo.
+#
+# Once you have booted into the image, you should be able to debug the core file:
+# $ gdb -q /ceph/teuthology-archive/.../coredump/1500013578.8678.core
+#
+# You may want to install other packages (yum) as desired.
+#
+# Once you're finished, please delete old images in a timely fashion.
+
+set -e
+
+CACHE=""
+FLAVOR="default"
+SUDO=""
+PRIVILEGED=""
+
+function run {
+ printf "%s\n" "$*"
+ "$@"
+}
+
+function main {
+ eval set -- $(getopt --name "$0" --options 'h' --longoptions 'help,no-cache,flavor:,sudo,privileged' -- "$@")
+
+ while [ "$#" -gt 0 ]; do
+ case "$1" in
+ -h|--help)
+ printf '%s: [--no-cache] [--flavor FLAVOR] [--sudo] [--privileged] <branch>[:sha1] <environment>\n' "$0"
+ exit 0
+ ;;
+ --no-cache)
+ CACHE="--no-cache"
+ shift
+ ;;
+ --flavor)
+ FLAVOR=$2
+ shift 2
+ ;;
+ --privileged)
+ PRIVILEGED=--privileged
+ shift 1
+ ;;
+ --sudo)
+ SUDO=sudo
+ shift 1
+ ;;
+ --)
+ shift
+ break
+ ;;
+ esac
+ done
+
+ if [ -z "$1" ]; then
+ printf "specify the branch [default \"main:latest\"]: "
+ read -r branch
+ if [ -z "$branch" ]; then
+ branch=main:latest
+ fi
+ else
+ branch="$1"
+ fi
+ if [ "${branch%%:*}" != "${branch}" ]; then
+ sha=${branch##*:}
+ else
+ sha=latest
+ fi
+ branch=${branch%%:*}
+ printf "branch: %s\nsha1: %s\n" "$branch" "$sha"
+
+ if [ -z "$2" ]; then
+ printf "specify the build environment [default \"centos:8\"]: "
+ read -r env
+ if [ -z "$env" ]; then
+ env=centos:8
+ fi
+ else
+ env="$2"
+ fi
+ printf "env: %s\n" "$env"
+
+ if [ -n "$SUDO_USER" ]; then
+ user="$SUDO_USER"
+ elif [ -n "$USER" ]; then
+ user="$USER"
+ else
+ user="$(whoami)"
+ fi
+
+ tag="${user}:ceph-ci-${branch}-${sha}-${env/:/-}"
+
+ T=$(mktemp -d)
+ pushd "$T"
+ case "$env" in
+ centos:stream)
+ distro="centos/8"
+ ;;
+ *)
+ distro="${env/://}"
+ esac
+ api_url="https://shaman.ceph.com/api/search/?status=ready&project=ceph&flavor=${FLAVOR}&distros=${distro}/$(arch)&ref=${branch}&sha1=${sha}"
+ repo_url="$(wget -O - "$api_url" | jq -r '.[0].chacra_url')repo"
+ # validate url:
+ wget -O /dev/null "$repo_url"
+ if grep ubuntu <<<"$env" > /dev/null 2>&1; then
+ # Docker makes it impossible to access anything outside the CWD : /
+ wget -O cephdev.asc 'https://download.ceph.com/keys/autobuild.asc'
+ cat > Dockerfile <<EOF
+FROM ${env}
+
+WORKDIR /root
+RUN apt-get update --yes --quiet && \
+ apt-get install --yes --quiet screen gdb software-properties-common apt-transport-https curl
+COPY cephdev.asc cephdev.asc
+RUN apt-key add cephdev.asc && \
+ curl -L $repo_url | tee /etc/apt/sources.list.d/ceph_dev.list && \
+ cat /etc/apt/sources.list.d/ceph_dev.list|sed -e 's/^deb/deb-src/' >>/etc/apt/sources.list.d/ceph_dev.list && \
+ apt-get update --yes && \
+ DEBIAN_FRONTEND=noninteractive DEBIAN_PRIORITY=critical apt-get --assume-yes -q --no-install-recommends install -o Dpkg::Options::=--force-confnew --allow-unauthenticated ceph ceph-osd-dbg ceph-mds-dbg ceph-mgr-dbg ceph-mon-dbg ceph-common-dbg ceph-fuse-dbg ceph-test-dbg radosgw-dbg python3-cephfs python3-rados
+EOF
+ time run $SUDO docker build $CACHE --tag "$tag" .
+ else # try RHEL flavor
+ case "$env" in
+ centos:7)
+ python_bindings="python36-rados python36-cephfs"
+ base_debuginfo=""
+ ceph_debuginfo="ceph-debuginfo"
+ debuginfo=/etc/yum.repos.d/CentOS-Linux-Debuginfo.repo
+ ;;
+ centos:8)
+ python_bindings="python3-rados python3-cephfs"
+ base_debuginfo="glibc-debuginfo"
+ ceph_debuginfo="ceph-base-debuginfo"
+ debuginfo=/etc/yum.repos.d/CentOS-Linux-Debuginfo.repo
+ base_url="s|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g"
+ ;;
+ centos:stream)
+ python_bindings="python3-rados python3-cephfs"
+ base_debuginfo="glibc-debuginfo"
+ ceph_debuginfo="ceph-base-debuginfo"
+ debuginfo=/etc/yum.repos.d/CentOS-Stream-Debuginfo.repo
+ ;;
+ esac
+ if [ "${FLAVOR}" = "crimson" ]; then
+ ceph_debuginfo+=" ceph-crimson-osd-debuginfo ceph-crimson-osd"
+ fi
+ cat > Dockerfile <<EOF
+FROM ${env}
+
+WORKDIR /root
+RUN sed -i '${base_url}' /etc/yum.repos.d/CentOS-* && \
+ yum update -y && \
+ sed -i 's/enabled=0/enabled=1/' ${debuginfo} && \
+ yum update -y && \
+ yum install -y tmux epel-release wget psmisc ca-certificates gdb
+RUN wget -O /etc/yum.repos.d/ceph-dev.repo $repo_url && \
+ yum clean all && \
+ yum upgrade -y && \
+ yum install -y ceph ${base_debuginfo} ${ceph_debuginfo} ${python_bindings}
+EOF
+ time run $SUDO docker build $CACHE --tag "$tag" .
+ fi
+ popd
+ rm -rf -- "$T"
+
+ printf "built image %s\n" "$tag"
+
+ run $SUDO docker run $PRIVILEGED -ti -v /ceph:/ceph:ro -v /cephfs:/cephfs:ro -v /teuthology:/teuthology:ro "$tag"
+ return 0
+}
+
+main "$@"
diff --git a/src/script/ceph-release-notes b/src/script/ceph-release-notes
new file mode 100755
index 000000000..0d6024581
--- /dev/null
+++ b/src/script/ceph-release-notes
@@ -0,0 +1,375 @@
+#!/usr/bin/env python
+# Originally modified from A. Israel's script seen at
+# https://gist.github.com/aisrael/b2b78d9dfdd176a232b9
+"""To run this script first install the dependencies
+
+
+ python3 -m venv v
+ source v/bin/activate
+ pip install githubpy GitPython requests
+
+Generate a GitHub access token; this is needed because anonymous access
+to GitHub's API will easily hit the rate limit, even with a single invocation.
+For details see:
+https://help.github.com/articles/creating-an-access-token-for-command-line-use/
+
+Next either set the github token as an env variable
+`GITHUB_ACCESS_TOKEN` or alternatively invoke the script with
+`--token` switch.
+
+Example:
+
+ ceph-release-notes -r tags/v0.87..origin/giant \
+ $(git rev-parse --show-toplevel)
+
+"""
+
+from __future__ import print_function
+import argparse
+import github
+import os
+import re
+import sys
+import requests
+import time
+
+from git import Repo
+
+
+fixes_re = re.compile(r"Fixes\:? #(\d+)")
+reviewed_by_re = re.compile(r"Rev(.*)By", re.IGNORECASE)
+# labels is the list of relevant labels defined for github.com/ceph/ceph
+labels = {'bluestore', 'build/ops', 'cephfs', 'common', 'core', 'mgr',
+ 'mon', 'performance', 'pybind', 'rdma', 'rgw', 'rbd', 'tests',
+ 'tools'}
+merge_re = re.compile(r"Merge (pull request|PR) #(\d+).*")
+# prefixes is the list of commit description prefixes we recognize
+prefixes = ['bluestore', 'build/ops', 'cephfs', 'cephx', 'cli', 'cmake',
+ 'common', 'core', 'crush', 'doc', 'fs', 'librados', 'librbd',
+ 'log', 'mds', 'mgr', 'mon', 'msg', 'objecter', 'osd', 'pybind',
+ 'rbd', 'rbd-mirror', 'rbd-nbd', 'rgw', 'tests', 'tools']
+signed_off_re = re.compile("Signed-off-by: (.+) <")
+tracker_re = re.compile(r"http://tracker.ceph.com/issues/(\d+)")
+rst_link_re = re.compile(r"([a-zA-Z0-9])_(\W)")
+release_re = re.compile(r"^(nautilus|octopus|pacific|quincy):\s*")
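+# In strict mode a backport PR title is expected to carry a release prefix
+# followed by a recognized component prefix, e.g. (hypothetical):
+#   "quincy: mon: fix election timeout" -> component "mon"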
+
+tracker_uri = "http://tracker.ceph.com/issues/{0}.json"
+
+
+def get_original_issue(issue, verbose):
+ r = requests.get(tracker_uri.format(issue),
+ params={"include": "relations"}).json()
+
+    # looking up the original issue only makes sense
+ # when dealing with an issue in the Backport tracker
+ if r["issue"]["tracker"]["name"] != "Backport":
+ if verbose:
+ print ("http://tracker.ceph.com/issues/" + issue +
+ " is from the tracker " + r["issue"]["tracker"]["name"] +
+ ", do not look for the original issue")
+ return issue
+
+ # if a Backport issue does not have a relation, keep it
+ if "relations" not in r["issue"]:
+ if verbose:
+ print ("http://tracker.ceph.com/issues/" + issue +
+ " has no relations, do not look for the original issue")
+ return issue
+
+ copied_to = [
+ str(i['issue_id']) for i in r["issue"]["relations"]
+ if i["relation_type"] == "copied_to"
+ ]
+ if copied_to:
+ if len(copied_to) > 1:
+ if verbose:
+ print ("ERROR: http://tracker.ceph.com/issues/" + issue +
+ " has more than one Copied To relation")
+ return issue
+ if verbose:
+ print ("http://tracker.ceph.com/issues/" + issue +
+ " is the backport of http://tracker.ceph.com/issues/" +
+ copied_to[0])
+ return copied_to[0]
+ else:
+ if verbose:
+ print ("http://tracker.ceph.com/issues/" + issue +
+ " has no copied_to relations; do not look for the" +
+ " original issue")
+ return issue
+
+
+def split_component(title, gh, number):
+ title_re = '(' + '|'.join(prefixes) + ')(:.*)'
+ match = re.match(title_re, title)
+ if match:
+ return match.group(1)+match.group(2)
+ else:
+ issue = gh.repos("ceph")("ceph").issues(number).get()
+ issue_labels = {it['name'] for it in issue['labels']}
+ if 'documentation' in issue_labels:
+ return 'doc: ' + title
+ item = set(prefixes).intersection(issue_labels)
+ if item:
+ return ",".join(sorted(item)) + ': ' + title
+ else:
+ return 'UNKNOWN: ' + title
+
+def _title_message(commit, pr, strict):
+ title = pr['title']
+ message_lines = commit.message.split('\n')
+ if strict or len(message_lines) < 1:
+ return (title, None)
+ lines = []
+ for line in message_lines[1:]:
+ if reviewed_by_re.match(line):
+ continue
+ line = line.strip()
+ if line:
+ lines.append(line)
+ if len(lines) == 0:
+ return (title, None)
+ duplicates_pr_title = lines[0] == pr['title'].strip()
+ if duplicates_pr_title:
+ return (title, None)
+ assert len(lines) > 0, "missing message content"
+ if len(lines) == 1:
+ # assume that a single line means the intention is to
+ # re-write the PR title
+ return (lines[0], None)
+ elif len(lines) < 3 and 'refs/pull' in lines[0]:
+ # assume the intent was rewriting the title and something like
+ # ptl-tool was used to generate the merge message
+ return (lines[1], None)
+ message = " " + "\n ".join(lines)
+ return (title, message)
+
+def make_release_notes(gh, repo, ref, plaintext, html, markdown, verbose, strict, use_tags, include_pr_messages):
+
+ issue2prs = {}
+ pr2issues = {}
+ pr2info = {}
+
+ for commit in repo.iter_commits(ref, merges=True):
+ merge = merge_re.match(commit.summary)
+ if not merge:
+ continue
+ number = merge.group(2)
+ print ("Considering PR#" + number)
+ # do not pick up ceph/ceph-qa-suite.git PRs
+ if int(number) < 1311:
+ print ("Ignoring low-numbered PR, probably picked up from"
+ " ceph/ceph-qa-suite.git")
+ continue
+
+ attempts = 0
+ retries = 30
+ while attempts < retries:
+ try:
+ pr = gh.repos("ceph")("ceph").pulls(number).get()
+ break
+ except Exception:
+ if attempts < retries:
+ attempts += 1
+ sleep_time = 2 * attempts
+ print(f"Failed to fetch PR {number}, sleeping for {sleep_time} seconds")
+ time.sleep(sleep_time)
+ else:
+ print(f"Could not fetch PR {number} in {retries} tries.")
+ raise
+ (title, message) = _title_message(commit, pr, strict)
+ issues = []
+ if pr['body']:
+ issues = fixes_re.findall(pr['body']) + tracker_re.findall(
+ pr['body']
+ )
+
+ authors = {}
+ for c in repo.iter_commits(
+ "{sha1}^1..{sha1}^2".format(sha1=commit.hexsha)
+ ):
+ for author in re.findall(
+                    r"Signed-off-by:\s*(.*?)\s*<", c.message
+ ):
+ authors[author] = 1
+ issues.extend(fixes_re.findall(c.message) +
+ tracker_re.findall(c.message))
+ if authors:
+ author = ", ".join(authors.keys())
+ else:
+ author = commit.parents[-1].author.name
+
+ if strict and not issues:
+ print ("ERROR: https://github.com/ceph/ceph/pull/" +
+ str(number) + " has no associated issue")
+ continue
+
+ if strict:
+ title_re = (
+                r'^(?:nautilus|octopus|pacific|quincy):\s+(' +
+ '|'.join(prefixes) +
+ ')(:.*)'
+ )
+ match = re.match(title_re, title)
+ if not match:
+ print ("ERROR: https://github.com/ceph/ceph/pull/" +
+ str(number) + " title " + title +
+ " does not match " + title_re)
+ else:
+ title = match.group(1) + match.group(2)
+ if use_tags:
+ title = split_component(title, gh, number)
+
+ title = title.strip(' \t\n\r\f\v\.\,\;\:\-\=')
+        # escape asterisks, which are used by reStructuredText for inline
+        # emphasis
+        title = title.replace('*', '\\*')
+        # and escape the underscores that denote a link
+ title = rst_link_re.sub(r'\1\_\2', title)
+ # remove release prefix for backports
+ title = release_re.sub('', title)
+ pr2info[number] = (author, title, message)
+
+ for issue in set(issues):
+ if strict:
+ issue = get_original_issue(issue, verbose)
+ issue2prs.setdefault(issue, set([])).add(number)
+ pr2issues.setdefault(number, set([])).add(issue)
+ sys.stdout.write('.')
+
+ print (" done collecting merges.")
+
+ if strict:
+ for (issue, prs) in issue2prs.items():
+ if len(prs) > 1:
+ print (">>>>>>> " + str(len(prs)) + " pr for issue " +
+ issue + " " + str(prs))
+
+ for (pr, (author, title, message)) in sorted(
+ pr2info.items(), key=lambda title: title[1][1].lower()
+ ):
+ if pr in pr2issues:
+ if plaintext:
+ issues = map(lambda issue: '#' + str(issue), pr2issues[pr])
+ elif html:
+ issues = map(lambda issue: (
+ '<a href="http://tracker.ceph.com/issues/{issue}">issue#{issue}</a>'
+ ).format(issue=issue), pr2issues[pr]
+ )
+ elif markdown:
+ issues = map(lambda issue: (
+ '[issue#{issue}](http://tracker.ceph.com/issues/{issue})'
+ ).format(issue=issue), pr2issues[pr]
+ )
+ else:
+ issues = map(lambda issue: (
+ '`issue#{issue} <http://tracker.ceph.com/issues/{issue}>`_'
+ ).format(issue=issue), pr2issues[pr]
+ )
+ issues = ", ".join(issues) + ", "
+ else:
+ issues = ''
+ if plaintext:
+ print ("* {title} ({issues}{author})".format(
+ title=title,
+ issues=issues,
+ author=author
+ )
+ )
+ elif html:
+ print (
+ (
+ "<li><p>{title} ({issues}<a href=\""
+ "https://github.com/ceph/ceph/pull/{pr}\""
+ ">pr#{pr}</a>, {author})</p></li>"
+ ).format(
+ title=title,
+ issues=issues,
+ author=author, pr=pr
+ )
+ )
+ elif markdown:
+            markdown_title = title.replace('_', '\\_').replace('.', '<span></span>.')
+ print ("- {title} ({issues}[pr#{pr}](https://github.com/ceph/ceph/pull/{pr}), {author})\n".format(
+ title=markdown_title,
+ issues=issues,
+ author=author, pr=pr
+ )
+ )
+ else:
+ print (
+ (
+ "* {title} ({issues}`pr#{pr} <"
+ "https://github.com/ceph/ceph/pull/{pr}"
+ ">`_, {author})"
+ ).format(
+ title=title,
+ issues=issues,
+ author=author, pr=pr
+ )
+ )
+ if include_pr_messages and message:
+ print (message)
+
+
+if __name__ == "__main__":
+ desc = '''
+ Make ceph release notes for a given revision. Eg usage:
+
+ $ ceph-release-notes -r tags/v0.87..origin/giant \
+ $(git rev-parse --show-toplevel)
+
+    It is recommended to set the GITHUB_ACCESS_TOKEN environment variable
+    in order to avoid hitting the API rate limits.
+ '''
+
+ parser = argparse.ArgumentParser(
+ description=desc,
+ formatter_class=argparse.RawTextHelpFormatter
+ )
+
+ parser.add_argument("--rev", "-r",
+ help="git revision range for creating release notes")
+ parser.add_argument("--text", "-t",
+ action='store_true', default=None,
+ help="output plain text only, no links")
+ parser.add_argument("--html",
+ action='store_true', default=None,
+ help="output html format for (old wordpress) website blog")
+ parser.add_argument("--markdown",
+ action='store_true', default=None,
+ help="output markdown format for new ceph.io blog")
+ parser.add_argument("--verbose", "-v",
+ action='store_true', default=None,
+ help="verbose")
+ parser.add_argument("--strict",
+ action='store_true', default=None,
+ help="strict, recommended only for backport releases")
+ parser.add_argument("repo", metavar="repo",
+ help="path to ceph git repo")
+ parser.add_argument(
+ "--token",
+ default=os.getenv("GITHUB_ACCESS_TOKEN"),
+ help="Github Access Token ($GITHUB_ACCESS_TOKEN otherwise)",
+ )
+ parser.add_argument("--use-tags", default=False,
+ help="Use github tags to guess the component")
+ parser.add_argument("--include-pr-messages", default=False, action='store_true',
+ help="Include full PR message in addition to PR title, if available")
+
+ args = parser.parse_args()
+ gh = github.GitHub(
+ access_token=args.token)
+
+ make_release_notes(
+ gh,
+ Repo(args.repo),
+ args.rev,
+ args.text,
+ args.html,
+ args.markdown,
+ args.verbose,
+ args.strict,
+ args.use_tags,
+ args.include_pr_messages
+ )
diff --git a/src/script/ceph_dump_log.py b/src/script/ceph_dump_log.py
new file mode 100644
index 000000000..5fb947d83
--- /dev/null
+++ b/src/script/ceph_dump_log.py
@@ -0,0 +1,92 @@
+# Copyright (C) 2018 Red Hat Inc.
+#
+# Authors: Sergio Lopez Pascual <slopezpa@redhat.com>
+# Brad Hubbard <bhubbard@redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+
+# By default ceph daemons and clients maintain a list of log_max_recent (default
+# 10000) log entries at a high debug level. This script will attempt to dump out
+# that log from a ceph::log::Log* passed to the ceph-dump-log function or, if no
+# object is passed, default to the globally available 'g_ceph_context->_log'
+# (thanks Kefu). This pointer may be obtained via the _log member of a
+# CephContext object (i.e. *cct->_log) from any thread that contains such a
+# CephContext. Normally, you will find a thread waiting in
+# ceph::logging::Log::entry and the 'this' pointer from such a frame can also be
+# passed to ceph-dump-log.
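+#
+# A typical session might look like this (illustrative; paths and pointer
+# expressions will vary with your setup):
+#
+#   (gdb) source src/script/ceph_dump_log.py
+#   (gdb) ceph-dump-log                 # uses g_ceph_context->_log
+#   (gdb) ceph-dump-log cct->_log       # or pass an explicit ceph::log::Log*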
+
+import gdb
+from datetime import datetime
+
+try:
+ # Python 2 forward compatibility
+ range = xrange
+except NameError:
+ pass
+
+class CephDumpLog(gdb.Command):
+ def __init__(self):
+ super(CephDumpLog, self).__init__(
+ 'ceph-dump-log',
+ gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL, False)
+
+ def invoke(self, args, from_tty):
+ arg_list = gdb.string_to_argv(args)
+ if len(arg_list) < 1:
+ log = gdb.parse_and_eval('g_ceph_context->_log')
+ else:
+ log = gdb.parse_and_eval(arg_list[0])
+
+ luminous_mimic = None
+
+ try:
+ entry = log['m_recent']['m_head']
+ size = log['m_recent']['m_len']
+ luminous_mimic = True
+ except gdb.error:
+ entry = log['m_recent']['m_first']
+ size = log['m_recent']['m_size']
+ end = log['m_recent']['m_end']
+ buff = log['m_recent']['m_buff']
+
+ for i in range(size):
+ if luminous_mimic:
+ try: # early luminous
+ stamp = int(str(entry['m_stamp']['tv']['tv_sec']) + str(entry['m_stamp']['tv']['tv_nsec']))
+ logline = entry['m_streambuf']['m_buf']
+ strlen = int(entry['m_streambuf']['m_buf_len'])
+ except gdb.error: # mimic
+ stamp = entry['m_stamp']['__d']['__r']['count']
+ pptr = entry['m_data']['m_pptr']
+ logline = entry['m_data']['m_buf']
+ strlen = int(pptr - logline)
+ else:
+ stamp = entry['m_stamp']['__d']['__r']['count']
+ logline = entry['str']['m_holder']['m_start']
+ strlen = int(entry['str']['m_holder']['m_size'])
+ thread = entry['m_thread']
+ prio = entry['m_prio']
+ subsys = entry['m_subsys']
+ dt = datetime.fromtimestamp(int(stamp) / 1e9) # Giving up some precision
+ gdb.write(dt.strftime('%Y-%m-%d %H:%M:%S.%f '))
+ gdb.write("thread: {0:#x} priority: {1} subsystem: {2} ".
+ format(int(thread), prio, subsys))
+ gdb.write(logline.string("ascii", errors='ignore')[0:strlen])
+ gdb.write("\n")
+ if luminous_mimic:
+ entry = entry['m_next'].dereference()
+ else:
+ entry = entry + 1
+ if entry >= end:
+ entry = buff
+
+CephDumpLog()
diff --git a/src/script/check_commands.sh b/src/script/check_commands.sh
new file mode 100755
index 000000000..589c3d6a6
--- /dev/null
+++ b/src/script/check_commands.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+
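+# Collect every command string declared via COMMAND("...") or
+# COMMAND_WITH_FLAG("...") -- e.g. a declaration such as
+# COMMAND("osd pool ls", ...) yields the string "osd pool ls" (hypothetical
+# example) -- and verify that each one is mentioned under src/test/ or qa/.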
+git grep -e COMMAND\( -e COMMAND_WITH_FLAG\( | grep -o "(\"[a-zA-Z ]*\"" | grep -o "[a-zA-Z ]*" | sort | uniq > commands.txt
+missing_test=false
+good_tests=""
+bad_tests=""
+while read cmd; do
+ if git grep -q "$cmd" -- src/test qa/; then
+ good_tests="$good_tests '$cmd'"
+ else
+ echo "'$cmd' has no apparent tests"
+ missing_test=true
+ bad_tests="$bad_tests '$cmd'"
+ fi
+done < commands.txt
+
+if [ "$missing_test" == true ]; then
+ echo "Missing tests!" $bad_tests
+ exit 1;
+fi
diff --git a/src/script/cmake_uninstall.cmake.in b/src/script/cmake_uninstall.cmake.in
new file mode 100644
index 000000000..4c07dc7bd
--- /dev/null
+++ b/src/script/cmake_uninstall.cmake.in
@@ -0,0 +1,21 @@
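+# Invoked via the "uninstall" custom target defined in
+# src/script/CMakeLists.txt, e.g. (illustrative):
+#   cmake --build . --target uninstall
+# Removes every file recorded in the build tree's install_manifest.txt.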
+if(NOT EXISTS "@CMAKE_BINARY_DIR@/install_manifest.txt")
+ message(FATAL_ERROR "Cannot find install manifest: @CMAKE_BINARY_DIR@/install_manifest.txt")
+endif(NOT EXISTS "@CMAKE_BINARY_DIR@/install_manifest.txt")
+
+file(READ "@CMAKE_BINARY_DIR@/install_manifest.txt" files)
+string(REGEX REPLACE "\n" ";" files "${files}")
+foreach(file ${files})
+ message(STATUS "Uninstalling $ENV{DESTDIR}${file}")
+ if(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}")
+ exec_program(
+ "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\""
+ OUTPUT_VARIABLE rm_out
+ RETURN_VALUE rm_retval
+ )
+ if(NOT "${rm_retval}" STREQUAL 0)
+ message(FATAL_ERROR "Problem when removing $ENV{DESTDIR}${file}")
+ endif(NOT "${rm_retval}" STREQUAL 0)
+ else(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}")
+ message(STATUS "File $ENV{DESTDIR}${file} does not exist.")
+ endif(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}")
+endforeach(file)
diff --git a/src/script/cpatch b/src/script/cpatch
new file mode 100755
index 000000000..e63b765b7
--- /dev/null
+++ b/src/script/cpatch
@@ -0,0 +1,246 @@
+#!/bin/bash -e
+
+SCRIPT=$(readlink -f "$0")
+SCRIPTPATH=$(dirname "$SCRIPT")
+BUILDPATH=$(pwd)
+
+if [ ! -e Makefile ] && [ ! -e build.ninja ] || [ ! -e ../do_cmake.sh ]; then
+ echo "must run from cmake build dir"
+ exit 1
+fi
+
+base="quay.ceph.io/ceph-ci/ceph:main"
+target=""
+push=0
+strip=1
+
+py=0
+dashboard=0
+core=0
+cephfs=0
+rgw=0
+rbd=0
+all=1
+asroot=""
+
+usage() {
+ echo "usage: $SCRIPT [options]"
+ echo " --base <image> base container image [$base]"
+ echo " --target <image> target image (required)"
+ echo " --push push when done"
+ echo " --strip strip binaries"
+ echo " --root-build build image as root"
+ echo
+ echo " --py python components (python-common, mgr)"
+ echo " --dashboard dashboard"
+ echo " --core mon, mgr, osd, mds, bins and libraries"
+ echo " --rgw radosgw, radosgw-admin"
+}
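+
+# Example invocation (illustrative), run from the cmake build directory:
+#   ../src/script/cpatch --py --target quay.io/myuser/ceph:wip-myfix --push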
+
+while [ -n "$1" ]; do
+ case $1 in
+ --base)
+ shift
+ base="$1"
+ ;;
+ --target | -t)
+ shift
+ target="$1"
+ ;;
+ --nostrip)
+ strip=0
+ ;;
+ --root-build)
+ asroot="sudo"
+ ;;
+ -h | --help)
+ usage
+ exit 0
+ ;;
+ --push)
+ push=1
+ ;;
+
+ --py)
+ py=1
+ all=0
+ ;;
+ --dashboard)
+ py=1
+ dashboard=1
+ all=0
+ ;;
+ --core)
+ core=1
+ all=0
+ rbd=1
+ cephfs=1
+ ;;
+ --rgw)
+ rgw=1
+ all=0
+ ;;
+
+ *)
+ echo "unrecognized option $1"
+ exit 1
+ ;;
+ esac
+ shift
+done
+
+if [ -z "$target" ]; then
+ echo "must specify --target <image>"
+ exit 1
+fi
+
+if [ -x /usr/bin/podman ]; then
+ runtime="podman"
+elif [ -x /usr/bin/docker ]; then
+ runtime="docker"
+else
+ echo "cannot find podman or docker in PATH"
+ exit 1
+fi
+
+TMP="$BUILDPATH/tmp.cpatch"
+if [ -d $TMP ]; then rm -rf $TMP ; fi
+mkdir -p $TMP
+
+if [ $all -eq 1 ]; then
+ echo "consider --py, --core, and/or --rgw for an abbreviated (faster) build."
+fi
+
+dockerfile="FROM $base"$'\n'
+
+if [ $py -eq 1 ] || [ $all -eq 1 ]; then
+ pushd ../src/pybind/mgr > /dev/null
+ find ./ -name "*.pyc" -exec rm -f {} \;
+ if [ $dashboard -eq 1 ] || [ $all -eq 1 ]; then
+ echo "py + dashboard"
+ exclude=""
+ else
+ echo "py"
+        # Exclude node_modules because it contains the huge sources in
+        # dashboard/frontend
+ exclude="--exclude=node_modules"
+ fi
+ tar $exclude --exclude=tests --exclude-backups -cf $TMP/mgr_plugins.tar *
+ popd > /dev/null
+ dockerfile+=$'ADD mgr_plugins.tar /usr/share/ceph/mgr\n'
+
+ pushd ../src/python-common > /dev/null
+ find ./ -name "*.pyc" -exec rm -f {} \;
+    # Exclude node_modules because it contains the huge sources in dashboard/frontend
+ tar --exclude=node_modules --exclude=tests --exclude-backups -cf $TMP/python_common.tar *
+ popd > /dev/null
+ dockerfile+=$'ADD python_common.tar /usr/lib/python3.8/site-packages\n'
+
+    pushd lib/cython_modules/lib.3 > /dev/null
+ CYTHONLIBS="*.cpython-3*.so"
+ mkdir -p $TMP/cythonlib
+ for f in $CYTHONLIBS; do cp $f $TMP/cythonlib ; done
+ [ $strip -eq 1 ] && strip $TMP/cythonlib/*
+ popd > /dev/null
+ dockerfile+=$'ADD cythonlib /usr/lib64/python3.8/site-packages\n'
+
+ # cephadm
+ pushd ../src/cephadm > /dev/null
+    ./build.sh $TMP/cephadm
+ dockerfile+=$'ADD cephadm /usr/sbin/cephadm\n'
+ popd > /dev/null
+fi
+
+if [ $core -eq 1 ] || [ $all -eq 1 ]; then
+ # binaries are annoying because the ceph version is embedded all over
+ # the place, so we have to include everything but the kitchen sink.
+ echo "core"
+
+ BINS="ceph-mgr ceph-mon ceph-osd rados"
+ mkdir -p $TMP/bin
+ for f in $BINS; do cp bin/$f $TMP/bin ; done
+ [ $strip -eq 1 ] && strip $TMP/bin/*
+ dockerfile+=$'ADD bin /usr/bin\n'
+
+ # We need ceph-common to support the binaries
+ # We need librados/rbd to support mgr modules
+ # that import the python bindings
+ LIBS="libceph-common.so.2 libceph-common.so librados.so.2 librados.so librados.so.2.0.0"
+ mkdir -p $TMP/lib
+ for f in $LIBS; do cp lib/$f $TMP/lib ; done
+ [ $strip -eq 1 ] && strip $TMP/lib/*
+ dockerfile+=$'ADD lib /usr/lib64\n'
+
+ ECLIBS="libec_*.so*"
+ mkdir -p $TMP/eclib
+ for f in lib/$ECLIBS; do cp $f $TMP/eclib ; done
+ [ $strip -eq 1 ] && strip $TMP/eclib/*
+ dockerfile+=$'ADD eclib /usr/lib64/ceph/erasure-code\n'
+
+ CLSLIBS="libcls_*.so*"
+ mkdir -p $TMP/clslib
+ for f in lib/$CLSLIBS; do cp $f $TMP/clslib ; done
+ [ $strip -eq 1 ] && strip $TMP/clslib/*
+ dockerfile+=$'ADD clslib /usr/lib64/rados-classes\n'
+
+ # by default locally built binaries assume /usr/local
+ dockerfile+=$'RUN rm -rf /usr/local/lib64 ; ln -s /usr/lib64 /usr/local ; ln -s /usr/share/ceph /usr/local/share\n'
+fi
+
+if [ $rgw -eq 1 ] || [ $all -eq 1 ]; then
+ echo "rgw"
+ RGW="radosgw radosgw-admin"
+ mkdir -p $TMP/rgw
+ for f in $RGW; do cp bin/$f $TMP/rgw ; done
+ [ $strip -eq 1 ] && strip $TMP/rgw/*
+ dockerfile+=$'ADD rgw /usr/bin\n'
+
+ RGWLIBS="libradosgw.so*"
+ mkdir -p $TMP/rgwlib
+ for f in lib/$RGWLIBS; do cp $f $TMP/rgwlib ; done
+ [ $strip -eq 1 ] && strip $TMP/rgwlib/*
+ dockerfile+=$'ADD rgwlib /usr/lib64\n'
+fi
+
+if [ $cephfs -eq 1 ] || [ $all -eq 1 ]; then
+ echo "cephfs"
+ FS="ceph-mds"
+ mkdir -p $TMP/fs
+ for f in $FS; do cp bin/$f $TMP/fs ; done
+ [ $strip -eq 1 ] && strip $TMP/fs/*
+ dockerfile+=$'ADD fs /usr/bin\n'
+
+ FSLIBS="libcephfs.so*"
+ mkdir -p $TMP/fslib
+ for f in lib/$FSLIBS; do cp $f $TMP/fslib ; done
+ [ $strip -eq 1 ] && strip $TMP/fslib/*
+ dockerfile+=$'ADD fslib /usr/lib64\n'
+fi
+
+if [ $rbd -eq 1 ] || [ $all -eq 1 ]; then
+ echo "rbd"
+ RBD="rbd rbd-mirror"
+ mkdir -p $TMP/rbd
+ for f in $RBD; do cp bin/$f $TMP/rbd ; done
+ [ $strip -eq 1 ] && strip $TMP/rbd/*
+ dockerfile+=$'ADD rbd /usr/bin\n'
+
+ RBDLIBS="librbd.so*"
+ mkdir -p $TMP/rbdlib
+ for f in lib/$RBDLIBS; do cp $f $TMP/rbdlib ; done
+ [ $strip -eq 1 ] && strip $TMP/rbdlib/*
+ dockerfile+=$'ADD rbdlib /usr/lib64\n'
+fi
+
+echo "build"
+pushd $TMP > /dev/null
+echo "$dockerfile" > Dockerfile
+$asroot $runtime build -t $target .
+popd > /dev/null
+
+if [ $push -eq 1 ]; then
+ echo "push"
+ $asroot $runtime push $target
+fi
+
+rm -r $TMP
diff --git a/src/script/crash_bdev.sh b/src/script/crash_bdev.sh
new file mode 100755
index 000000000..da31b69b1
--- /dev/null
+++ b/src/script/crash_bdev.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+set -ex
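+
+# Repeatedly inject a bdev crash into osd.0 of a running vstart cluster
+# (run from the build directory; illustrative): each iteration asks the OSD
+# to crash in the bdev layer, checks that the crash was logged, and
+# restarts the daemon.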
+
+while true; do
+ ./ceph daemon osd.0 config set bdev_inject_crash 2
+ sleep 5
+ tail -n 1000 out/osd.0.log | grep bdev_inject_crash || exit 1
+ ./init-ceph start osd.0
+ sleep 20
+done
diff --git a/src/script/credits.sh b/src/script/credits.sh
new file mode 100755
index 000000000..f7214d9f1
--- /dev/null
+++ b/src/script/credits.sh
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+
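+# Summarize lines changed, commits, and reviews by author and by organization
+# over a git revision range, using the .peoplemap/.organizationmap mailmap
+# files in the tree. Usage (illustrative):
+#   ./src/script/credits.sh v18.1.0..v18.2.0
+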
+range="$1"
+TMP=/tmp/credits
+declare -A mail2author
+declare -A mail2organization
+remap="s/'/ /g"
+git log --pretty='%ae %aN <%aE>' $range | sed -e "$remap" | sort -u > $TMP
+while read mail who ; do
+ author=$(echo $who | git -c mailmap.file=.peoplemap check-mailmap --stdin)
+ mail2author[$mail]="$author"
+ organization=$(echo $who | git -c mailmap.file=.organizationmap check-mailmap --stdin)
+ mail2organization[$mail]="$organization"
+done < $TMP
+declare -A author2lines
+declare -A organization2lines
+git log --no-merges --pretty='%ae' $range | sed -e "$remap" | sort -u > $TMP
+while read mail ; do
+ count=$(git log --numstat --author="$mail" --pretty='%h' $range |
+        grep -E -v 'package-lock\.json|\.xlf' | # generated files that should be excluded from line counting
+ perl -e 'while(<STDIN>) { if(/(\d+)\t(\d+)/) { $added += $1; $deleted += $2 } }; print $added + $deleted;')
+ (( author2lines["${mail2author[$mail]}"] += $count ))
+ (( organization2lines["${mail2organization[$mail]}"] += $count ))
+done < $TMP
+echo
+echo "Number of lines added and removed, by authors"
+for author in "${!author2lines[@]}" ; do
+ printf "%6s %s\n" ${author2lines["$author"]} "$author"
+done | sort -rn | nl
+echo
+echo "Number of lines added and removed, by organization"
+for organization in "${!organization2lines[@]}" ; do
+ printf "%6s %s\n" ${organization2lines["$organization"]} "$organization"
+done | sort -rn | nl
+echo
+echo "Commits, by authors"
+git log --no-merges --pretty='%aN <%aE>' $range | git -c mailmap.file=.peoplemap check-mailmap --stdin | sort | uniq -c | sort -rn | nl
+echo
+echo "Commits, by organizations"
+git log --no-merges --pretty='%aN <%aE>' $range | git -c mailmap.file=.organizationmap check-mailmap --stdin | sort | uniq -c | sort -rn | nl
+echo
+echo "Reviews, by authors (one review spans multiple commits)"
+git log --pretty=%b $range | perl -n -e 'print "$_\n" if(s/^\s*Reviewed-by:\s*(.*<.*>)\s*$/\1/i)' | git check-mailmap --stdin | git -c mailmap.file=.peoplemap check-mailmap --stdin | sort | uniq -c | sort -rn | nl
+echo
+echo "Reviews, by organizations (one review spans multiple commits)"
+git log --pretty=%b $range | perl -n -e 'print "$_\n" if(s/^\s*Reviewed-by:\s*(.*<.*>)\s*$/\1/i)' | git check-mailmap --stdin | git -c mailmap.file=.organizationmap check-mailmap --stdin | sort | uniq -c | sort -rn | nl
diff --git a/src/script/extend_stretch_cluster.sh b/src/script/extend_stretch_cluster.sh
new file mode 100755
index 000000000..f5886368f
--- /dev/null
+++ b/src/script/extend_stretch_cluster.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+set -ex
+
+../src/script/add_osd.sh 4 'host=host1-1 datacenter=site1 root=default'
+../src/script/add_osd.sh 5 'host=host1-2 datacenter=site1 root=default'
+../src/script/add_osd.sh 6 'host=host2-1 datacenter=site2 root=default'
+../src/script/add_osd.sh 7 'host=host2-2 datacenter=site2 root=default'
diff --git a/src/script/find_dups_in_pg_log.sh b/src/script/find_dups_in_pg_log.sh
new file mode 100755
index 000000000..b4d1afb63
--- /dev/null
+++ b/src/script/find_dups_in_pg_log.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+# pipe output of grep for objectname in osd logs to me
+#
+# e.g.,
+#
+# zgrep smithi01817880-936 remote/*/log/*osd* | ~/src/ceph/src/script/find_dups_in_pg_log.sh
+#
+# or
+#
+# zcat remote/*/log/*osd* | ~/src/ceph/src/script/find_dups_in_pg_log.sh
+#
+# output will be any requests that appear in the pg log >1 time (along with
+# their count)
+
+#grep append_log | sort -k 2 | sed 's/.*append_log//' | awk '{print $3 " " $8}' | sort | uniq | awk '{print $2}' | sort | uniq -c | grep -v ' 1 '
+
+grep append_log | grep ' by ' | \
+ perl -pe 's/(.*) \[([^ ]*) (.*) by ([^ ]+) (.*)/$2 $4/' | \
+ sort | uniq | \
+ awk '{print $2}' | \
+ sort | uniq -c | grep -v ' 1 '
diff --git a/src/script/fix_modeline.pl b/src/script/fix_modeline.pl
new file mode 100755
index 000000000..8eadde9b5
--- /dev/null
+++ b/src/script/fix_modeline.pl
@@ -0,0 +1,29 @@
+#!/usr/bin/perl
+
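+# Replace any emacs/vim modeline at the top of the given file with the
+# canonical one from doc/modeline.txt. Usage (illustrative, from the top of
+# the source tree):
+#   perl src/script/fix_modeline.pl src/osd/OSD.cc
+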
+use strict;
+my $fn = shift @ARGV;
+my $old = `cat $fn`;
+my $header = `cat doc/modeline.txt`;
+
+# strip existing modeline
+my $new = $old;
+$new =~ s/^\/\/ \-\*\- ([^\n]+) \-\*\-([^\n]*)\n//s; # emacs
+$new =~ s/^\/\/ vim: ([^\n]*)\n//s; # vim;
+$new =~ s/^\/\/ \-\*\- ([^\n]+) \-\*\-([^\n]*)\n//s; # emacs
+$new =~ s/^\/\/ vim: ([^\n]*)\n//s; # vim;
+$new =~ s/^\/\/ \-\*\- ([^\n]+) \-\*\-([^\n]*)\n//s; # emacs
+$new =~ s/^\/\/ vim: ([^\n]*)\n//s; # vim;
+
+# add correct header
+$new = $header . $new;
+
+if ($new ne $old) {
+ print "$fn\n";
+ open(O, ">$fn.new");
+ print O $new;
+ close O;
+ system "diff $fn $fn.new";
+ rename "$fn.new", $fn;
+ #unlink "$fn.new";
+}
+
diff --git a/src/script/gen-corpus.sh b/src/script/gen-corpus.sh
new file mode 100755
index 000000000..8550c2080
--- /dev/null
+++ b/src/script/gen-corpus.sh
@@ -0,0 +1,102 @@
+#!/usr/bin/env bash
+# -*- mode:sh; tab-width:4; sh-basic-offset:4; indent-tabs-mode:nil -*-
+# vim: softtabstop=4 shiftwidth=4 expandtab
+
+set -ex
+
+function get_jobs() {
+ local jobs=$(nproc)
+ if [ $jobs -ge 8 ] ; then
+ echo 8
+ else
+ echo $jobs
+ fi
+}
+
+[ -z "$BUILD_DIR" ] && BUILD_DIR=build
+
+function build() {
+ local encode_dump_path=$1
+ shift
+
+ ./do_cmake.sh \
+ -DWITH_MGR_DASHBOARD_FRONTEND=OFF \
+ -DWITH_DPDK=OFF \
+ -DWITH_SPDK=OFF \
+ -DCMAKE_CXX_FLAGS="-DENCODE_DUMP_PATH=${encode_dump_path}"
+ cd ${BUILD_DIR}
+ cmake --build . -- -j$(get_jobs)
+}
+
+function run() {
+ MON=3 MGR=2 OSD=3 MDS=3 RGW=1 ../src/vstart.sh -n -x
+
+ local old_path="$PATH"
+ export PATH="$PWD/bin:$PATH"
+ export CEPH_CONF="$PWD/ceph.conf"
+ ceph osd pool create mypool
+ rados -p mypool bench 10 write -b 123
+ ceph osd out 0
+ ceph osd in 0
+ init-ceph restart osd.1
+ for f in ../qa/workunits/cls/*.sh ; do
+ $f
+ done
+ ../qa/workunits/rados/test.sh
+ ceph_test_librbd
+ ceph_test_libcephfs
+ init-ceph restart mds.a
+ ../qa/workunits/rgw/run-s3tests.sh
+ PATH="$old_path"
+
+ ../src/stop.sh
+}
+
+function import_corpus() {
+ local encode_dump_path=$1
+ shift
+ local version=$1
+ shift
+
+ # import the corpus
+ ../src/test/encoding/import.sh \
+ ${encode_dump_path} \
+ ${version} \
+ ../ceph-object-corpus/archive
+ ../src/test/encoding/import-generated.sh \
+ ../ceph-object-corpus/archive
+ # prune it
+ pushd ../ceph-object-corpus
+ bin/prune-archive.sh
+ popd
+}
+
+function verify() {
+ ctest -R readable.sh
+}
+
+function commit_and_push() {
+ local version=$1
+ shift
+
+ pushd ../ceph-object-corpus
+ git checkout -b wip-${version}
+ git add archive/${version}
+ git commit --signoff --message=${version}
+ git remote add cc git@github.com:ceph/ceph-object-corpus.git
+ git push cc wip-${version}
+ popd
+}
+
+encode_dump_path=$(mktemp -d)
+build $encode_dump_path
+echo "generating corpus objects.."
+run
+version=$(bin/ceph-dencoder version)
+echo "importing corpus. it may take over 30 minutes.."
+import_corpus $encode_dump_path $version
+echo "verifying imported corpus.."
+verify
+echo "all good, pushing to remote repo.."
+commit_and_push ${version}
+rm -rf "$encode_dump_path"
diff --git a/src/script/kcon_all.sh b/src/script/kcon_all.sh
new file mode 100755
index 000000000..c3056f9d9
--- /dev/null
+++ b/src/script/kcon_all.sh
@@ -0,0 +1,10 @@
+#!/bin/sh -x
+
+p() {
+ echo "$*" > /sys/kernel/debug/dynamic_debug/control
+}
+
+echo 9 > /proc/sysrq-trigger
+p 'module ceph +p'
+p 'module libceph +p'
+p 'module rbd +p'
diff --git a/src/script/kcon_most.sh b/src/script/kcon_most.sh
new file mode 100755
index 000000000..e62db2ac8
--- /dev/null
+++ b/src/script/kcon_most.sh
@@ -0,0 +1,13 @@
+#!/bin/sh -x
+
+p() {
+ echo "$*" > /sys/kernel/debug/dynamic_debug/control
+}
+
+echo 9 > /proc/sysrq-trigger
+p 'module ceph +p'
+p 'module libceph +p'
+p 'module rbd +p'
+p 'file net/ceph/messenger.c -p'
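+# enable every ceph-related callsite whose format string contains "---" or
+# "===" (marker lines in the control file), rewritten into
+# "file <name> line <n> +p" commands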
+p 'file' `grep -- --- /sys/kernel/debug/dynamic_debug/control | grep ceph | awk '{print $1}' | sed 's/:/ line /'` '+p'
+p 'file' `grep -- === /sys/kernel/debug/dynamic_debug/control | grep ceph | awk '{print $1}' | sed 's/:/ line /'` '+p'
diff --git a/src/script/kubejacker/Dockerfile b/src/script/kubejacker/Dockerfile
new file mode 100644
index 000000000..2b111be76
--- /dev/null
+++ b/src/script/kubejacker/Dockerfile
@@ -0,0 +1,15 @@
+FROM ceph/daemon-base:latest-master
+# for openSUSE, use:
+# FROM registry.opensuse.org/home/ssebastianwagner/rook-ceph/images/opensuse/leap:latest
+
+
+#ADD bin.tar.gz /usr/bin/
+#ADD lib.tar.gz /usr/lib64/
+
+# Assume developer is using default paths (i.e. /usr/local), so
+# build binaries will be looking for libs there.
+#ADD eclib.tar.gz /usr/local/lib64/ceph/erasure-code/
+#ADD clslib.tar.gz /usr/local/lib64/rados-classes/
+
+ADD python_common.tar.gz /usr/share/ceph/python_common
+ADD mgr_plugins.tar.gz /usr/share/ceph/mgr
diff --git a/src/script/kubejacker/README.rst b/src/script/kubejacker/README.rst
new file mode 100644
index 000000000..07e948a5e
--- /dev/null
+++ b/src/script/kubejacker/README.rst
@@ -0,0 +1,11 @@
+
+This tool is for developers who want to run their WIP Ceph code
+inside a Rook/kubernetes cluster without waiting for packages
+to build.
+
+It simply takes a Rook image, overlays all the binaries from your
+built Ceph tree into it, and spits out a new Rook image. This will
+only work as long as your build environment is sufficiently similar
+(in terms of dependencies, etc.) to the version of Ceph that was
+originally in the images you're injecting into.
+
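+Example (assuming a local registry at localhost:5000 that your kubelet
+nodes can also reach)::
+
+    cd build
+    REPO=localhost:5000 ../src/script/kubejacker/kubejacker.sh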
diff --git a/src/script/kubejacker/kubejacker.sh b/src/script/kubejacker/kubejacker.sh
new file mode 100755
index 000000000..e013669e9
--- /dev/null
+++ b/src/script/kubejacker/kubejacker.sh
@@ -0,0 +1,88 @@
+#!/bin/bash
+
+set -x
+set -e
+SCRIPT=$(readlink -f "$0")
+SCRIPTPATH=$(dirname "$SCRIPT")
+
+# Run me from your build dir! I look for binaries in bin/, lib/ etc.
+BUILDPATH=$(pwd)
+
+
+# PREREQUISITE: a repo that you can push to. You are probably running
+# a local docker registry that your kubelet nodes also have access to.
+REPO=${REPO:-"$1"}
+
+if [ -z "$REPO" ]
+then
+ echo "ERROR: no \$REPO set!"
+ echo "Run a docker repository and set REPO to <hostname>:<port>"
+ exit -1
+fi
+
+# The output image name: this should match whatever is configured as
+# the image name in your Rook cluster CRD object.
+IMAGE=ceph/ceph
+TAG=latest
+
+# The namespace where ceph containers are running in your
+# test cluster: used for bouncing the containers.
+NAMESPACE=rook-ceph
+
+mkdir -p kubejacker
+cp $SCRIPTPATH/Dockerfile kubejacker
+
+# TODO: let user specify which daemon they're interested
+# in -- doing all bins all the time is too slow and bloaty
+#BINS="ceph-mgr ceph-mon ceph-mds ceph-osd rados radosgw-admin radosgw"
+#pushd bin
+#strip $BINS #TODO: make stripping optional
+#tar czf $BUILDPATH/kubejacker/bin.tar.gz $BINS
+#popd
+
+# We need ceph-common to support the binaries
+# We need librados/rbd to support mgr modules
+# that import the python bindings
+#LIBS="libceph-common.so.0 libceph-common.so librados.so.2 librados.so librados.so.2.0.0 librbd.so librbd.so.1 librbd.so.1.12.0"
+#pushd lib
+#strip $LIBS #TODO: make stripping optional
+#tar czf $BUILDPATH/kubejacker/lib.tar.gz $LIBS
+#popd
+
+pushd ../src/python-common/ceph
+tar --exclude=__pycache__ --exclude=tests -czf $BUILDPATH/kubejacker/python_common.tar.gz *
+popd
+
+pushd ../src/pybind/mgr
+find ./ -name "*.pyc" -exec rm -f {} \;
+# Exclude node_modules because it contains the huge sources in dashboard/frontend
+tar --exclude=node_modules --exclude=tests --exclude-backups -czf $BUILDPATH/kubejacker/mgr_plugins.tar.gz *
+popd
+
+#ECLIBS="libec_*.so*"
+#pushd lib
+#strip $ECLIBS #TODO: make stripping optional
+#tar czf $BUILDPATH/kubejacker/eclib.tar.gz $ECLIBS
+#popd
+
+#CLSLIBS="libcls_*.so*"
+#pushd lib
+#strip $CLSLIBS #TODO: make stripping optional
+#tar czf $BUILDPATH/kubejacker/clslib.tar.gz $CLSLIBS
+#popd
+
+pushd kubejacker
+docker build -t $REPO/ceph/ceph:latest .
+popd
+
+# Push the image to the repository
+#docker tag $REPO/$IMAGE:$TAG $REPO/$IMAGE:latest
+docker push $REPO/ceph/ceph:latest
+#docker push $REPO/$IMAGE:$TAG
+# With a plain HTTP registry
+#podman push $REPO/ceph/ceph:latest --tls-verify=false
+
+# Finally, bounce the containers to pick up the new image
+kubectl -n $NAMESPACE delete pod -l app=rook-ceph-mds
+kubectl -n $NAMESPACE delete pod -l app=rook-ceph-mgr
+kubectl -n $NAMESPACE delete pod -l app=rook-ceph-mon
diff --git a/src/script/ptl-tool.py b/src/script/ptl-tool.py
new file mode 100755
index 000000000..110fa9e3a
--- /dev/null
+++ b/src/script/ptl-tool.py
@@ -0,0 +1,404 @@
+#!/usr/bin/python3
+
+# README:
+#
+# This tool's purpose is to make it easier to merge PRs into test branches and
+# into main. Make sure you generate a Personal Access Token in GitHub and
+# add it to your ~/.github.key.
+#
+# Because developers often have custom names for the ceph upstream remote
+# (https://github.com/ceph/ceph.git), you will probably want to export the
+# PTL_TOOL_BASE_PATH environment variable in your shell rc files before using
+# this script:
+#
+# export PTL_TOOL_BASE_PATH=refs/remotes/<remotename>/
+#
+# and PTL_TOOL_BASE_REMOTE as the name of your Ceph upstream remote (default: "upstream"):
+#
+# export PTL_TOOL_BASE_REMOTE=<remotename>
+#
+#
+# ** Here are some basic examples to get started: **
+#
+# Merging all PRs labeled 'wip-pdonnell-testing' into a new test branch:
+#
+# $ src/script/ptl-tool.py --pr-label wip-pdonnell-testing
+# Adding labeled PR #18805 to PR list
+# Adding labeled PR #18774 to PR list
+# Adding labeled PR #18600 to PR list
+# Will merge PRs: [18805, 18774, 18600]
+# Detaching HEAD onto base: main
+# Merging PR #18805
+# Merging PR #18774
+# Merging PR #18600
+# Checked out new branch wip-pdonnell-testing-20171108.054517
+# Created tag testing/wip-pdonnell-testing-20171108.054517
+#
+#
+# Merging all PRs labeled 'wip-pdonnell-testing' into main:
+#
+# $ src/script/ptl-tool.py --pr-label wip-pdonnell-testing --branch main
+# Adding labeled PR #18805 to PR list
+# Adding labeled PR #18774 to PR list
+# Adding labeled PR #18600 to PR list
+# Will merge PRs: [18805, 18774, 18600]
+# Detaching HEAD onto base: main
+# Merging PR #18805
+# Merging PR #18774
+# Merging PR #18600
+# Checked out branch main
+#
+# Now push to main:
+# $ git push upstream main
+# ...
+#
+#
+# Merging PR #1234567 and #2345678 into a new test branch with a testing label added to the PR:
+#
+# $ src/script/ptl-tool.py 1234567 2345678 --label wip-pdonnell-testing
+# Detaching HEAD onto base: main
+# Merging PR #1234567
+# Labeled PR #1234567 wip-pdonnell-testing
+# Merging PR #2345678
+# Labeled PR #2345678 wip-pdonnell-testing
+# Deleted old test branch wip-pdonnell-testing-20170928
+# Created branch wip-pdonnell-testing-20170928
+# Created tag testing/wip-pdonnell-testing-20170928_03
+#
+#
+# Merging PR #1234567 into main, leaving a detached HEAD (i.e. not updating your repo's main branch) and not labeling:
+#
+# $ src/script/ptl-tool.py --branch HEAD --merge-branch-name main 1234567
+# Detaching HEAD onto base: main
+# Merging PR #1234567
+# Leaving HEAD detached; no branch anchors your commits
+#
+# Now push to main:
+# $ git push upstream HEAD:main
+#
+#
+# Merging PR #12345678 into luminous, leaving a detached HEAD (i.e. not updating your repo's main branch) and not labeling:
+#
+# $ src/script/ptl-tool.py --base luminous --branch HEAD --merge-branch-name luminous 12345678
+# Detaching HEAD onto base: luminous
+# Merging PR #12345678
+# Leaving HEAD detached; no branch anchors your commits
+#
+# Now push to luminous:
+# $ git push upstream HEAD:luminous
+#
+#
+# Merging all PRs labeled 'wip-pdonnell-testing' into main, leaving a detached HEAD:
+#
+# $ src/script/ptl-tool.py --base main --branch HEAD --merge-branch-name main --pr-label wip-pdonnell-testing
+# Adding labeled PR #18192 to PR list
+# Will merge PRs: [18192]
+# Detaching HEAD onto base: main
+# Merging PR #18192
+# Leaving HEAD detached; no branch anchors your commits
+
+
+# TODO
+# Look for check failures?
+# redmine issue update: http://www.redmine.org/projects/redmine/wiki/Rest_Issues
+
+import argparse
+import codecs
+import datetime
+import getpass
+import git
+import itertools
+import json
+import logging
+import os
+import re
+import requests
+import sys
+
+from os.path import expanduser
+
+log = logging.getLogger(__name__)
+log.addHandler(logging.StreamHandler())
+log.setLevel(logging.INFO)
+
+BASE_PROJECT = os.getenv("PTL_TOOL_BASE_PROJECT", "ceph")
+BASE_REPO = os.getenv("PTL_TOOL_BASE_REPO", "ceph")
+BASE_REMOTE = os.getenv("PTL_TOOL_BASE_REMOTE", "upstream")
+BASE_PATH = os.getenv("PTL_TOOL_BASE_PATH", "refs/remotes/upstream/")
+GITDIR = os.getenv("PTL_TOOL_GITDIR", ".")
+USER = os.getenv("PTL_TOOL_USER", getpass.getuser())
+with open(expanduser("~/.github.key")) as f:
+ PASSWORD = f.read().strip()
+TEST_BRANCH = os.getenv("PTL_TOOL_TEST_BRANCH", "wip-{user}-testing-%Y%m%d.%H%M%S")
+
+SPECIAL_BRANCHES = ('main', 'luminous', 'jewel', 'HEAD')
+
+INDICATIONS = [
+ re.compile("(Reviewed-by: .+ <[\w@.-]+>)", re.IGNORECASE),
+ re.compile("(Acked-by: .+ <[\w@.-]+>)", re.IGNORECASE),
+ re.compile("(Tested-by: .+ <[\w@.-]+>)", re.IGNORECASE),
+]
+
+# find containing git dir
+git_dir = GITDIR
+max_levels = 6
+while not os.path.exists(git_dir + '/.git'):
+ git_dir += '/..'
+ max_levels -= 1
+ if max_levels < 0:
+ break
+
+CONTRIBUTORS = {}
+NEW_CONTRIBUTORS = {}
+with codecs.open(git_dir + "/.githubmap", encoding='utf-8') as f:
+    comment = re.compile(r"\s*#")
+    patt = re.compile(r"([\w-]+)\s+(.*)")
+ for line in f:
+ if comment.match(line):
+ continue
+ m = patt.match(line)
+ CONTRIBUTORS[m.group(1)] = m.group(2)
+
+BZ_MATCH = re.compile("(.*https?://bugzilla.redhat.com/.*)")
+TRACKER_MATCH = re.compile("(.*https?://tracker.ceph.com/.*)")
+
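+# Yield one decoded JSON payload per page of a GitHub API endpoint,
+# following the "link" response headers while a "next" page exists.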
+def get(session, url, params=None, paging=True):
+ if params is None:
+ params = {}
+ params['per_page'] = 100
+
+ log.debug(f"Fetching {url}")
+ response = session.get(url, auth=(USER, PASSWORD), params=params)
+ log.debug(f"Response = {response}; links = {response.headers.get('link', '')}")
+ if response.status_code != 200:
+ log.error(f"Failed to fetch {url}: {response}")
+ sys.exit(1)
+ j = response.json()
+ yield j
+ if paging:
+ link = response.headers.get('link', None)
+ page = 2
+ while link is not None and 'next' in link:
+ log.debug(f"Fetching {url}")
+ new_params = dict(params)
+ new_params.update({'page': page})
+ response = session.get(url, auth=(USER, PASSWORD), params=new_params)
+ log.debug(f"Response = {response}; links = {response.headers.get('link', '')}")
+ if response.status_code != 200:
+ log.error(f"Failed to fetch {url}: {response}")
+ sys.exit(1)
+ yield response.json()
+ link = response.headers.get('link', None)
+ page += 1
+
+def get_credits(session, pr, pr_req):
+ comments = [pr_req]
+
+ log.debug(f"Getting comments for #{pr}")
+ endpoint = f"https://api.github.com/repos/{BASE_PROJECT}/{BASE_REPO}/issues/{pr}/comments"
+ for c in get(session, endpoint):
+ comments.extend(c)
+
+ log.debug(f"Getting reviews for #{pr}")
+ endpoint = f"https://api.github.com/repos/{BASE_PROJECT}/{BASE_REPO}/pulls/{pr}/reviews"
+ reviews = []
+ for c in get(session, endpoint):
+ comments.extend(c)
+ reviews.extend(c)
+
+ log.debug(f"Getting review comments for #{pr}")
+ endpoint = f"https://api.github.com/repos/{BASE_PROJECT}/{BASE_REPO}/pulls/{pr}/comments"
+ for c in get(session, endpoint):
+ comments.extend(c)
+
+ credits = set()
+ for comment in comments:
+ body = comment["body"]
+ if body:
+ url = comment["html_url"]
+ for m in BZ_MATCH.finditer(body):
+ log.info("[ {url} ] BZ cited: {cite}".format(url=url, cite=m.group(1)))
+ for m in TRACKER_MATCH.finditer(body):
+ log.info("[ {url} ] Ceph tracker cited: {cite}".format(url=url, cite=m.group(1)))
+ for indication in INDICATIONS:
+ for cap in indication.findall(comment["body"]):
+ credits.add(cap)
+
+ new_new_contributors = {}
+ for review in reviews:
+ if review["state"] == "APPROVED":
+ user = review["user"]["login"]
+ try:
+ credits.add("Reviewed-by: "+CONTRIBUTORS[user])
+ except KeyError as e:
+ try:
+ credits.add("Reviewed-by: "+NEW_CONTRIBUTORS[user])
+ except KeyError as e:
+ try:
+ name = input("Need name for contributor \"%s\" (use ^D to skip); Reviewed-by: " % user)
+ name = name.strip()
+ if len(name) == 0:
+ continue
+ NEW_CONTRIBUTORS[user] = name
+ new_new_contributors[user] = name
+ credits.add("Reviewed-by: "+name)
+ except EOFError as e:
+ continue
+
+ return "\n".join(credits), new_new_contributors
+
+def build_branch(args):
+ base = args.base
+ branch = datetime.datetime.utcnow().strftime(args.branch).format(user=USER)
+ label = args.label
+ merge_branch_name = args.merge_branch_name
+ if merge_branch_name is False:
+ merge_branch_name = branch
+
+ session = requests.Session()
+
+ if label:
+ # Check the label format
+ if re.search(r'\bwip-(.*?)-testing\b', label) is None:
+ log.error("Unknown Label '{lblname}'. Label Format: wip-<name>-testing".format(lblname=label))
+ sys.exit(1)
+
+ # Check if the Label exist in the repo
+ endpoint = f"https://api.github.com/repos/{BASE_PROJECT}/{BASE_REPO}/labels/{label}"
+ get(session, endpoint, paging=False)
+
+ G = git.Repo(args.git)
+
+ # First get the latest base branch and PRs from BASE_REMOTE
+ remote = getattr(G.remotes, BASE_REMOTE)
+ remote.fetch()
+
+ prs = args.prs
+ if args.pr_label is not None:
+ if args.pr_label == '' or args.pr_label.isspace():
+ log.error("--pr-label must have a non-space value")
+ sys.exit(1)
+ payload = {'labels': args.pr_label, 'sort': 'created', 'direction': 'desc'}
+ endpoint = f"https://api.github.com/repos/{BASE_PROJECT}/{BASE_REPO}/issues"
+ labeled_prs = []
+ for l in get(session, endpoint, params=payload):
+ labeled_prs.extend(l)
+ if len(labeled_prs) == 0:
+ log.error("Search for PRs matching label '{}' returned no results!".format(args.pr_label))
+ sys.exit(1)
+ for pr in labeled_prs:
+ if pr['pull_request']:
+ n = pr['number']
+ log.info("Adding labeled PR #{} to PR list".format(n))
+ prs.append(n)
+ log.info("Will merge PRs: {}".format(prs))
+
+ if base == 'HEAD':
+ log.info("Branch base is HEAD; not checking out!")
+ else:
+ log.info("Detaching HEAD onto base: {}".format(base))
+ try:
+ base_path = args.base_path + base
+ base = next(ref for ref in G.refs if ref.path == base_path)
+ except StopIteration:
+ log.error("Branch " + base + " does not exist!")
+ sys.exit(1)
+
+ # So we know that we're not on an old test branch, detach HEAD onto ref:
+ base.checkout()
+
+ for pr in prs:
+ pr = int(pr)
+ log.info("Merging PR #{pr}".format(pr=pr))
+
+ remote_ref = "refs/pull/{pr}/head".format(pr=pr)
+ fi = remote.fetch(remote_ref)
+ if len(fi) != 1:
+ log.error("PR {pr} does not exist?".format(pr=pr))
+ sys.exit(1)
+ tip = fi[0].ref.commit
+
+ endpoint = f"https://api.github.com/repos/{BASE_PROJECT}/{BASE_REPO}/pulls/{pr}"
+ response = next(get(session, endpoint, paging=False))
+
+ message = "Merge PR #%d into %s\n\n* %s:\n" % (pr, merge_branch_name, remote_ref)
+
+ for commit in G.iter_commits(rev="HEAD.."+str(tip)):
+ message = message + ("\t%s\n" % commit.message.split('\n', 1)[0])
+ # Get tracker issues / bzs cited so the PTL can do updates
+ short = commit.hexsha[:8]
+ for m in BZ_MATCH.finditer(commit.message):
+ log.info("[ {sha1} ] BZ cited: {cite}".format(sha1=short, cite=m.group(1)))
+ for m in TRACKER_MATCH.finditer(commit.message):
+ log.info("[ {sha1} ] Ceph tracker cited: {cite}".format(sha1=short, cite=m.group(1)))
+
+ message = message + "\n"
+ if args.credits:
+ (addendum, new_contributors) = get_credits(session, pr, response)
+ message += addendum
+ else:
+ new_contributors = []
+
+ G.git.merge(tip.hexsha, '--no-ff', m=message)
+
+ if new_contributors:
+ # Check out the PR, add a commit adding to .githubmap
+ log.info("adding new contributors to githubmap in merge commit")
+ with open(git_dir + "/.githubmap", "a") as f:
+ for c in new_contributors:
+ f.write("%s %s\n" % (c, new_contributors[c]))
+ G.index.add([".githubmap"])
+ G.git.commit("--amend", "--no-edit")
+
+ if label:
+ req = session.post("https://api.github.com/repos/{project}/{repo}/issues/{pr}/labels".format(pr=pr, project=BASE_PROJECT, repo=BASE_REPO), data=json.dumps([label]), auth=(USER, PASSWORD))
+ if req.status_code != 200:
+ log.error("PR #%d could not be labeled %s: %s" % (pr, label, req))
+ sys.exit(1)
+ log.info("Labeled PR #{pr} {label}".format(pr=pr, label=label))
+
+ # If the branch is 'HEAD', leave HEAD detached (but use "main" for commit message)
+ if branch == 'HEAD':
+ log.info("Leaving HEAD detached; no branch anchors your commits")
+ else:
+ created_branch = False
+ try:
+ G.head.reference = G.create_head(branch)
+ log.info("Checked out new branch {branch}".format(branch=branch))
+ created_branch = True
+ except:
+ G.head.reference = G.create_head(branch, force=True)
+ log.info("Checked out branch {branch}".format(branch=branch))
+
+ if created_branch:
+ # tag it for future reference.
+ tag = "testing/%s" % branch
+ git.refs.tag.Tag.create(G, tag)
+ log.info("Created tag %s" % tag)
+
+def main():
+ parser = argparse.ArgumentParser(description="Ceph PTL tool")
+ default_base = 'main'
+ default_branch = TEST_BRANCH
+ default_label = ''
+ if len(sys.argv) > 1 and sys.argv[1] in SPECIAL_BRANCHES:
+ argv = sys.argv[2:]
+ default_branch = 'HEAD' # Leave HEAD detached
+ default_base = default_branch
+ default_label = False
+ else:
+ argv = sys.argv[1:]
+ parser.add_argument('--branch', dest='branch', action='store', default=default_branch, help='branch to create ("HEAD" leaves HEAD detached; i.e. no branch is made)')
+ parser.add_argument('--merge-branch-name', dest='merge_branch_name', action='store', default=False, help='name of the branch for merge messages')
+ parser.add_argument('--base', dest='base', action='store', default=default_base, help='base for branch')
+ parser.add_argument('--base-path', dest='base_path', action='store', default=BASE_PATH, help='base for branch')
+ parser.add_argument('--git-dir', dest='git', action='store', default=git_dir, help='git directory')
+ parser.add_argument('--label', dest='label', action='store', default=default_label, help='label PRs for testing')
+ parser.add_argument('--pr-label', dest='pr_label', action='store', help='label PRs for testing')
+ parser.add_argument('--no-credits', dest='credits', action='store_false', help='skip indication search (Reviewed-by, etc.)')
+ parser.add_argument('prs', metavar="PR", type=int, nargs='*', help='Pull Requests to merge')
+ args = parser.parse_args(argv)
+ return build_branch(args)
+
+if __name__ == "__main__":
+ main()
diff --git a/src/script/requirements.backport-create-issue.txt b/src/script/requirements.backport-create-issue.txt
new file mode 100644
index 000000000..832772fd9
--- /dev/null
+++ b/src/script/requirements.backport-create-issue.txt
@@ -0,0 +1 @@
+python-redmine == 2.3.0
diff --git a/src/script/run-cbt.sh b/src/script/run-cbt.sh
new file mode 100755
index 000000000..ad7e0ce2e
--- /dev/null
+++ b/src/script/run-cbt.sh
@@ -0,0 +1,148 @@
+#!/bin/sh
+
+usage() {
+ prog_name=$1
+ shift
+ cat <<EOF
+usage:
+ $prog_name [options] <config-file>...
+
+options:
+  -a,--archive-dir  directory in which the test result is stored, defaults to $PWD/cbt-archive
+  --build-dir       directory where CMakeCache.txt is located, defaults to $PWD
+  --cbt             directory of cbt if you already have a copy of it; ceph/cbt:master will be cloned from github if not specified
+  --classical       test with a classical (non-crimson) vstart cluster
+  -h,--help         print this help message
+  --source-dir      the path to the top level of the Ceph source tree, defaults to $PWD/..
+  --use-existing    do not set up/tear down a vstart cluster for testing
+
+example:
+ $prog_name --cbt ~/dev/cbt -a /tmp ../src/test/crimson/cbt/radosbench_4K_read.yaml
+EOF
+}
+
+prog_name=$(basename $0)
+archive_dir=$PWD/cbt-archive
+build_dir=$PWD
+source_dir=$(dirname $PWD)
+use_existing=false
+classical=false
+opts=$(getopt --options "a:h" --longoptions "archive-dir:,build-dir:,source-dir:,cbt:,help,use-existing,classical" --name $prog_name -- "$@")
+eval set -- "$opts"
+
+while true; do
+ case "$1" in
+ -a|--archive-dir)
+ archive_dir=$2
+ shift 2
+ ;;
+ --build-dir)
+ build_dir=$2
+ shift 2
+ ;;
+ --source-dir)
+ source_dir=$2
+ shift 2
+ ;;
+ --cbt)
+ cbt_dir=$2
+ shift 2
+ ;;
+ --use-existing)
+ use_existing=true
+ shift
+ ;;
+ --classical)
+ classical=true
+ shift
+ ;;
+ -h|--help)
+ usage $prog_name
+        exit 0
+ ;;
+ --)
+ shift
+ break
+ ;;
+ *)
+ echo "unexpected argument $1" 1>&2
+        exit 1
+ ;;
+ esac
+done
+
+if test $# -gt 0; then
+ config_files="$@"
+else
+ echo "$prog_name: please specify one or more .yaml files" 1>&2
+ usage $prog_name
+    exit 1
+fi
+
+if test -z "$cbt_dir"; then
+ cbt_dir=$PWD/cbt
+ git clone --depth 1 -b master https://github.com/ceph/cbt.git $cbt_dir
+fi
+
+# store absolute path before changing cwd
+source_dir=$(readlink -f $source_dir)
+if ! $use_existing; then
+ cd $build_dir || exit
+    # seastar uses 128*8 aio requests in the reactor for I/O and 10003 aio
+    # requests for event polling per core; if it cannot get enough aio
+    # contexts, the seastar application bails out. Taking other processes
+    # into consideration, let's allow 32768 per core.
+ max_io=$(expr 32768 \* "$(nproc)")
+ if test "$(/sbin/sysctl --values fs.aio-max-nr)" -lt $max_io; then
+ sudo /sbin/sysctl -q -w fs.aio-max-nr=$max_io
+ fi
+ if $classical; then
+ MDS=0 MGR=1 OSD=3 MON=1 $source_dir/src/vstart.sh -n -X \
+ --without-dashboard
+ else
+ MDS=0 MGR=1 OSD=3 MON=1 $source_dir/src/vstart.sh -n -X \
+ --without-dashboard --cyanstore \
+ -o "memstore_device_bytes=34359738368" \
+ --crimson --nodaemon --redirect-output \
+ --osd-args "--memory 4G"
+ fi
+ cd - || exit
+fi
+
+# we need to read the performance events,
+# see https://www.kernel.org/doc/Documentation/sysctl/kernel.txt
+if /sbin/capsh --supports=cap_sys_admin; then
+ perf_event_paranoid=$(/sbin/sysctl --values kernel.perf_event_paranoid)
+ if test $perf_event_paranoid -gt 0; then
+ sudo /sbin/sysctl -q -w kernel.perf_event_paranoid=0
+ fi
+else
+ echo "without cap_sys_admin, $(whoami) cannot read the perf events"
+fi
+
+for config_file in $config_files; do
+ echo "testing $config_file"
+ cbt_config=$(mktemp $config_file.XXXX.yaml)
+ python3 $source_dir/src/test/crimson/cbt/t2c.py \
+ --build-dir $build_dir \
+ --input $config_file \
+ --output $cbt_config
+ python3 $cbt_dir/cbt.py \
+ --archive $archive_dir \
+ --conf $build_dir/ceph.conf \
+ $cbt_config
+ rm -f $cbt_config
+done
+
+if test -n "$perf_event_paranoid"; then
+ # restore the setting
+ sudo /sbin/sysctl -q -w kernel.perf_event_paranoid=$perf_event_paranoid
+fi
+
+if ! $use_existing; then
+ cd $build_dir || exit
+ if $classical; then
+ $source_dir/src/stop.sh
+ else
+ $source_dir/src/stop.sh --crimson
+ fi
+fi
diff --git a/src/script/run-coverity b/src/script/run-coverity
new file mode 100755
index 000000000..c4254ba35
--- /dev/null
+++ b/src/script/run-coverity
@@ -0,0 +1,33 @@
+#!/bin/sh -ex
+
+export COVDIR="$HOME/cov-analysis"
+if [ ! -d "$COVDIR" ]; then
+ echo "missing $COVDIR; get that from coverity!"
+ exit 1
+fi
+if [ ! -e "$HOME/coverity.build.pass.txt" ]; then
+ echo "missing $HOME/coverity.build.pass.txt"
+ exit 1
+fi
+
+export PATH="$COVDIR/bin:$PATH"
+
+rm -rf build
+./do_cmake.sh
+cd build
+"$COVDIR/bin/cov-build" --dir cov-int make -j$(nproc)
+
+echo Sage Weil sage@newdream.net ceph >> README
+tar czvf project.tgz README cov-int
+rm -f README
+
+version=`git describe`
+token=`cat ~/coverity.build.pass.txt`
+curl --form token=$token \
+ --form email=sage@newdream.net \
+ --form file=@project.tgz \
+ --form version="$version" \
+ --form description="Automated Ceph build from `hostname`" \
+ https://scan.coverity.com/builds?project=ceph
+
+echo done.
diff --git a/src/script/run-make.sh b/src/script/run-make.sh
new file mode 100755
index 000000000..683272666
--- /dev/null
+++ b/src/script/run-make.sh
@@ -0,0 +1,204 @@
+#!/usr/bin/env bash
+
+set -e
+
+trap clean_up_after_myself EXIT
+
+ORIGINAL_CCACHE_CONF="$HOME/.ccache/ccache.conf"
+SAVED_CCACHE_CONF="$HOME/.run-make-check-saved-ccache-conf"
+
+function in_jenkins() {
+ test -n "$JENKINS_HOME"
+}
+
+function save_ccache_conf() {
+ test -f $ORIGINAL_CCACHE_CONF && cp $ORIGINAL_CCACHE_CONF $SAVED_CCACHE_CONF || true
+}
+
+function restore_ccache_conf() {
+ test -f $SAVED_CCACHE_CONF && mv $SAVED_CCACHE_CONF $ORIGINAL_CCACHE_CONF || true
+}
+
+function clean_up_after_myself() {
+ rm -fr ${CEPH_BUILD_VIRTUALENV:-/tmp}/*virtualenv*
+ restore_ccache_conf
+}
+
+function get_processors() {
+ # get_processors() depends on coreutils nproc.
+ if test -n "$NPROC" ; then
+ echo $NPROC
+ else
+ if test $(nproc) -ge 2 ; then
+ expr $(nproc) / 2
+ else
+ echo 1
+ fi
+ fi
+}
+
+function detect_ceph_dev_pkgs() {
+ local cmake_opts="-DWITH_FMT_VERSION=9.0.0"
+ local boost_root=/opt/ceph
+ if test -f $boost_root/include/boost/config.hpp; then
+ cmake_opts+=" -DWITH_SYSTEM_BOOST=ON -DBOOST_ROOT=$boost_root"
+ else
+ cmake_opts+=" -DBOOST_J=$(get_processors)"
+ fi
+
+ source /etc/os-release
+ if [[ "$ID" == "ubuntu" ]]; then
+ case "$VERSION" in
+ *Xenial*)
+ cmake_opts+=" -DWITH_RADOSGW_KAFKA_ENDPOINT=OFF";;
+ *Focal*)
+ cmake_opts+=" -DWITH_SYSTEM_ZSTD=ON";;
+ esac
+ fi
+ echo "$cmake_opts"
+}
+
+function do_install() {
+ local install_cmd
+ local pkgs
+ local ret
+ install_cmd=$1
+ shift
+ pkgs=$@
+ shift
+ ret=0
+ $DRY_RUN sudo $install_cmd $pkgs || ret=$?
+ if test $ret -eq 0 ; then
+ return
+ fi
+    # try harder with apt-get if it was interrupted
+ if [[ $install_cmd == *"apt-get"* ]]; then
+ if test $ret -eq 100 ; then
+ # dpkg was interrupted
+ $DRY_RUN sudo dpkg --configure -a
+ in_jenkins && echo "CI_DEBUG: Running 'sudo $install_cmd $pkgs'"
+ $DRY_RUN sudo $install_cmd $pkgs
+ else
+ return $ret
+ fi
+ fi
+}
+function prepare() {
+ local install_cmd
+ local which_pkg="which"
+ source /etc/os-release
+ if test -f /etc/redhat-release ; then
+ if ! type bc > /dev/null 2>&1 ; then
+ echo "Please install bc and re-run."
+ exit 1
+ fi
+ if test "$(echo "$VERSION_ID >= 22" | bc)" -ne 0; then
+ install_cmd="dnf -y install"
+ else
+ install_cmd="yum install -y"
+ fi
+ elif type zypper > /dev/null 2>&1 ; then
+ install_cmd="zypper --gpg-auto-import-keys --non-interactive install --no-recommends"
+ elif type apt-get > /dev/null 2>&1 ; then
+ install_cmd="apt-get install -y"
+ which_pkg="debianutils"
+ fi
+
+ if ! type sudo > /dev/null 2>&1 ; then
+ echo "Please install sudo and re-run. This script assumes it is running"
+ echo "as a normal user with the ability to run commands as root via sudo."
+ exit 1
+ fi
+ if [ -n "$install_cmd" ]; then
+ in_jenkins && echo "CI_DEBUG: Running '$install_cmd ccache $which_pkg clang'"
+ do_install "$install_cmd" ccache $which_pkg clang
+ else
+ echo "WARNING: Don't know how to install packages" >&2
+ echo "This probably means distribution $ID is not supported by run-make-check.sh" >&2
+ fi
+
+ if ! type ccache > /dev/null 2>&1 ; then
+ echo "ERROR: ccache could not be installed"
+ exit 1
+ fi
+
+ if test -f ./install-deps.sh ; then
+ in_jenkins && echo "CI_DEBUG: Running install-deps.sh"
+ $DRY_RUN source ./install-deps.sh || return 1
+ trap clean_up_after_myself EXIT
+ fi
+
+ cat <<EOM
+Note that the binaries produced by this script do not contain correct time
+and git version information, which may make them unsuitable for debugging
+and production use.
+EOM
+ save_ccache_conf
+ # remove the entropy generated by the date/time embedded in the build
+ $DRY_RUN export SOURCE_DATE_EPOCH="946684800"
+ $DRY_RUN ccache -o sloppiness=time_macros
+ $DRY_RUN ccache -o run_second_cpp=true
+ if in_jenkins; then
+ # Build host has plenty of space available, let's use it to keep
+ # various versions of the built objects. This could increase the cache hit
+ # if the same or similar PRs are running several times
+ $DRY_RUN ccache -o max_size=100G
+ else
+ echo "Current ccache max_size setting:"
+ ccache -p | grep max_size
+ fi
+ $DRY_RUN ccache -sz # show the ccache statistics and zero the counters, so the post-build "ccache -s" reflects this run
+}
+
+function configure() {
+ local cmake_build_opts=$(detect_ceph_dev_pkgs)
+ in_jenkins && echo "CI_DEBUG: Running do_cmake.sh"
+ $DRY_RUN ./do_cmake.sh $cmake_build_opts $@ || return 1
+}
+
+function build() {
+ local targets="$@"
+ if test -n "$targets"; then
+ targets="--target $targets"
+ fi
+ $DRY_RUN cd build
+ BUILD_MAKEOPTS=${BUILD_MAKEOPTS:-$DEFAULT_MAKEOPTS}
+ test "$BUILD_MAKEOPTS" && echo "make will run with option(s) $BUILD_MAKEOPTS"
+ # older cmake does not support --parallel or -j, so pass the job count to the underlying generator
+ in_jenkins && echo "CI_DEBUG: Running cmake"
+ $DRY_RUN cmake --build . $targets -- $BUILD_MAKEOPTS || return 1
+ $DRY_RUN ccache -s # print the ccache statistics to evaluate the efficiency
+}
+
+DEFAULT_MAKEOPTS=${DEFAULT_MAKEOPTS:--j$(get_processors)}
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ # not sourced
+ if [ `uname` = FreeBSD ]; then
+ GETOPT=/usr/local/bin/getopt
+ else
+ GETOPT=getopt
+ fi
+
+ options=$(${GETOPT} --name "$0" --options "" --longoptions "cmake-args:" -- "$@")
+ if [ $? -ne 0 ]; then
+ exit 2
+ fi
+ eval set -- "${options}"
+ while true; do
+ case "$1" in
+ --cmake-args)
+ cmake_args=$2
+ shift 2;;
+ --)
+ shift
+ break;;
+ *)
+ echo "bad option $1" >& 2
+ exit 2;;
+ esac
+ done
+ prepare
+ configure "$cmake_args"
+ build "$@"
+fi
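+
+# Example invocation (hypothetical): preview building a single target with an
+# extra cmake flag by letting DRY_RUN=echo print commands instead of running them:
+# DRY_RUN=echo ./src/script/run-make.sh --cmake-args "-DWITH_MGR=OFF" ceph-osd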
diff --git a/src/script/run_mypy.sh b/src/script/run_mypy.sh
new file mode 100755
index 000000000..318a2c622
--- /dev/null
+++ b/src/script/run_mypy.sh
@@ -0,0 +1,108 @@
+#!/usr/bin/env bash
+
+# needs to be executed from the src directory.
+# generates a report at src/mypy_report.txt
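+# Typical use, assuming a ceph checkout:
+#   cd ceph/src && ./script/run_mypy.sh && less mypy_report.txt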
+
+set -e
+
+python3 -m venv .mypy_venv
+
+. .mypy_venv/bin/activate
+
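+# install every requirements.txt in the tree (the dashboard frontend excluded);
+# the leading "!" tolerates a pip failure here despite "set -e"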
+! pip install $(find -name requirements.txt -not -path './frontend/*' -printf '-r%p ')
+pip install mypy
+
+MYPY_INI="$PWD"/mypy.ini
+
+export MYPYPATH="$PWD/pybind/rados:$PWD/pybind/rbd:$PWD/pybind/cephfs"
+
+echo -n > mypy_report.txt
+pushd pybind
+mypy --config-file="$MYPY_INI" *.py | awk '{print "pybind/" $0}' >> ../mypy_report.txt
+popd
+
+pushd pybind/mgr
+mypy --config-file="$MYPY_INI" $(find * -name '*.py' | grep -v -e venv -e tox -e env -e gyp -e node_modules) | awk '{print "pybind/mgr/" $0}' >> ../../mypy_report.txt
+popd
+
+pushd ceph-volume/ceph_volume
+mypy --config-file="$MYPY_INI" $(find * -name '*.py' | grep -v -e venv -e tox -e env -e gyp -e node_modules -e tests) | awk '{print "ceph-volume/ceph_volume/" $0}' >> ../../mypy_report.txt
+popd
+
+SORT_MYPY=$(cat <<-EOF
+#!/bin/python3
+import re
+from collections import namedtuple
+
+class Line(namedtuple('Line', 'prefix no rest')):
+ @classmethod
+ def parse(cls, l):
+ if not l:
+ return cls('', 0, '')
+ if re.search('Found [0-9]+ errors in [0-9]+ files', l):
+ return cls('', 0, '')
+ p, *rest = l.split(':', 2)
+ if len(rest) == 1:
+ return cls(p, 0, rest[0])
+ elif len(rest) == 2:
+ try:
+ return cls(p, int(rest[0]), rest[1])
+ except ValueError:
+ return cls(p, 0, rest[0] + ':' + rest[1])
+ assert False, rest
+
+class Group(object):
+ def __init__(self, line):
+ self.line = line
+ self.lines = []
+
+ def matches(self, other):
+ return Line.parse(self.line).prefix == Line.parse(other).prefix
+
+ def __bool__(self):
+ return bool(self.lines) or ': note: In' not in self.line
+
+ def __str__(self):
+ return '\n'.join([self.line] + self.lines)
+
+ def key(self):
+ l1 = Line.parse(self.line)
+ if l1.no:
+ return l1.prefix, int(l1.no)
+ if not self.lines:
+ return l1.prefix, None
+ return l1.prefix, Line.parse(self.lines[0]).no
+
+def parse(text):
+ groups = []
+
+ def group():
+ try:
+ return groups[-1]
+ except IndexError:
+ groups.append(Group(''))
+ return groups[-1]
+
+ for l in text:
+ l = l.strip()
+ if ': note: In' in l or not group().matches(l):
+ groups.append(Group(l))
+ elif not l:
+ pass
+ else:
+ group().lines.append(l)
+
+ return (g for g in groups if g)
+
+def render(groups):
+ groups = sorted(groups, key=Group.key)
+ return '\n'.join(map(str, groups))
+
+with open('mypy_report.txt') as f:
+ new = render(parse(f))
+with open('mypy_report.txt', 'w') as f:
+ f.write(new)
+EOF
+)
+
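+# The sorter defined above (run below) groups report lines under their
+# "<file>: note: In ..." headers, for example (hypothetical output):
+#   pybind/mgr/foo.py: note: In member "bar" of class "Baz":
+#   pybind/mgr/foo.py:42: error: Incompatible return value type
+# and orders the groups by (file, first line number) so the report is stable.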
+python <(echo "$SORT_MYPY")
diff --git a/src/script/run_tox.sh b/src/script/run_tox.sh
new file mode 100755
index 000000000..9d45d8b92
--- /dev/null
+++ b/src/script/run_tox.sh
@@ -0,0 +1,131 @@
+#!/usr/bin/env bash
+
+set -e
+
+if [ `uname` = FreeBSD ]; then
+ GETOPT=/usr/local/bin/getopt
+else
+ GETOPT=getopt
+fi
+
+function usage() {
+ local prog_name=$(basename $1)
+ shift
+ cat <<EOF
+$prog_name [options] ... [test_name]
+
+options:
+
+ [-h|--help] display this help message
+ [--source-dir dir] root source directory of Ceph. deduced by the path of this script by default.
+ [--build-dir dir] build directory of Ceph. "\$source_dir/build" by default.
+ [--tox-path dir] directory in which "tox.ini" is located. if "test_name" is not specified, it is the current directory by default, otherwise the script will try to find a directory with the name of specified \$test_name with a "tox.ini" under it.
+ <--tox-envs envs> tox envlist. this option is required.
+ [--venv-path] the python virtualenv path. \$build_dir/\$test_name by default.
+
+example:
+
+following command will run tox with envlist of "py3,mypy" using the "tox.ini" in current directory.
+
+ $prog_name --tox-envs py3,mypy
+
+following command will run tox with envlist of "py3" using "/ceph/src/python-common/tox.ini"
+
+ $prog_name --tox-envs py3 --tox-path /ceph/src/python-common
+EOF
+}
+
+function get_cmake_variable() {
+ local cmake_cache=$1/CMakeCache.txt
+ shift
+ local variable=$1
+ shift
+ if [ -e $cmake_cache ]; then
+ grep "$variable" $cmake_cache | cut -d "=" -f 2
+ fi
+}
+
+function get_tox_path() {
+ local test_name=$1
+ if [ -n "$test_name" ]; then
+ local found=$(find $source_dir -path "*/$test_name/tox.ini")
+ echo $(dirname $found)
+ elif [ -e tox.ini ]; then
+ echo $(pwd)
+ fi
+}
+
+function main() {
+ local tox_path
+ local script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+ local build_dir=$script_dir/../../build
+ local source_dir=$(get_cmake_variable $build_dir ceph_SOURCE_DIR)
+ local tox_envs
+ local options
+
+ options=$(${GETOPT} --name "$0" --options 'h' --longoptions "help,source-dir:,build-dir:,tox-path:,tox-envs:,venv-path:" -- "$@")
+ if [ $? -ne 0 ]; then
+ exit 2
+ fi
+ eval set -- "${options}"
+ while true; do
+ case "$1" in
+ -h|--help)
+ usage $0
+ exit 0;;
+ --source-dir)
+ source_dir=$2
+ shift 2;;
+ --build-dir)
+ build_dir=$2
+ shift 2;;
+ --tox-path)
+ tox_path=$2
+ shift 2;;
+ --tox-envs)
+ tox_envs=$2
+ shift 2;;
+ --venv-path)
+ venv_path=$2
+ shift 2;;
+ --)
+ shift
+ break;;
+ *)
+ echo "bad option $1" >& 2
+ exit 2;;
+ esac
+ done
+
+ local test_name
+ if [ -z "$tox_path" ]; then
+ # try harder
+ if [ $# -gt 0 ]; then
+ test_name=$1
+ shift
+ fi
+ tox_path=$(get_tox_path $test_name)
+ else
+ test_name=$(basename $tox_path)
+ fi
+ # honor --venv-path if specified; otherwise use the documented default
+ venv_path="${venv_path:-$build_dir/$test_name}"
+
+ if [ ! -f ${venv_path}/bin/activate ]; then
+ if [ -d "$venv_path" ]; then
+ cd $venv_path
+ echo "$PWD already exists, but it's not a virtualenv. test_name empty?"
+ exit 1
+ fi
+ $source_dir/src/tools/setup-virtualenv.sh ${venv_path}
+ fi
+ source ${venv_path}/bin/activate
+ pip install tox
+
+ # tox.ini will take care of this.
+ export CEPH_BUILD_DIR=$build_dir
+ # use the wheelhouse prepared by install-deps.sh
+ export PIP_FIND_LINKS="$tox_path/wheelhouse"
+ tox -c $tox_path/tox.ini -e "$tox_envs" "$@"
+}
+
+main "$@"
diff --git a/src/script/run_uml.sh b/src/script/run_uml.sh
new file mode 100755
index 000000000..9bff38b22
--- /dev/null
+++ b/src/script/run_uml.sh
@@ -0,0 +1,212 @@
+#!/bin/bash -norc
+
+# Magic startup script for a UML instance. As long as unique
+# instances are started, more than one of them can be concurrently
+# in use on a single system. All their network interfaces are
+# bridged together onto the virtual bridge "virbr0" which is
+# supplied by the "libvirt" package.
+#
+# Note that a DHCP server is started for that interface. It's
+# configured in this file:
+# /etc/libvirt/qemu/networks/default.xml
+# Unfortunately what I see there serves all possible DHCP addresses,
+# so stealing them like we do here isn't really kosher. To fix
+# it, that configuration should change to serve a smaller subset
+# of the available address range.
+#
+# Each instance uses its own tun/tap device, created using the
+# "tunctl" command. The assigned tap device will correspond with
+# the guest id (a small integer representing the instance), i.e.,
+# guest id 1 uses tap1, etc. The tap device is attached to the
+# virtual bridge, which will have its own subnet associated with it.
+# The guest side of that interface will have the same subnet as the
+# bridge interface, with the bottom bits representing (normally) 100
+# more than the guest id. So for subnet 192.168.122.0/24, guest
+# id 1 will use ip 192.168.122.101, guest id 2 will use ip
+# 192.168.122.102, and so on. Because these interfaces are bridged,
+# they can all communicate with each other.
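+#
+# As a concrete example of that rule, with libvirt's default 192.168.122.0/24
+# subnet and the default GUEST_IP_OFFSET of 100:
+# guest_id=2
+# echo "tap${guest_id} -> 192.168.122.$((100 + guest_id))"
+# # prints: tap2 -> 192.168.122.102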
+
+# You will want to override this by setting and exporting the
+# "CEPH_TOP" environment variable to be the directory that contains
+# the "ceph-client" source tree.
+CEPH_TOP="${CEPH_TOP:-/home/elder/ceph}"
+
+# You may want to change this too, if you want guest UML instances
+# to have a different IP address range. The guest IP will be based
+# on this plus GUEST_ID (defined below).
+GUEST_IP_OFFSET="${GUEST_IP_OFFSET:-100}"
+
+#############################
+
+if [ $# -gt 1 ]; then
+ echo "" >&2
+ echo "Usage: $(basename $0) [guest_id]" >&2
+ echo "" >&2
+ echo " guest_id is a small integer (default 1)" >&2
+ echo " (each UML instance needs a distinct guest_id)" >&2
+ echo "" >&2
+ exit 1
+elif [ $# -eq 1 ]; then
+ GUEST_ID="$1"
+else
+ GUEST_ID=1
+fi
+
+# This will be what the guest host calls itself.
+GUEST_HOSTNAME="uml-${GUEST_ID}"
+
+# This is the path to the boot disk image used by UML.
+DISK_IMAGE_A="${CEPH_TOP}/ceph-client/uml.${GUEST_ID}"
+if [ ! -f "${DISK_IMAGE_A}" ]; then
+ echo "root disk image not found (or not a file)" >&2
+ exit 2
+fi
+
+# Hostid 1 uses tun/tap device tap1, hostid 2 uses tap2, etc.
+TAP_ID="${GUEST_ID}"
+# This is the tap device used for this UML instance
+TAP="tap${TAP_ID}"
+
+# This is just used to mount an image temporarily
+TMP_MNT="/tmp/m$$"
+
+# Where to put a config file generated for this tap device
+TAP_IFUPDOWN_CONFIG="/tmp/interface-${TAP}"
+
+# Compute the HOST_IP and BROADCAST address values to use,
+# and assign shell variables with those names to their values.
+# Also compute BITS, which is the network prefix length used.
+# The NETMASK is then computed using that BITS value.
+eval $(
+ip addr show virbr0 | awk '
+/inet/ {
+ split($2, a, "/")
+ printf("HOST_IP=%s\n", a[1]);
+ printf("BROADCAST=%s\n", $4);
+ printf("BITS=%s\n", a[2]);
+ exit(0);
+}')
+
+# Use bc to avoid 32-bit wrap when computing netmask
+eval $(
+echo -n "NETMASK="
+bc <<! | fmt | sed 's/ /./g'
+m = 2 ^ 32 - 2 ^ (32 - ${BITS})
+for (p = 24; p >= 0; p = p - 8)
+ m / (2 ^ p) % 256
+!
+)
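+
+# Worked example: with BITS=24, m = 2^32 - 2^8 = 4294967040; the loop prints
+# 255 255 255 0 (one octet per line), which fmt and sed render as 255.255.255.0.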
+
+# Now use the netmask and the host IP to compute the subnet address
+# and from that the guest IP address to use.
+eval $(
+awk '
+function from_quad(addr, a, val, i) {
+ if (split(addr, a, ".") != 4)
+ exit(1); # address not in dotted quad format
+ val = 0;
+ for (i = 1; i <= 4; i++)
+ val = val * 256 + a[i];
+ return val;
+}
+function to_quad(val, addr, i) {
+ addr = "";
+ for (i = 1; i <= 4; i++) {
+ addr = sprintf("%u%s%s", val % 256, i > 1 ? "." : "", addr);
+ val = int(val / 256);
+ }
+ if ((val + 0) != 0)
+ exit(1); # value provided exceeded 32 bits
+ return addr;
+}
+BEGIN {
+ host_ip = from_quad("'${HOST_IP}'");
+ netmask = from_quad("'${NETMASK}'");
+ guest_net_ip = '${GUEST_IP_OFFSET}' + '${GUEST_ID}';
+ if (and(netmask, guest_net_ip))
+ exit(1); # address too big for subnet
+ subnet = and(host_ip, netmask);
+ guest_ip = or(subnet, guest_net_ip);
+ if (guest_ip == host_ip)
+ exit(1); # computed guest ip matches host ip
+
+ printf("SUBNET=%s\n", to_quad(subnet));
+ printf("GUEST_IP=%s\n", to_quad(guest_ip));
+}
+' < /dev/null
+)
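+
+# Worked example: HOST_IP=192.168.122.1, NETMASK=255.255.255.0 and GUEST_ID=1
+# yield SUBNET=192.168.122.0 and GUEST_IP=192.168.122.101 (offset 100 + id 1).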
+
+############## OK, we now know all our network parameters...
+
+# There is a series of things that need to be done as superuser,
+# so group them all into one big (and sort of nested!) sudo request.
+sudo -s <<EnD_Of_sUdO
+# Mount the boot disk for the UML and set up some configuration
+# files there.
+mkdir -p "${TMP_MNT}"
+mount -o loop "${DISK_IMAGE_A}" "${TMP_MNT}"
+
+# Arrange for loopback and eth0 to load automatically,
+# and for eth0 to have our desired network parameters.
+cat > "${TMP_MNT}/etc/network/interfaces" <<!
+# Used by ifup(8) and ifdown(8). See the interfaces(5) manpage or
+# /usr/share/doc/ifupdown/examples for more information.
+auto lo
+iface lo inet loopback
+auto eth0
+# iface eth0 inet dhcp
+iface eth0 inet static
+ address ${GUEST_IP}
+ netmask ${NETMASK}
+ broadcast ${BROADCAST}
+ gateway ${HOST_IP}
+!
+
+# Have the guest start with an appropriate host name.
+# Also record an entry for it in its own hosts file.
+echo "${GUEST_HOSTNAME}" > "${TMP_MNT}/etc/hostname"
+echo "${GUEST_IP} ${GUEST_HOSTNAME}" >> "${TMP_MNT}/etc/hosts"
+
+# The host will serve as the name server also
+cat > "${TMP_MNT}/etc/resolv.conf" <<!
+nameserver ${HOST_IP}
+!
+
+# OK, done tweaking the boot image.
+sync
+umount "${DISK_IMAGE_A}"
+rmdir "${TMP_MNT}"
+
+# Set up a config file for "ifup" and "ifdown" (on the host) to use.
+# All the backslashes below are needed because we're sitting inside
+# a double here-document...
+cat > "${TAP_IFUPDOWN_CONFIG}" <<!
+iface ${TAP} inet manual
+ up brctl addif virbr0 "\\\${IFACE}"
+ up ip link set dev "\\\${IFACE}" up
+ pre-down brctl delif virbr0 "\\\${IFACE}"
+ pre-down ip link del dev "\\\${IFACE}"
+ tunctl_user $(whoami)
+!
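+
+# For tap1, the generated file should end up reading:
+# iface tap1 inet manual
+#     up brctl addif virbr0 "\${IFACE}"
+#     up ip link set dev "\${IFACE}" up
+# ...with \${IFACE} left literal for ifup/ifdown to substitute at run time.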
+
+# OK, bring up the tap device using our config file
+ifup -i "${TAP_IFUPDOWN_CONFIG}" "${TAP}"
+
+EnD_Of_sUdO
+
+# Finally ready to launch the UML instance.
+./linux \
+ umid="${GUEST_HOSTNAME}" \
+ ubda="${DISK_IMAGE_A}" \
+ eth0="tuntap,${TAP}" \
+ mem=1024M
+
+# When we're done, clean up. Bring down the tap interface and
+# delete the config file.
+#
+# Note that if the above "./linux" crashes, you'll need to run the
+# following commands manually in order to clean up state.
+sudo ifdown -i "${TAP_IFUPDOWN_CONFIG}" "${TAP}"
+sudo rm -f "${TAP_IFUPDOWN_CONFIG}"
+
+exit 0
diff --git a/src/script/set_up_stretch_mode.sh b/src/script/set_up_stretch_mode.sh
new file mode 100755
index 000000000..79b214915
--- /dev/null
+++ b/src/script/set_up_stretch_mode.sh
@@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+
+set -x
+
+./bin/ceph config set osd osd_crush_update_on_start false
+
+./bin/ceph osd crush move osd.0 host=host1-1 datacenter=site1 root=default
+./bin/ceph osd crush move osd.1 host=host1-2 datacenter=site1 root=default
+./bin/ceph osd crush move osd.2 host=host2-1 datacenter=site2 root=default
+./bin/ceph osd crush move osd.3 host=host2-2 datacenter=site2 root=default
+
+./bin/ceph osd getcrushmap > crush.map.bin
+./bin/crushtool -d crush.map.bin -o crush.map.txt
+cat <<EOF >> crush.map.txt
+rule stretch_rule {
+ id 1
+ type replicated
+ step take site1
+ step chooseleaf firstn 2 type host
+ step emit
+ step take site2
+ step chooseleaf firstn 2 type host
+ step emit
+}
+rule stretch_rule2 {
+ id 2
+ type replicated
+ step take site1
+ step chooseleaf firstn 2 type host
+ step emit
+ step take site2
+ step chooseleaf firstn 2 type host
+ step emit
+}
+rule stretch_rule3 {
+ id 3
+ type replicated
+ step take site1
+ step chooseleaf firstn 2 type host
+ step emit
+ step take site2
+ step chooseleaf firstn 2 type host
+ step emit
+}
+EOF
+./bin/crushtool -c crush.map.txt -o crush2.map.bin
+./bin/ceph osd setcrushmap -i crush2.map.bin
+./bin/ceph mon set election_strategy connectivity
+
+./bin/ceph mon set_location a datacenter=site1
+./bin/ceph mon set_location b datacenter=site2
+./bin/ceph mon set_location c datacenter=site3
+./bin/ceph osd pool create test_stretch1 1024 1024 replicated
+./bin/ceph mon enable_stretch_mode c stretch_rule datacenter
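+
+# Optional sanity checks (standard Ceph commands, not required by the setup):
+# ./bin/ceph osd crush rule ls
+# ./bin/ceph osd pool get test_stretch1 crush_rule
+# ./bin/ceph mon dump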
diff --git a/src/script/smr_benchmark/linearCopy.sh b/src/script/smr_benchmark/linearCopy.sh
new file mode 100755
index 000000000..416a7e742
--- /dev/null
+++ b/src/script/smr_benchmark/linearCopy.sh
@@ -0,0 +1,91 @@
+#!/usr/bin/env bash
+
+# copy a linear file from srcFile to the destination disk in a loop until writeSize MB has been written
+# destinationDisk is an SMR host-aware disk, e.g. /dev/sdb
+
+if [ "$#" -lt 3 ]; then
+ echo "Usage ./linearCopy.sh srcFile destinationDisk writeSize(MB)"
+ exit
+fi
+
+if [ "$(id -u)" != "0" ]; then
+ echo "Please run as sudo user"
+ exit
+fi
+
+srcFile=$1
+destDisk=$2
+writeSize=$3
+verbose=true
+
+if [ -f time ]; then
+ rm -f time
+fi
+
+#chunkSize=4096 # in bytes
+chunkSize=1048576 # in bytes
+fileSize=`stat --printf="%s" $srcFile`
+
+chunksLeft=$(( $(($writeSize * 1048576)) / $chunkSize))
+
+
+echo "fileSize = $fileSize"
+
+if [ "$(($fileSize % 512))" -ne 0 ]; then
+ echo "$srcFile not 512 byte aligned"
+ exit
+fi
+
+if [ "$(($chunkSize % 512))" -ne 0 ]; then
+ echo "$chunkSize not 512 byte aligned"
+ exit
+fi
+
+if [ "$fileSize" -lt "$chunkSize" ]; then
+ echo "filesize $fileSize should be greater than chunkSize $chunkSize"
+ exit
+fi
+
+
+numFileChunks=$(($fileSize / $chunkSize))
+if [ $verbose == true ]; then
+ echo "numFileChunks = $numFileChunks"
+fi
+
+smrLBAStart=33554432 # TODO query from SMR Drive
+#smrLBAStart=37224448
+
+offset=$(( $smrLBAStart / $(( $chunkSize / 512)) ))
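+# e.g. with the defaults above: chunkSize 1048576 bytes = 2048 sectors, so
+# offset = 33554432 / 2048 = 16384 chunk-sized blocks, i.e. dd starts writing
+# 16 GiB into the disk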
+
+if [ $verbose == true ]; then
+ echo "chunksLeft = $chunksLeft, offset = $offset"
+fi
+
+chunkNum=0
+
+while [ "$chunksLeft" -gt 0 ]; do
+ chunkNum=$(($chunkNum + 1))
+ if [ $verbose == true ]; then
+ echo "CHUNK $chunkNum `date +%H:%M:%S`" >> time
+ fi
+ dd if=$srcFile of=$destDisk seek=$offset bs=$chunkSize 2> tmp
+ cat tmp | grep MB >> time # > /dev/null 2>&1
+ if [ $verbose == true ]; then
+ echo "chunksLeft = $chunksLeft, offset = $offset"
+ fi
+ chunksLeft=$(($chunksLeft - $numFileChunks))
+ offset=$(($offset + $numFileChunks))
+done
+
+if [ -f tmp ]; then
+ rm tmp
+fi
+
+if [ $verbose == false ]; then
+ rm time
+else
+ echo "Time Stamp for Chunk Writes"
+ cat time
+ rm time
+fi
diff --git a/src/script/smr_benchmark/linearSMRCopy.sh b/src/script/smr_benchmark/linearSMRCopy.sh
new file mode 100755
index 000000000..1ff2695c6
--- /dev/null
+++ b/src/script/smr_benchmark/linearSMRCopy.sh
@@ -0,0 +1,69 @@
+#! /usr/bin/env bash
+
+# copy a linear file from srcFile to the destination SMRDisk in a loop until writeSize MB has been written
+# SMRDisk is an SMR host-aware or host-managed disk, e.g. /dev/sdb
+
+usage(){
+ echo "linearSMRCopy.sh <srcFile> <SMRDisk> <writeSize (MB)>"
+}
+
+if [ "$#" -lt 3 ]; then
+ usage
+ exit
+fi
+
+if [ "$(id -u)" != "0" ]; then
+ echo "Please run as sudo user"
+ exit
+fi
+
+srcFile=$1
+SMRDisk=$2
+writeSize=$3
+iosize=10240
+
+if which zbc_open_zone > /dev/null 2>&1 && which zbc_read_zone > /dev/null 2>&1 && which zbc_write_zone > /dev/null 2>&1 ; then
+ echo "libzbc commands present... resetting zones"
+ # reset all write pointers on the target disk before starting to write
+ sudo zbc_reset_write_ptr $SMRDisk -1
+else
+ echo "libzbc commands not detected. Please install libzbc"
+ exit 1
+fi
+
+numberOfSectors=$(($writeSize * 2048))
+
+smrZoneStart=33554432 # TODO query this from SMR drive
+
+#dd if=$srcFile of=$destDisk seek=$smrZoneStart bs=512
+
+fileSize=`stat --printf="%s" $srcFile`
+
+if [ "$(($fileSize % 512))" -ne 0 ]; then
+ echo "$srcFile not 512 byte aligned"
+ exit
+fi
+
+znum=64 # TODO query this from SMR Drive
+
+zoneLength=524288 # number of sectors in each zone TODO query from SMR drive
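+# (524288 sectors * 512 bytes = 256 MiB per zone, so writing writeSize MB
+# touches roughly writeSize/256 zones)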
+
+writeOffset=$smrZoneStart
+
+sectorsLeftToWrite=$numberOfSectors
+
+echo "write begin sectors Left = $sectorsLeftToWrite, writeOffset = $writeOffset zone Num = $znum"
+
+while [ "$sectorsLeftToWrite" -gt 0 ];
+do
+ sudo zbc_open_zone $SMRDisk $znum
+ sudo time zbc_write_zone -f $srcFile -loop $SMRDisk $znum $iosize
+ sudo zbc_close_zone $SMRDisk $znum
+ writeOffset=$(($writeOffset+$zoneLength))
+ znum=$(($znum+1))
+ sectorsLeftToWrite=$(($sectorsLeftToWrite - $zoneLength))
+done
+
+echo "write end sectors Left = $sectorsLeftToWrite, writeOffset = $writeOffset zone Num = $znum"
diff --git a/src/script/strip_trailing_whitespace.sh b/src/script/strip_trailing_whitespace.sh
new file mode 100755
index 000000000..7fa8060a6
--- /dev/null
+++ b/src/script/strip_trailing_whitespace.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
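+# first sed: strip trailing whitespace; second sed: turn a leading run of
+# spaces into a tab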
+sed -i 's/[ \t]*$//' $1
+sed -i 's/^    /\t/' $1
diff --git a/src/script/unhexdump-C b/src/script/unhexdump-C
new file mode 100755
index 000000000..b4e755973
--- /dev/null
+++ b/src/script/unhexdump-C
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+cat $1 | \
+ sed -E 's/  /: /' | \
+ cut -c 1-59 | \
+ sed -E 's/ (..) (..)/ \1\2/g' | \
+ sed 's/  / /g' | \
+ grep ': ' | \
+ xxd -r > $2
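+
+# Roughly, the pipeline above turns a "hexdump -C" line such as:
+# 00000000  48 65 6c 6c 6f 0a 00 00  00 00 00 00 00 00 00 00  |Hello...........|
+# into the xxd-style line "00000000: 4865 6c6c 6f0a 0000 0000 0000 0000 0000"
+# that "xxd -r" can reverse.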
+
+# real hexdump -C has a trailing file size, but it isn't always
+# present
+hexsize=$(tail -1 $1)
+if [ ${#hexsize} = 8 ]; then
+ decsize=$(printf '%d' $hexsize)
+ echo "truncate up to $decsize"
+ truncate --size $decsize $2
+fi