Diffstat (limited to 'src/script')
-rw-r--r--  src/script/CMakeLists.txt                      7
-rwxr-xr-x  src/script/add_header.pl                      26
-rwxr-xr-x  src/script/backport-create-issue             259
-rwxr-xr-x  src/script/bdev_grep.pl                       19
-rwxr-xr-x  src/script/build-integration-branch           70
-rwxr-xr-x  src/script/ceph-backport.sh                  158
-rwxr-xr-x  src/script/ceph-debug-docker.sh              120
-rwxr-xr-x  src/script/ceph-release-notes                310
-rwxr-xr-x  src/script/check_commands.sh                  19
-rw-r--r--  src/script/cmake_uninstall.cmake.in           21
-rwxr-xr-x  src/script/crash_bdev.sh                      10
-rwxr-xr-x  src/script/credits.sh                         46
-rwxr-xr-x  src/script/dep-report.sh                     120
-rwxr-xr-x  src/script/find_dups_in_pg_log.sh             22
-rwxr-xr-x  src/script/fix_modeline.pl                    29
-rwxr-xr-x  src/script/kcon_all.sh                        10
-rwxr-xr-x  src/script/kcon_most.sh                       13
-rw-r--r--  src/script/kubejacker/Dockerfile              34
-rw-r--r--  src/script/kubejacker/README.rst              11
-rwxr-xr-x  src/script/kubejacker/kubejacker.sh           86
-rwxr-xr-x  src/script/ptl-tool.py                       368
-rwxr-xr-x  src/script/run-coverity                       33
-rwxr-xr-x  src/script/run-make.sh                       159
-rwxr-xr-x  src/script/run_mypy.sh                        37
-rwxr-xr-x  src/script/run_uml.sh                        212
-rwxr-xr-x  src/script/sepia_bt.sh                       182
-rwxr-xr-x  src/script/smr_benchmark/linearCopy.sh        91
-rwxr-xr-x  src/script/smr_benchmark/linearSMRCopy.sh     69
-rwxr-xr-x  src/script/strip_trailing_whitespace.sh        4
29 files changed, 2545 insertions(+), 0 deletions(-)
diff --git a/src/script/CMakeLists.txt b/src/script/CMakeLists.txt
new file mode 100644
index 00000000..fdc0e83e
--- /dev/null
+++ b/src/script/CMakeLists.txt
@@ -0,0 +1,7 @@
+configure_file(
+ "${CMAKE_CURRENT_SOURCE_DIR}/cmake_uninstall.cmake.in"
+ "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake"
+ IMMEDIATE @ONLY)
+
+add_custom_target(uninstall
+ COMMAND ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake)
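
Together with cmake_uninstall.cmake.in (added below in this diff), this wires up
an uninstall target. Assuming a prior "make install" left an install_manifest.txt
in the build directory, usage is simply:

    $ cd build
    $ make uninstall          # equivalently: cmake --build . --target uninstall
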
diff --git a/src/script/add_header.pl b/src/script/add_header.pl
new file mode 100755
index 00000000..023c06e4
--- /dev/null
+++ b/src/script/add_header.pl
@@ -0,0 +1,26 @@
+#!/usr/bin/perl
+
+use strict;
+my $fn = shift @ARGV;
+my $old = `cat $fn`;
+
+my $header = `cat doc/header.txt`;
+
+# strip existing header
+my $new = $old;
+if ($new =~ /^(.*)\* Ceph - scalable distributed file system/s) {
+ my ($a,@b) = split(/\*\/\n/, $new);
+ $new = join("*/\n",@b);
+}
+$new = $header . $new;
+
+if ($new ne $old) {
+ open(O, ">$fn.new");
+ print O $new;
+ close O;
+ system "diff $fn $fn.new";
+ rename "$fn.new", $fn;
+ #unlink "$fn.new";
+
+}
+
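
add_header.pl reads doc/header.txt relative to the working directory, so it is
meant to be run from the top of the tree; a typical invocation (the target file
here is only an example):

    $ cd ceph
    $ perl src/script/add_header.pl src/osd/OSD.cc
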
diff --git a/src/script/backport-create-issue b/src/script/backport-create-issue
new file mode 100755
index 00000000..c9954af3
--- /dev/null
+++ b/src/script/backport-create-issue
@@ -0,0 +1,259 @@
+#!/usr/bin/env python3
+#
+# backport-create-issue
+#
+# Standalone version of the "backport-create-issue" subcommand of
+# "ceph-workbench" by Loic Dachary.
+#
+# This script scans Redmine (tracker.ceph.com) for issues in "Pending Backport"
+# status and creates backport issues for them, based on the contents of the
+# "Backport" field while trying to avoid creating duplicate backport issues.
+#
+# Copyright (C) 2015 <contact@redhat.com>
+# Copyright (C) 2018, SUSE LLC
+#
+# Author: Loic Dachary <loic@dachary.org>
+# Author: Nathan Cutler <ncutler@suse.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import argparse
+import logging
+import re
+import time
+from redminelib import Redmine # https://pypi.org/project/python-redmine/
+
+redmine_endpoint = "http://tracker.ceph.com"
+project_name = "Ceph"
+release_id = 16
+delay_seconds = 5
+#
+# NOTE: release_id is hard-coded because
+# http://www.redmine.org/projects/redmine/wiki/Rest_CustomFields
+# requires administrative permissions. If and when
+# https://www.redmine.org/issues/18875
+# is resolved, it could maybe be replaced by the following code:
+#
+# for field in redmine.custom_field.all():
+# if field.name == 'Release':
+# release_id = field.id
+#
+status2status_id = {}
+project_id2project = {}
+tracker2tracker_id = {}
+version2version_id = {}
+
+def usage():
+ logging.error("Command-line arguments must include either a Redmine key (--key) "
+ "or a Redmine username and password (via --user and --password). "
+ "Optionally, one or more issue numbers can be given via positional "
+ "argument(s). In the absence of positional arguments, the script "
+ "will loop through all issues in Pending Backport status.")
+ exit(-1)
+
+def parse_arguments():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("issue_numbers", nargs='*', help="Issue number")
+ parser.add_argument("--key", help="Redmine user key")
+ parser.add_argument("--user", help="Redmine user")
+ parser.add_argument("--password", help="Redmine password")
+ parser.add_argument("--debug", help="Show debug-level messages",
+ action="store_true")
+ parser.add_argument("--dry-run", help="Do not write anything to Redmine",
+ action="store_true")
+ return parser.parse_args()
+
+def set_logging_level(a):
+ if a.debug:
+ logging.basicConfig(level=logging.DEBUG)
+ else:
+ logging.basicConfig(level=logging.INFO)
+ return None
+
+def report_dry_run(a):
+ if a.dry_run:
+ logging.info("Dry run: nothing will be written to Redmine")
+ else:
+ logging.warning("Missing issues will be created in Backport tracker "
+ "of the relevant Redmine project")
+
+def connect_to_redmine(a):
+ if a.key:
+ logging.info("Redmine key was provided; using it")
+ return Redmine(redmine_endpoint, key=a.key)
+ elif a.user and a.password:
+ logging.info("Redmine username and password were provided; using them")
+ return Redmine(redmine_endpoint, username=a.user, password=a.password)
+ else:
+ usage()
+
+def releases():
+ return ('argonaut', 'bobtail', 'cuttlefish', 'dumpling', 'emperor',
+ 'firefly', 'giant', 'hammer', 'infernalis', 'jewel', 'kraken',
+ 'luminous', 'mimic')
+
+def populate_status_dict(r):
+ for status in r.issue_status.all():
+ status2status_id[status.name] = status.id
+ logging.debug("Statuses {}".format(status2status_id))
+ return None
+
+# not used currently, but might be useful
+def populate_version_dict(r, p_id):
+ versions = r.version.filter(project_id=p_id)
+ for version in versions:
+ version2version_id[version.name] = version.id
+ #logging.debug("Versions {}".format(version2version_id))
+ return None
+
+def populate_tracker_dict(r):
+ for tracker in r.tracker.all():
+ tracker2tracker_id[tracker.name] = tracker.id
+ logging.debug("Trackers {}".format(tracker2tracker_id))
+ return None
+
+def has_tracker(r, p_id, tracker_name):
+ for tracker in get_project(r, p_id).trackers:
+ if tracker['name'] == tracker_name:
+ return True
+ return False
+
+def get_project(r, p_id):
+ if p_id not in project_id2project:
+ p_obj = r.project.get(p_id, include='trackers')
+ project_id2project[p_id] = p_obj
+ return project_id2project[p_id]
+
+def url(issue):
+ return redmine_endpoint + "/issues/" + str(issue['id'])
+
+def set_backport(issue):
+ for field in issue['custom_fields']:
+ if field['name'] == 'Backport' and field['value'] != 0:
+ issue['backports'] = set(re.findall('\w+', field['value']))
+ logging.debug("backports for " + str(issue['id']) +
+ " is " + str(field['value']) + " " +
+ str(issue['backports']))
+ return True
+ return False
+
+def get_release(issue):
+ for field in issue.custom_fields:
+ if field['name'] == 'Release':
+ return field['value']
+
+def update_relations(r, issue, dry_run):
+ relations = r.issue_relation.filter(issue_id=issue['id'])
+ existing_backports = set()
+ for relation in relations:
+ other = r.issue.get(relation['issue_to_id'])
+ if other['tracker']['name'] != 'Backport':
+ logging.debug(url(issue) + " ignore relation to " +
+ url(other) + " because it is not in the Backport " +
+ "tracker")
+ continue
+ if relation['relation_type'] != 'copied_to':
+ logging.error(url(issue) + " unexpected relation '" +
+ relation['relation_type'] + "' to " + url(other))
+ continue
+ release = get_release(other)
+ if release in existing_backports:
+ logging.error(url(issue) + " duplicate " + release +
+ " backport issue detected")
+ continue
+ existing_backports.add(release)
+ logging.debug(url(issue) + " backport to " + release + " is " +
+ redmine_endpoint + "/issues/" + str(relation['issue_to_id']))
+ if existing_backports == issue['backports']:
+ logging.debug(url(issue) + " has all the required backport issues")
+ return None
+ if existing_backports.issuperset(issue['backports']):
+ logging.error(url(issue) + " has more backport issues (" +
+ ",".join(sorted(existing_backports)) + ") than expected (" +
+ ",".join(sorted(issue['backports'])) + ")")
+ return None
+ backport_tracker_id = tracker2tracker_id['Backport']
+ for release in issue['backports'] - existing_backports:
+ if release not in releases():
+ logging.error(url(issue) + " requires backport to " +
+ "unknown release " + release)
+ break
+ subject = release + ": " + issue['subject']
+ if dry_run:
+ logging.info(url(issue) + " add backport to " + release)
+ continue
+ other = r.issue.create(project_id=issue['project']['id'],
+ tracker_id=backport_tracker_id,
+ subject=subject,
+ priority='Normal',
+ target_version=None,
+ custom_fields=[{
+ "id": release_id,
+ "value": release,
+ }])
+ logging.debug("Rate-limiting to avoid seeming like a spammer")
+ time.sleep(delay_seconds)
+ r.issue_relation.create(issue_id=issue['id'],
+ issue_to_id=other['id'],
+ relation_type='copied_to')
+ logging.info(url(issue) + " added backport to " +
+ release + " " + url(other))
+ return None
+
+def iterate_over_backports(r, issues, dry_run):
+ counter = 0
+ for issue in issues:
+ counter += 1
+ logging.debug("{} ({}) {}".format(issue.id, issue.project,
+ issue.subject))
+ print('{}\r'.format(issue.id), end='', flush=True)
+ if not has_tracker(r, issue['project']['id'], 'Backport'):
+ logging.info("{} skipped because the project {} does not "
+ "have a Backport tracker".format(url(issue),
+ issue['project']['name']))
+ continue
+ if not set_backport(issue):
+ logging.error(url(issue) + " no backport field")
+ continue
+ if len(issue['backports']) == 0:
+ logging.error(url(issue) + " the backport field is empty")
+ update_relations(r, issue, dry_run)
+ logging.info("Processed {} issues with status Pending Backport"
+ .format(counter))
+ return None
+
+
+if __name__ == '__main__':
+ args = parse_arguments()
+ set_logging_level(args)
+ report_dry_run(args)
+ redmine = connect_to_redmine(args)
+ project = redmine.project.get(project_name)
+ ceph_project_id = project.id
+ logging.debug("Project {} has ID {}".format(project_name, ceph_project_id))
+ populate_status_dict(redmine)
+ pending_backport_status_id = status2status_id["Pending Backport"]
+ logging.debug("Pending Backport status has ID {}"
+ .format(pending_backport_status_id))
+ populate_tracker_dict(redmine)
+ if args.issue_numbers:
+ issue_list = ','.join(args.issue_numbers)
+ logging.info("Processing issue list ->{}<-".format(issue_list))
+ issues = redmine.issue.filter(project_id=ceph_project_id,
+ issue_id=issue_list,
+ status_id=pending_backport_status_id)
+ else:
+ issues = redmine.issue.filter(project_id=ceph_project_id,
+ status_id=pending_backport_status_id)
+ iterate_over_backports(redmine, issues, args.dry_run)
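
Typical invocations of the script above (REDMINE_KEY stands in for your actual
Redmine API key; a --dry-run pass first is prudent, since the script otherwise
writes to the live tracker):

    $ backport-create-issue --key $REDMINE_KEY --dry-run
    $ backport-create-issue --key $REDMINE_KEY 19206
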
diff --git a/src/script/bdev_grep.pl b/src/script/bdev_grep.pl
new file mode 100755
index 00000000..a343aad4
--- /dev/null
+++ b/src/script/bdev_grep.pl
@@ -0,0 +1,19 @@
+#!/usr/bin/perl
+
+my $offset = shift @ARGV;
+
+while (<>) {
+ # next unless / \d\d bdev /;
+ my $rest = $_;
+ my @hit;
+ while ($rest =~ /([\da-f]+)[~\+]([\da-f]+)/) {
+ my ($o, $l) = $rest =~ /([\da-f]+)[~\+]([\da-f]+)/;
+ $rest = $';
+ if (hex($offset) >= hex($o) &&
+ hex($offset) < hex($o) + hex($l)) {
+ my $rel = hex($offset) - hex($o);
+ push(@hit, sprintf("%x",$rel));
+ }
+ }
+ print join(',',@hit) . "\t$_" if @hit;
+}
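
bdev_grep.pl takes a hexadecimal device offset and scans log lines (from files
given as arguments, or stdin) for offset~length or offset+length extents that
contain it, printing each matching line prefixed by the offset relative to the
extent; for example (the offset and log path are placeholders):

    $ perl src/script/bdev_grep.pl 2a7d000 out/osd.0.log
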
diff --git a/src/script/build-integration-branch b/src/script/build-integration-branch
new file mode 100755
index 00000000..a15d88cd
--- /dev/null
+++ b/src/script/build-integration-branch
@@ -0,0 +1,70 @@
+#!/usr/bin/env python3
+
+from __future__ import print_function
+
+import json
+import os
+import requests
+from subprocess import call
+import sys
+import time
+try:
+ from urllib.parse import urljoin
+except ImportError:
+ from urlparse import urljoin
+
+label = sys.argv[1]
+repo = "ceph/ceph"
+
+with open(os.environ['HOME'] + '/.github_token', 'r') as myfile:
+ token = myfile.readline().strip()
+
+# get prs
+baseurl = urljoin('https://api.github.com', (
+ 'repos/{repo}/issues?labels={label}'
+ '&access_token={token}'
+ '&sort=created'
+ '&direction=asc'
+ )
+ )
+url = baseurl.format(
+ label=label,
+ repo=repo,
+ token=token)
+r = requests.get(url)
+assert(r.ok)
+j = json.loads(r.text or r.content)
+print("--- found %d issues tagged with %s" % (len(j), label))
+
+prs = []
+prtext = []
+for issue in j:
+ if 'pull_request' not in issue:
+ continue
+ r = requests.get(issue['pull_request']['url'] + '?access_token=' + token)
+ pr = json.loads(r.text or r.content)
+ prs.append(pr)
+ prtext.append(pr['html_url'] + ' - ' + pr['title'])
+print("--- queried %s prs" % len(prs))
+
+# name branch
+TIME_FORMAT = '%Y-%m-%d-%H%M'
+branch = label + "-" + time.strftime(TIME_FORMAT, time.localtime())
+print("branch %s" % branch)
+
+# assemble
+print('--- creating branch %s' % branch)
+r = call(['git', 'checkout', '-b', branch])
+assert not r
+for pr in prs:
+ print('--- pr %d --- pulling %s branch %s' % (
+ pr['number'],
+ pr['head']['repo']['clone_url'],
+ pr['head']['ref']))
+ r = call(['git', 'pull', '--no-edit',
+ pr['head']['repo']['clone_url'],
+ pr['head']['ref']])
+ assert not r
+print('--- done. these PRs were included:')
+print('\n'.join(prtext).encode('ascii', errors='ignore').decode())
+print('--- perhaps you want to: make && ctest -j12 && git push ci %s' % branch)
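
The label is the only command-line argument; the GitHub token is read from the
first line of ~/.github_token. For example:

    $ echo "$GITHUB_TOKEN" > ~/.github_token      # one-time setup
    $ src/script/build-integration-branch wip-pdonnell-testing
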
diff --git a/src/script/ceph-backport.sh b/src/script/ceph-backport.sh
new file mode 100755
index 00000000..cf1b7f26
--- /dev/null
+++ b/src/script/ceph-backport.sh
@@ -0,0 +1,158 @@
+#!/bin/bash -e
+#
+# ceph-backport.sh
+#
+# Ceph backporting script
+#
+# Assumes you have forked ceph/ceph.git, cloned your fork, and are running the
+# script in the local clone!
+#
+# With this script, backporting workflow for backport issue
+# http://tracker.ceph.com/issues/19206 (a jewel backport)
+# becomes something like this:
+#
+# git remote add ceph http://github.com/ceph/ceph.git
+# git fetch ceph
+# git checkout -b wip-19206-jewel ceph/jewel
+# git cherry-pick -x ...
+# ceph-backport.sh 19206 jewel
+#
+# The script takes care of opening the backport PR, updating the tracker issue,
+# and cross-linking the backport PR with the tracker issue.
+#
+# However, before you start you will need to find the right values for
+# the following:
+#
+# redmine_key # "My account" -> "API access key" -> "Show"
+# redmine_user_id # "Logged in as foobar", click on foobar link, Redmine User ID
+ # is in the URL, i.e. http://tracker.ceph.com/users/[redmine_user_id]
+# github_token # https://github.com/settings/tokens -> Generate new token ->
+ # ensure it has "Full control of private repositories" scope
+# github_user # Your github username
+#
+# Once you have the actual values for these four variables, create a file
+# $HOME/bin/backport_common.sh with the following contents
+#
+# redmine_key=[your_redmine_key]
+# redmine_user_id=[your_redmine_user_id]
+# github_token=[your_github_personal_access_token]
+# github_user=[your_github_username]
+#
+# Obviously, since this file contains secrets, you should protect it from
+# exposure using all available means (restricted file privileges, encrypted
+# filesystem, etc.). Without correct values for these four variables, this
+# script will not work!
+#
+#
+
+function deprecation_warning {
+ echo "*******************"
+ echo "DEPRECATION WARNING"
+ echo "*******************"
+ echo
+ echo "This is an outdated, unmaintained version of ceph-backport.sh. Using this"
+ echo "version can have unpredictable results. It is recommended to use the"
+ echo "version from the \"master\" branch, instead. In other words, use this:"
+ echo
+ echo "https://github.com/ceph/ceph/blob/master/src/script/ceph-backport.sh"
+ echo
+}
+
+if [[ "$@" =~ "--version" ]] ; then
+ deprecation_warning
+ echo "$0: version 14.2.0 (DEPRECATED - DO NOT USE)"
+ exit 0
+fi
+
+deprecation_warning
+echo "Sleeping for 5 seconds to give you time to hit CTRL-C..."
+sleep 5
+
+source $HOME/bin/backport_common.sh
+
+function failed_required_variable_check () {
+ local varname=$1
+ echo "$0: $varname not defined. Did you create $HOME/bin/backport_common.sh?"
+ echo "(For instructions, see comment block at beginning of script)"
+ exit 1
+}
+
+test "$redmine_key" || failed_required_variable_check redmine_key
+test "$redmine_user_id" || failed_required_variable_check redmine_user_id
+test "$github_token" || failed_required_variable_check github_token
+test "$github_user" || failed_required_variable_check github_user
+
+function usage () {
+ echo "Usage:"
+ echo " $0 [BACKPORT_TRACKER_ISSUE_NUMBER] [MILESTONE]"
+ echo
+ echo "Example:"
+ echo " $0 19206 jewel"
+ echo
+ echo "If MILESTONE is not given on the command line, the script will"
+ echo "try to use the value of the MILESTONE environment variable, if set."
+ echo
+ echo "The script must be run from inside the local git clone"
+ exit 1
+}
+
+[[ $1 != ?(-)+([0-9]) ]] && usage
+issue=$1
+echo "Backport issue: $issue"
+
+milestone=
+test "$2" && milestone="$2"
+if [ -z "$milestone" ] ; then
+ test "$MILESTONE" && milestone="$MILESTONE"
+fi
+test "$milestone" || usage
+echo "Milestone: $milestone"
+
+# ------------------------------------
+# How to find out the milestone number
+# ------------------------------------
+# can't seem to extract the milestone number with the API
+# milestone numbers can be obtained with:
+# curl --verbose -X GET https://api.github.com/repos/ceph/ceph/milestones
+
+if [[ "x$milestone" = "xhammer" ]] ; then
+ milestone_number=5
+ target_branch=hammer
+elif [[ "x$milestone" = "xjewel" ]] ; then
+ milestone_number=8
+ target_branch=jewel
+elif [[ "x$milestone" = "xkraken" ]] ; then
+ milestone_number=9
+ target_branch=kraken
+elif [[ "x$milestone" = "xluminous" ]] ; then
+ milestone_number=10
+ target_branch=luminous
+elif [[ "x$milestone" = "xmimic" ]] ; then
+ milestone_number=11
+ target_branch=mimic
+else
+ echo "Please enter hammer, jewel, kraken, luminous, or mimic"
+ exit 1
+fi
+echo "Milestone is $milestone and milestone number is $milestone_number"
+
+if [ $(curl --silent http://tracker.ceph.com/issues/$issue.json | jq -r .issue.tracker.name) != "Backport" ]
+then
+ echo "http://tracker.ceph.com/issues/$issue is not a backport (edit and change tracker?)"
+ exit 1
+fi
+
+title=$(curl --silent 'http://tracker.ceph.com/issues/'$issue.json?key=$redmine_key | jq .issue.subject | tr -d '\\"')
+echo "Issue title: $title"
+
+git push -u origin wip-$issue-$milestone
+number=$(curl --silent --data-binary '{"title":"'"$title"'","head":"'$github_user':wip-'$issue-$milestone'","base":"'$target_branch'","body":"http://tracker.ceph.com/issues/'$issue'"}' 'https://api.github.com/repos/ceph/ceph/pulls?access_token='$github_token | jq .number)
+echo "Opened pull request $number"
+
+component=core ; curl --silent --data-binary '{"milestone":'$milestone_number',"assignee":"'$github_user'","labels":["bug fix","'$component'"]}' 'https://api.github.com/repos/ceph/ceph/issues/'$number'?access_token='$github_token
+firefox https://github.com/ceph/ceph/pull/$number
+redmine_status=2 # In Progress
+curl --verbose -X PUT --header 'Content-type: application/json' --data-binary '{"issue":{"description":"https://github.com/ceph/ceph/pull/'$number'","status_id":'$redmine_status',"assigned_to_id":'$redmine_user_id'}}' 'http://tracker.ceph.com/issues/'$issue.json?key=$redmine_key
+echo "Staged http://tracker.ceph.com/issues/$issue"
+
+firefox http://tracker.ceph.com/issues/$issue
diff --git a/src/script/ceph-debug-docker.sh b/src/script/ceph-debug-docker.sh
new file mode 100755
index 00000000..eac98183
--- /dev/null
+++ b/src/script/ceph-debug-docker.sh
@@ -0,0 +1,120 @@
+#!/usr/bin/env bash
+
+# This can be run from e.g. the senta machines which have docker available. You
+# may need to run this script with sudo.
+#
+# Once you have booted into the image, you should be able to debug the core file:
+# $ gdb -q /ceph/teuthology-archive/.../coredump/1500013578.8678.core
+#
+# You may want to install other packages (yum) as desired.
+#
+# Once you're finished, please delete old images in a timely fashion.
+
+set -e
+
+CACHE=""
+
+function run {
+ printf "%s\n" "$*"
+ "$@"
+}
+
+function main {
+ eval set -- $(getopt --name "$0" --options 'h' --longoptions 'help,no-cache' -- "$@")
+
+ while [ "$#" -gt 0 ]; do
+ case "$1" in
+ -h|--help)
+ printf '%s: [--no-cache] <branch>[:sha1] <environment>\n' "$0"
+ exit 0
+ ;;
+ --no-cache)
+ CACHE="--no-cache"
+ shift
+ ;;
+ --)
+ shift
+ break
+ ;;
+ esac
+ done
+
+ if [ -z "$1" ]; then
+ printf "specify the branch [default \"master:latest\"]: "
+ read branch
+ if [ -z "$branch" ]; then
+ branch=master:latest
+ fi
+ else
+ branch="$1"
+ fi
+ if [ "${branch%%:*}" != "${branch}" ]; then
+ sha=${branch##*:}
+ else
+ sha=latest
+ fi
+ branch=${branch%%:*}
+ printf "branch: %s\nsha1: %s\n" "$branch" "$sha"
+
+ if [ -z "$2" ]; then
+ printf "specify the build environment [default \"centos:7\"]: "
+ read env
+ if [ -z "$env" ]; then
+ env=centos:7
+ fi
+ else
+ env="$2"
+ fi
+ printf "env: %s\n" "$env"
+
+ if [ -n "$SUDO_USER" ]; then
+ user="$SUDO_USER"
+ elif [ -n "$USER" ]; then
+ user="$USER"
+ else
+ user="$(whoami)"
+ fi
+
+ tag="${user}:ceph-ci-${branch}-${sha}-${env/:/-}"
+
+ T=$(mktemp -d)
+ pushd "$T"
+ if grep ubuntu <<<"$env" > /dev/null 2>&1; then
+ # Docker makes it impossible to access anything outside the CWD : /
+ cp -- /ceph/shaman/cephdev.asc .
+ cat > Dockerfile <<EOF
+FROM ${env}
+
+WORKDIR /root
+RUN apt-get update --yes --quiet && \
+ apt-get install --yes --quiet screen gdb software-properties-common apt-transport-https curl
+COPY cephdev.asc cephdev.asc
+RUN apt-key add cephdev.asc && \
+ curl -L https://shaman.ceph.com/api/repos/ceph/${branch}/${sha}/${env/://}/repo | tee /etc/apt/sources.list.d/ceph_dev.list && \
+ apt-get update --yes && \
+ DEBIAN_FRONTEND=noninteractive DEBIAN_PRIORITY=critical apt-get --assume-yes -q --no-install-recommends install -o Dpkg::Options::=--force-confnew --allow-unauthenticated ceph ceph-osd-dbg ceph-mds-dbg ceph-mgr-dbg ceph-mon-dbg ceph-common-dbg ceph-fuse-dbg ceph-test-dbg radosgw-dbg python3-cephfs python3-rados
+EOF
+ time run docker build $CACHE --tag "$tag" .
+ else # try RHEL flavor
+ time run docker build $CACHE --tag "$tag" - <<EOF
+FROM ${env}
+
+WORKDIR /root
+RUN yum update -y && \
+ yum install -y screen epel-release wget psmisc ca-certificates gdb
+RUN wget -O /etc/yum.repos.d/ceph-dev.repo https://shaman.ceph.com/api/repos/ceph/${branch}/${sha}/centos/7/repo && \
+ yum clean all && \
+ yum upgrade -y && \
+ yum install -y ceph ceph-debuginfo ceph-fuse python34-rados python34-cephfs
+EOF
+ fi
+ popd
+ rm -rf -- "$T"
+
+ printf "built image %s\n" "$tag"
+
+ run docker run -ti -v /ceph:/ceph:ro "$tag"
+ return 0
+}
+
+main "$@"
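
For example, to build a debug image for a CI branch and drop into it with the
teuthology archives mounted read-only at /ceph (the branch and environment here
are placeholders):

    $ sudo ./src/script/ceph-debug-docker.sh wip-mybranch:latest centos:7
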
diff --git a/src/script/ceph-release-notes b/src/script/ceph-release-notes
new file mode 100755
index 00000000..aba4426d
--- /dev/null
+++ b/src/script/ceph-release-notes
@@ -0,0 +1,310 @@
+#!/usr/bin/env python
+# Adapted from A. Israel's script at
+# https://gist.github.com/aisrael/b2b78d9dfdd176a232b9
+"""To run this script first install the dependencies
+
+
+ virtualenv v
+ source v/bin/activate
+ pip install githubpy GitPython requests
+
+Generate a github access token; this is needed as the anonymous access
+to Github's API will easily hit the limit even with a single invocation.
+For details see:
+https://help.github.com/articles/creating-an-access-token-for-command-line-use/
+
+Next either set the github token as an env variable
+`GITHUB_ACCESS_TOKEN` or alternatively invoke the script with
+`--token` switch.
+
+Example:
+
+ ceph-release-notes -r tags/v0.87..origin/giant \
+ $(git rev-parse --show-toplevel)
+
+"""
+
+from __future__ import print_function
+import argparse
+import github
+import os
+import re
+import sys
+import requests
+
+from git import Repo
+
+
+fixes_re = re.compile(r"Fixes\:? #(\d+)")
+reviewed_by_re = re.compile(r"Rev(.*)By", re.IGNORECASE)
+# labels is the list of relevant labels defined for github.com/ceph/ceph
+labels = {'bluestore', 'build/ops', 'cephfs', 'common', 'core', 'mgr',
+ 'mon', 'performance', 'pybind', 'rdma', 'rgw', 'rbd', 'tests',
+ 'tools'}
+merge_re = re.compile("Merge pull request #(\d+).*")
+# prefixes is the list of commit description prefixes we recognize
+prefixes = ['bluestore', 'build/ops', 'cephfs', 'cephx', 'cli', 'cmake',
+ 'common', 'core', 'crush', 'doc', 'fs', 'librados', 'librbd',
+ 'log', 'mds', 'mgr', 'mon', 'msg', 'objecter', 'osd', 'pybind',
+ 'rbd', 'rbd-mirror', 'rbd-nbd', 'rgw', 'tests', 'tools']
+signed_off_re = re.compile("Signed-off-by: (.+) <")
+tracker_re = re.compile("http://tracker.ceph.com/issues/(\d+)")
+rst_link_re = re.compile(r"([a-zA-Z0-9])_(\W)")
+tracker_uri = "http://tracker.ceph.com/issues/{0}.json"
+
+
+def get_original_issue(issue, verbose):
+ r = requests.get(tracker_uri.format(issue),
+ params={"include": "relations"}).json()
+
+ # looking up for the original issue only makes sense
+ # when dealing with an issue in the Backport tracker
+ if r["issue"]["tracker"]["name"] != "Backport":
+ if verbose:
+ print ("http://tracker.ceph.com/issues/" + issue +
+ " is from the tracker " + r["issue"]["tracker"]["name"] +
+ ", do not look for the original issue")
+ return issue
+
+ # if a Backport issue does not have a relation, keep it
+ if "relations" not in r["issue"]:
+ if verbose:
+ print ("http://tracker.ceph.com/issues/" + issue +
+ " has no relations, do not look for the original issue")
+ return issue
+
+ copied_to = [
+ str(i['issue_id']) for i in r["issue"]["relations"]
+ if i["relation_type"] == "copied_to"
+ ]
+ if copied_to:
+ if len(copied_to) > 1:
+ if verbose:
+ print ("ERROR: http://tracker.ceph.com/issues/" + issue +
+ " has more than one Copied To relation")
+ return issue
+ if verbose:
+ print ("http://tracker.ceph.com/issues/" + issue +
+ " is the backport of http://tracker.ceph.com/issues/" +
+ copied_to[0])
+ return copied_to[0]
+ else:
+ if verbose:
+ print ("http://tracker.ceph.com/issues/" + issue +
+ " has no copied_to relations; do not look for the" +
+ " original issue")
+ return issue
+
+
+def split_component(title, gh, number):
+ title_re = '(' + '|'.join(prefixes) + ')(:.*)'
+ match = re.match(title_re, title)
+ if match:
+ return match.group(1)+match.group(2)
+ else:
+ issue = gh.repos("ceph")("ceph").issues(number).get()
+ issue_labels = {it['name'] for it in issue['labels']}
+ if 'documentation' in issue_labels:
+ return 'doc: ' + title
+ item = set(prefixes).intersection(issue_labels)
+ if item:
+ return ",".join(sorted(item)) + ': ' + title
+ else:
+ return 'UNKNOWN: ' + title
+
+def _title_message(commit, pr, strict):
+ title = pr['title']
+ message_lines = commit.message.split('\n')
+ if strict or len(message_lines) < 1:
+ return (title, None)
+ lines = []
+ for line in message_lines[1:]:
+ if reviewed_by_re.match(line):
+ continue
+ line = line.strip()
+ if line:
+ lines.append(line)
+ if len(lines) == 0:
+ return (title, None)
+ duplicates_pr_title = lines[0] == pr['title'].strip()
+ if duplicates_pr_title:
+ return (title, None)
+ assert len(lines) > 0, "missing message content"
+ if len(lines) == 1:
+ # assume that a single line means the intention is to
+ # re-write the PR title
+ return (lines[0], None)
+ message = " " + "\n ".join(lines)
+ return (title, message)
+
+def make_release_notes(gh, repo, ref, plaintext, verbose, strict, use_tags):
+
+ issue2prs = {}
+ pr2issues = {}
+ pr2info = {}
+
+ for commit in repo.iter_commits(ref, merges=True):
+ merge = merge_re.match(commit.summary)
+ if not merge:
+ continue
+ number = merge.group(1)
+ print ("Considering PR#" + number)
+ # do not pick up ceph/ceph-qa-suite.git PRs
+ if int(number) < 1311:
+ print ("Ignoring low-numbered PR, probably picked up from"
+ " ceph/ceph-qa-suite.git")
+ continue
+ pr = gh.repos("ceph")("ceph").pulls(number).get()
+ (title, message) = _title_message(commit, pr, strict)
+ issues = []
+ if pr['body']:
+ issues = fixes_re.findall(pr['body']) + tracker_re.findall(
+ pr['body']
+ )
+
+ authors = {}
+ for c in repo.iter_commits(
+ "{sha1}^1..{sha1}^2".format(sha1=commit.hexsha)
+ ):
+ for author in re.findall(
+ "Signed-off-by:\s*(.*?)\s*<", c.message
+ ):
+ authors[author] = 1
+ issues.extend(fixes_re.findall(c.message) +
+ tracker_re.findall(c.message))
+ if authors:
+ author = ", ".join(authors.keys())
+ else:
+ author = commit.parents[-1].author.name
+
+ if strict and not issues:
+ print ("ERROR: https://github.com/ceph/ceph/pull/" +
+ str(number) + " has no associated issue")
+ continue
+
+ if strict:
+ title_re = (
+ '^(?:hammer|infernalis|jewel|kraken):\s+(' +
+ '|'.join(prefixes) +
+ ')(:.*)'
+ )
+ match = re.match(title_re, title)
+ if not match:
+ print ("ERROR: https://github.com/ceph/ceph/pull/" +
+ str(number) + " title " + title.encode("utf-8") +
+ " does not match " + title_re)
+ else:
+ title = match.group(1) + match.group(2)
+ if use_tags:
+ title = split_component(title, gh, number)
+
+ title = title.strip(' \t\n\r\f\v\.\,\;\:\-\=')
+ # escape asterisks, which are used by reStructuredText for inline
+ # emphasis
+ title = title.replace('*', '\*')
+ # and escape the underscores for noting a link
+ title = rst_link_re.sub(r'\1\_\2', title)
+ pr2info[number] = (author, title, message)
+
+ for issue in set(issues):
+ if strict:
+ issue = get_original_issue(issue, verbose)
+ issue2prs.setdefault(issue, set([])).add(number)
+ pr2issues.setdefault(number, set([])).add(issue)
+ sys.stdout.write('.')
+
+ print (" done collecting merges.")
+
+ if strict:
+ for (issue, prs) in issue2prs.items():
+ if len(prs) > 1:
+ print (">>>>>>> " + str(len(prs)) + " pr for issue " +
+ issue + " " + str(prs))
+
+ for (pr, (author, title, message)) in sorted(
+ pr2info.items(), key=lambda title: title[1][1]
+ ):
+ if pr in pr2issues:
+ if plaintext:
+ issues = map(lambda issue: '#' + str(issue), pr2issues[pr])
+ else:
+ issues = map(lambda issue: (
+ '`issue#{issue} <http://tracker.ceph.com/issues/{issue}>`_'
+ ).format(issue=issue), pr2issues[pr]
+ )
+ issues = ", ".join(issues) + ", "
+ else:
+ issues = ''
+ if plaintext:
+ print ("* {title} ({issues}{author})".format(
+ title=title.encode("utf-8"),
+ issues=issues,
+ author=author.encode("utf-8")
+ )
+ )
+ else:
+ print (
+ (
+ "* {title} ({issues}`pr#{pr} <"
+ "https://github.com/ceph/ceph/pull/{pr}"
+ ">`_, {author})"
+ ).format(
+ title=title.encode("utf-8"),
+ issues=issues,
+ author=author.encode("utf-8"), pr=pr
+ )
+ )
+ if message:
+ print (message)
+
+
+if __name__ == "__main__":
+ desc = '''
+ Make ceph release notes for a given revision. Eg usage:
+
+ $ ceph-release-notes -r tags/v0.87..origin/giant \
+ $(git rev-parse --show-toplevel)
+
+ It is recommended to set the github env. token in order to avoid
+ hitting the api rate limits.
+ '''
+
+ parser = argparse.ArgumentParser(
+ description=desc,
+ formatter_class=argparse.RawTextHelpFormatter
+ )
+
+ parser.add_argument("--rev", "-r",
+ help="git revision range for creating release notes")
+ parser.add_argument("--text", "-t",
+ action='store_true', default=None,
+ help="output plain text only, no links")
+ parser.add_argument("--verbose", "-v",
+ action='store_true', default=None,
+ help="verbose")
+ parser.add_argument("--strict",
+ action='store_true', default=None,
+ help="strict, recommended only for backport releases")
+ parser.add_argument("repo", metavar="repo",
+ help="path to ceph git repo")
+ parser.add_argument(
+ "--token",
+ default=os.getenv("GITHUB_ACCESS_TOKEN"),
+ help="Github Access Token ($GITHUB_ACCESS_TOKEN otherwise)",
+ )
+ parser.add_argument("--use-tags", default=False,
+ help="Use github tags to guess the component")
+
+ args = parser.parse_args()
+ gh = github.GitHub(
+ access_token=args.token)
+
+ make_release_notes(
+ gh,
+ Repo(args.repo),
+ args.rev,
+ args.text,
+ args.verbose,
+ args.strict,
+ args.use_tags
+ )
diff --git a/src/script/check_commands.sh b/src/script/check_commands.sh
new file mode 100755
index 00000000..17a15b40
--- /dev/null
+++ b/src/script/check_commands.sh
@@ -0,0 +1,19 @@
+#!/bin/sh
+git grep COMMAND\( | grep -o "(\"[a-zA-Z ]*\"" | grep -o "[a-zA-Z ]*" > commands.txt
+missing_test=false
+good_tests=""
+bad_tests=""
+while read cmd; do
+ if git grep -q "$cmd" -- src/test qa/; then
+ good_tests="$good_tests '$cmd'"
+ else
+ echo "'$cmd' has no apparent tests"
+ missing_test=true
+ bad_tests="$bad_tests '$cmd'"
+ fi
+done < commands.txt
+
+if [ "$missing_test" == true ]; then
+ echo "Missing tests!" $bad_tests
+ exit 1;
+fi
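
The git grep calls assume a ceph checkout; run the script from the repository
root so the src/test and qa/ pathspecs resolve:

    $ cd ceph
    $ ./src/script/check_commands.sh
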
diff --git a/src/script/cmake_uninstall.cmake.in b/src/script/cmake_uninstall.cmake.in
new file mode 100644
index 00000000..4c07dc7b
--- /dev/null
+++ b/src/script/cmake_uninstall.cmake.in
@@ -0,0 +1,21 @@
+if(NOT EXISTS "@CMAKE_BINARY_DIR@/install_manifest.txt")
+ message(FATAL_ERROR "Cannot find install manifest: @CMAKE_BINARY_DIR@/install_manifest.txt")
+endif(NOT EXISTS "@CMAKE_BINARY_DIR@/install_manifest.txt")
+
+file(READ "@CMAKE_BINARY_DIR@/install_manifest.txt" files)
+string(REGEX REPLACE "\n" ";" files "${files}")
+foreach(file ${files})
+ message(STATUS "Uninstalling $ENV{DESTDIR}${file}")
+ if(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}")
+ exec_program(
+ "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\""
+ OUTPUT_VARIABLE rm_out
+ RETURN_VALUE rm_retval
+ )
+ if(NOT "${rm_retval}" STREQUAL 0)
+ message(FATAL_ERROR "Problem when removing $ENV{DESTDIR}${file}")
+ endif(NOT "${rm_retval}" STREQUAL 0)
+ else(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}")
+ message(STATUS "File $ENV{DESTDIR}${file} does not exist.")
+ endif(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}")
+endforeach(file)
diff --git a/src/script/crash_bdev.sh b/src/script/crash_bdev.sh
new file mode 100755
index 00000000..da31b69b
--- /dev/null
+++ b/src/script/crash_bdev.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+set -ex
+
+while true; do
+ ./ceph daemon osd.0 config set bdev_inject_crash 2
+ sleep 5
+ tail -n 1000 out/osd.0.log | grep bdev_inject_crash || exit 1
+ ./init-ceph start osd.0
+ sleep 20
+done
diff --git a/src/script/credits.sh b/src/script/credits.sh
new file mode 100755
index 00000000..415889d5
--- /dev/null
+++ b/src/script/credits.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+# bash is required: this script uses associative arrays (declare -A)
+
+range="$1"
+TMP=/tmp/credits
+declare -A mail2author
+declare -A mail2organization
+remap="s/'/ /g"
+git log --pretty='%ae %aN <%aE>' $range | sed -e "$remap" | sort -u > $TMP
+while read mail who ; do
+ author=$(echo $who | git -c mailmap.file=.peoplemap check-mailmap --stdin)
+ mail2author[$mail]="$author"
+ organization=$(echo $who | git -c mailmap.file=.organizationmap check-mailmap --stdin)
+ mail2organization[$mail]="$organization"
+done < $TMP
+declare -A author2lines
+declare -A organization2lines
+git log --no-merges --pretty='%ae' $range | sed -e "$remap" | sort -u > $TMP
+while read mail ; do
+ count=$(git log --numstat --author="$mail" --pretty='%h' $range |
+ egrep -v 'package-lock\.json|\.xlf' | # generated files that should be excluded from line counting
+ perl -e 'while(<STDIN>) { if(/(\d+)\t(\d+)/) { $added += $1; $deleted += $2 } }; print $added + $deleted;')
+ (( author2lines["${mail2author[$mail]}"] += $count ))
+ (( organization2lines["${mail2organization[$mail]}"] += $count ))
+done < $TMP
+echo
+echo "Number of lines added and removed, by authors"
+for author in "${!author2lines[@]}" ; do
+ printf "%6s %s\n" ${author2lines["$author"]} "$author"
+done | sort -rn | nl
+echo
+echo "Number of lines added and removed, by organization"
+for organization in "${!organization2lines[@]}" ; do
+ printf "%6s %s\n" ${organization2lines["$organization"]} "$organization"
+done | sort -rn | nl
+echo
+echo "Commits, by authors"
+git log --no-merges --pretty='%aN <%aE>' $range | git -c mailmap.file=.peoplemap check-mailmap --stdin | sort | uniq -c | sort -rn | nl
+echo
+echo "Commits, by organizations"
+git log --no-merges --pretty='%aN <%aE>' $range | git -c mailmap.file=.organizationmap check-mailmap --stdin | sort | uniq -c | sort -rn | nl
+echo
+echo "Reviews, by authors (one review spans multiple commits)"
+git log --pretty=%b $range | perl -n -e 'print "$_\n" if(s/^\s*Reviewed-by:\s*(.*<.*>)\s*$/\1/i)' | git check-mailmap --stdin | git -c mailmap.file=.peoplemap check-mailmap --stdin | sort | uniq -c | sort -rn | nl
+echo
+echo "Reviews, by organizations (one review spans multiple commits)"
+git log --pretty=%b $range | perl -n -e 'print "$_\n" if(s/^\s*Reviewed-by:\s*(.*<.*>)\s*$/\1/i)' | git check-mailmap --stdin | git -c mailmap.file=.organizationmap check-mailmap --stdin | sort | uniq -c | sort -rn | nl
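
.peoplemap and .organizationmap follow git's mailmap format: the canonical
identity, then the commit identity it replaces. A hypothetical .organizationmap
entry crediting a commit address to an organization:

    Example Corp <contact@example.com> Jane Developer <jane@example.com>
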
diff --git a/src/script/dep-report.sh b/src/script/dep-report.sh
new file mode 100755
index 00000000..c2eeaed7
--- /dev/null
+++ b/src/script/dep-report.sh
@@ -0,0 +1,120 @@
+#! /usr/bin/env bash
+
+shopt -s nullglob
+
+PLATFORM=`lsb_release -is`
+
+TMPFILE1=`mktemp --tmpdir depA.XXXXXX` || exit 1
+TMPFILE2=`mktemp --tmpdir depB.XXXXXX` || exit 2
+TMPFILE3=`mktemp --tmpdir depB.XXXXXX` || exit 3
+
+cleanup() {
+ rm -f $TMPFILE1
+ rm -f $TMPFILE2
+ rm -f $TMPFILE3
+}
+trap cleanup INT EXIT
+
+# find all the .deps directories
+DEPDIRS=`find . -name ".deps" -print`
+if [ -z "$DEPDIRS" ] ; then
+ echo "No depdirs found. Ceph must be built before running dependency check"
+fi
+
+# find all the headers
+echo "Looking for headers ... " >&2
+for DIR in $DEPDIRS
+do
+ for file in $DIR/*.Po $DIR/*.Plo
+ do
+ #echo "$DIR: $file" >&2
+ cut -d: -f1 $file | grep "^/" >> $TMPFILE1
+ done
+done
+
+# Add in required libraries
+echo "Looking for libraries ... " >&2
+LIB_PATHS="/lib64 /usr/lib64 /lib /usr/lib"
+FIND=`which find`
+autoconf --trace AC_CHECK_LIB | cut -d: -f4 | while read LIB
+do
+ for PATH in $LIB_PATHS
+ do
+ $FIND $PATH -name "lib$LIB.so*" -print 2> /dev/null >> $TMPFILE1
+ done
+done
+autoconf --trace AC_SEARCH_LIBS | cut -d: -f5 | while read LIBLIST
+do
+ for LIB in $LIBLIST ; do
+ for PATH in $LIB_PATHS ; do
+ $FIND $PATH -name "lib$LIB.so*" -print 2> /dev/null >> $TMPFILE1
+ done
+ done
+done
+autoconf --trace PKG_CHECK_MODULES | cut -d: -f5 | cut -d' ' -f1 | while read PKG
+do
+ LIBLIST=`pkg-config --libs $PKG 2> /dev/null`
+ for LIB in $LIBLIST ; do
+ LIB=${LIB#-l}
+ for PATH in $LIB_PATHS
+ do
+ $FIND $PATH -name "lib$LIB.so*" -print 2> /dev/null >> $TMPFILE1
+ done
+ done
+done
+
+# path to package
+echo "Looking up packages for hdr and lib paths ... " >&2
+sort $TMPFILE1 | uniq > $TMPFILE2
+
+rm $TMPFILE1
+cat $TMPFILE2 | while read LINE
+do
+ package=`rpm -q --whatprovides $LINE`
+ echo $package >> $TMPFILE1
+done
+
+# Add in any libraries needed for the devel packages
+echo "Adding libraries for devel packages ... " >&2
+sort $TMPFILE1 | uniq > $TMPFILE3
+cat $TMPFILE3 | grep devel | while read PACKAGE
+do
+ NAME=`rpm -q --qf %{NAME} $PACKAGE`
+ NAME=${NAME%-devel}
+ #echo "looking for matching $NAME ... " >&2
+ LPACKAGE=`rpm -q $NAME 2> /dev/null`
+ if [ $? -eq 0 ] ; then
+ #echo "Found $LPACKAGE ... " >&2
+ echo $LPACKAGE >> $TMPFILE1
+ else
+ LPACKAGE=`rpm -q $NAME-libs 2> /dev/null`
+ if [ $? -eq 0 ] ; then
+ #echo "Found $LPACKAGE ... " >&2
+ echo $LPACKAGE >> $TMPFILE1
+ fi
+ fi
+done
+echo "Checking licenses ... " >&2
+
+# Read package list and generate report
+sort $TMPFILE1 | uniq > $TMPFILE2
+
+rm $TMPFILE1
+echo -e "\nPackage Dependencies:\n"
+cat $TMPFILE2 | while read PACKAGE
+do
+ LICENSE=`rpm -q --qf %{LICENSE} $PACKAGE`
+ NAME=`rpm -q --qf %{NAME} $PACKAGE`
+ echo "${PACKAGE}.rpm"
+ echo " Name: $NAME"
+ echo " License: $LICENSE"
+done
+
+echo -e "\nSource Code Dependencies:\n"
+echo "src/leveldb"
+echo " Name: leveldb"
+echo " License: Google Public License"
+
+echo "Done"
+#echo "DEPDIRS: $DEPDIRS"
+
diff --git a/src/script/find_dups_in_pg_log.sh b/src/script/find_dups_in_pg_log.sh
new file mode 100755
index 00000000..b4d1afb6
--- /dev/null
+++ b/src/script/find_dups_in_pg_log.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+# pipe output of grep for objectname in osd logs to me
+#
+# e.g.,
+#
+# zgrep smithi01817880-936 remote/*/log/*osd* | ~/src/ceph/src/script/find_dups_in_pg_log.sh
+#
+# or
+#
+# zcat remote/*/log/*osd* | ~/src/ceph/src/script/find_dups_in_pg_log.sh
+#
+# output will be any requests that appear in the pg log >1 time (along with
+# their count)
+
+#grep append_log | sort -k 2 | sed 's/.*append_log//' | awk '{print $3 " " $8}' | sort | uniq | awk '{print $2}' | sort | uniq -c | grep -v ' 1 '
+
+grep append_log | grep ' by ' | \
+ perl -pe 's/(.*) \[([^ ]*) (.*) by ([^ ]+) (.*)/$2 $4/' | \
+ sort | uniq | \
+ awk '{print $2}' | \
+ sort | uniq -c | grep -v ' 1 '
diff --git a/src/script/fix_modeline.pl b/src/script/fix_modeline.pl
new file mode 100755
index 00000000..8eadde9b
--- /dev/null
+++ b/src/script/fix_modeline.pl
@@ -0,0 +1,29 @@
+#!/usr/bin/perl
+
+use strict;
+my $fn = shift @ARGV;
+my $old = `cat $fn`;
+my $header = `cat doc/modeline.txt`;
+
+# strip existing modeline
+my $new = $old;
+$new =~ s/^\/\/ \-\*\- ([^\n]+) \-\*\-([^\n]*)\n//s; # emacs
+$new =~ s/^\/\/ vim: ([^\n]*)\n//s; # vim;
+$new =~ s/^\/\/ \-\*\- ([^\n]+) \-\*\-([^\n]*)\n//s; # emacs
+$new =~ s/^\/\/ vim: ([^\n]*)\n//s; # vim;
+$new =~ s/^\/\/ \-\*\- ([^\n]+) \-\*\-([^\n]*)\n//s; # emacs
+$new =~ s/^\/\/ vim: ([^\n]*)\n//s; # vim;
+
+# add correct header
+$new = $header . $new;
+
+if ($new ne $old) {
+ print "$fn\n";
+ open(O, ">$fn.new");
+ print O $new;
+ close O;
+ system "diff $fn $fn.new";
+ rename "$fn.new", $fn;
+ #unlink "$fn.new";
+}
+
diff --git a/src/script/kcon_all.sh b/src/script/kcon_all.sh
new file mode 100755
index 00000000..c3056f9d
--- /dev/null
+++ b/src/script/kcon_all.sh
@@ -0,0 +1,10 @@
+#!/bin/sh -x
+
+p() {
+ echo "$*" > /sys/kernel/debug/dynamic_debug/control
+}
+
+echo 9 > /proc/sysrq-trigger
+p 'module ceph +p'
+p 'module libceph +p'
+p 'module rbd +p'
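
Each p call writes a query to the kernel's dynamic_debug control file; +p
enables the matching debug statements, and the same syntax with -p turns them
back off again, e.g.:

    $ echo 'module ceph -p' > /sys/kernel/debug/dynamic_debug/control
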
diff --git a/src/script/kcon_most.sh b/src/script/kcon_most.sh
new file mode 100755
index 00000000..e62db2ac
--- /dev/null
+++ b/src/script/kcon_most.sh
@@ -0,0 +1,13 @@
+#!/bin/sh -x
+
+p() {
+ echo "$*" > /sys/kernel/debug/dynamic_debug/control
+}
+
+echo 9 > /proc/sysrq-trigger
+p 'module ceph +p'
+p 'module libceph +p'
+p 'module rbd +p'
+p 'file net/ceph/messenger.c -p'
+p 'file' `grep -- --- /sys/kernel/debug/dynamic_debug/control | grep ceph | awk '{print $1}' | sed 's/:/ line /'` '+p'
+p 'file' `grep -- === /sys/kernel/debug/dynamic_debug/control | grep ceph | awk '{print $1}' | sed 's/:/ line /'` '+p'
diff --git a/src/script/kubejacker/Dockerfile b/src/script/kubejacker/Dockerfile
new file mode 100644
index 00000000..ab6dc644
--- /dev/null
+++ b/src/script/kubejacker/Dockerfile
@@ -0,0 +1,34 @@
+from BASEIMAGE
+
+# Some apt-get commands fail in docker builds because they try
+# and do interactive prompts
+ENV TERM linux
+
+# Baseline rook images may be from before the `rook` ceph-mgr module,
+# so let's install the dependencies of that
+# New RGW dependency since luminous: liboath
+# For the dashboard, if the rook images are pre-Mimic: python-bcrypt librdmacm
+
+RUN (grep -q rhel /etc/os-release && ( \
+ yum install -y python-pip && \
+ yum install -y liboath && \
+ yum install -y python-bcrypt librdmacm && \
+ pip install kubernetes==6.0.0 \
+ )) || (grep -q suse /etc/os-release && ( \
+ zypper --non-interactive --gpg-auto-import-keys install --no-recommends --auto-agree-with-licenses --replacefiles --details \
+ python3-kubernetes \
+ liboauth-devel \
+ python-bcrypt \
+ lz4 \
+ librdmacm1 \
+ libopenssl1_1 \
+ ))
+
+ADD bin.tar.gz /usr/bin/
+ADD lib.tar.gz /usr/lib64/
+
+# Assume developer is using default paths (i.e. /usr/local), so
+# build binaries will be looking for libs there.
+ADD eclib.tar.gz /usr/local/lib64/ceph/erasure-code/
+ADD clslib.tar.gz /usr/local/lib64/rados-classes/
+ADD mgr_plugins.tar.gz /usr/local/lib64/ceph/mgr
diff --git a/src/script/kubejacker/README.rst b/src/script/kubejacker/README.rst
new file mode 100644
index 00000000..07e948a5
--- /dev/null
+++ b/src/script/kubejacker/README.rst
@@ -0,0 +1,11 @@
+
+This tool is for developers who want to run their WIP Ceph code
+inside a Rook/kubernetes cluster without waiting for packages
+to build.
+
+It simply takes a Rook image, overlays all the binaries from your
+built Ceph tree into it, and spits out a new Rook image. This will
+only work as long as your build environment is sufficiently similar
+(in terms of dependencies etc) to the version of Ceph that was
+originally in the images you're injecting into.
+
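
A typical run, per the comments in kubejacker.sh below: start from a completed
build directory, with REPO pointing at a registry your kubelet nodes can pull
from (the hostname and port here are placeholders):

    $ cd ceph/build
    $ REPO=registry.example.com:5000 ../src/script/kubejacker/kubejacker.sh
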
diff --git a/src/script/kubejacker/kubejacker.sh b/src/script/kubejacker/kubejacker.sh
new file mode 100755
index 00000000..cf30a80d
--- /dev/null
+++ b/src/script/kubejacker/kubejacker.sh
@@ -0,0 +1,86 @@
+#!/bin/bash
+
+set -x
+set -e
+SCRIPT=$(readlink -f "$0")
+SCRIPTPATH=$(dirname "$SCRIPT")
+
+# Run me from your build dir! I look for binaries in bin/, lib/ etc.
+BUILDPATH=$(pwd)
+
+# PREREQUISITE: a built rook image to use as a base, either self built
+# or from dockerhub. If you ran "make" in your rook source checkout
+# you'll have one like build-<hash>/rook-amd64
+DEFAULT_BASEIMAGE="`docker image ls | grep ceph-amd64 | cut -d " " -f 1`"
+BASEIMAGE="${BASEIMAGE:-$DEFAULT_BASEIMAGE}"
+
+# PREREQUISITE: a repo that you can push to. You are probably running
+# a local docker registry that your kubelet nodes also have access to.
+if [ -z "$REPO" ]
+then
+ echo "ERROR: no \$REPO set!"
+ echo "Run a docker repository and set REPO to <hostname>:<port>"
+ exit -1
+fi
+
+# The output image name: this should match whatever is configured as
+# the image name in your Rook cluster CRD object.
+IMAGE=rook/ceph
+TAG=$(git rev-parse --short HEAD)
+
+# The namespace where ceph containers are running in your
+# test cluster: used for bouncing the containers.
+NAMESPACE=rook-ceph
+
+mkdir -p kubejacker
+cp $SCRIPTPATH/Dockerfile kubejacker
+sed -i s@BASEIMAGE@$BASEIMAGE@ kubejacker/Dockerfile
+
+# TODO: let user specify which daemon they're interested
+# in -- doing all bins all the time is too slow and bloaty
+BINS="ceph-mgr ceph-mon ceph-mds ceph-osd rados radosgw-admin radosgw"
+pushd bin
+strip $BINS #TODO: make stripping optional
+tar czf $BUILDPATH/kubejacker/bin.tar.gz $BINS
+popd
+
+# We need ceph-common to support the binaries
+# We need librados/rbd to support mgr modules
+# that import the python bindings
+LIBS="libceph-common.so.0 libceph-common.so librados.so.2 librados.so librados.so.2.0.0 librbd.so librbd.so.1 librbd.so.1.12.0"
+pushd lib
+strip $LIBS #TODO: make stripping optional
+tar czf $BUILDPATH/kubejacker/lib.tar.gz $LIBS
+popd
+
+pushd ../src/pybind/mgr
+find ./ -name "*.pyc" -exec rm -f {} \;
+# Exclude node_modules because it's the huge sources in dashboard/frontend
+tar --exclude=node_modules --exclude=tests --exclude-backups -czf $BUILDPATH/kubejacker/mgr_plugins.tar.gz *
+popd
+
+ECLIBS="libec_*.so*"
+pushd lib
+strip $ECLIBS #TODO: make stripping optional
+tar czf $BUILDPATH/kubejacker/eclib.tar.gz $ECLIBS
+popd
+
+CLSLIBS="libcls_*.so*"
+pushd lib
+strip $CLSLIBS #TODO: make stripping optional
+tar czf $BUILDPATH/kubejacker/clslib.tar.gz $CLSLIBS
+popd
+
+pushd kubejacker
+docker build -t $REPO/$IMAGE:$TAG .
+popd
+
+# Push the image to the repository
+docker tag $REPO/$IMAGE:$TAG $REPO/$IMAGE:latest
+docker push $REPO/$IMAGE:latest
+docker push $REPO/$IMAGE:$TAG
+
+# Finally, bounce the containers to pick up the new image
+kubectl -n $NAMESPACE delete pod -l app=rook-ceph-mds
+kubectl -n $NAMESPACE delete pod -l app=rook-ceph-mgr
+kubectl -n $NAMESPACE delete pod -l app=rook-ceph-mon
diff --git a/src/script/ptl-tool.py b/src/script/ptl-tool.py
new file mode 100755
index 00000000..6d374674
--- /dev/null
+++ b/src/script/ptl-tool.py
@@ -0,0 +1,368 @@
+#!/usr/bin/env python2
+
+# README:
+#
+# This tool's purpose is to make it easier to merge PRs into test branches and
+# into master. Make sure you generate a Personal access token in GitHub and
+# add it to your ~/.github.key.
+#
+# Because developers often have custom names for the ceph upstream remote
+# (https://github.com/ceph/ceph.git), you will probably want to export the
+# PTL_TOOL_BASE_PATH environment variable in your shell rc files before using
+# this script:
+#
+# export PTL_TOOL_BASE_PATH=refs/remotes/<remotename>/
+#
+# and PTL_TOOL_BASE_REMOTE as the name of your Ceph upstream remote (default: "upstream"):
+#
+# export PTL_TOOL_BASE_REMOTE=<remotename>
+#
+#
+# ** Here are some basic examples to get started: **
+#
+# Merging all PRs labeled 'wip-pdonnell-testing' into a new test branch:
+#
+# $ src/script/ptl-tool.py --pr-label wip-pdonnell-testing
+# Adding labeled PR #18805 to PR list
+# Adding labeled PR #18774 to PR list
+# Adding labeled PR #18600 to PR list
+# Will merge PRs: [18805, 18774, 18600]
+# Detaching HEAD onto base: master
+# Merging PR #18805
+# Merging PR #18774
+# Merging PR #18600
+# Checked out new branch wip-pdonnell-testing-20171108.054517
+# Created tag testing/wip-pdonnell-testing-20171108.054517
+#
+#
+# Merging all PRs labeled 'wip-pdonnell-testing' into master:
+#
+# $ src/script/ptl-tool.py --pr-label wip-pdonnell-testing --branch master
+# Adding labeled PR #18805 to PR list
+# Adding labeled PR #18774 to PR list
+# Adding labeled PR #18600 to PR list
+# Will merge PRs: [18805, 18774, 18600]
+# Detaching HEAD onto base: master
+# Merging PR #18805
+# Merging PR #18774
+# Merging PR #18600
+# Checked out branch master
+#
+# Now push to master:
+# $ git push upstream master
+# ...
+#
+#
+# Merging PRs #1234567 and #2345678 into a new test branch with a testing label added to each PR:
+#
+# $ src/script/ptl-tool.py 1234567 2345678 --label wip-pdonnell-testing
+# Detaching HEAD onto base: master
+# Merging PR #1234567
+# Labeled PR #1234567 wip-pdonnell-testing
+# Merging PR #2345678
+# Labeled PR #2345678 wip-pdonnell-testing
+# Deleted old test branch wip-pdonnell-testing-20170928
+# Created branch wip-pdonnell-testing-20170928
+# Created tag testing/wip-pdonnell-testing-20170928_03
+#
+#
+# Merging PR #1234567 into master, leaving a detached HEAD (i.e. your repo's master branch is not updated) and adding no label:
+#
+# $ src/script/ptl-tool.py --branch HEAD --merge-branch-name master 1234567
+# Detaching HEAD onto base: master
+# Merging PR #1234567
+# Leaving HEAD detached; no branch anchors your commits
+#
+# Now push to master:
+# $ git push upstream HEAD:master
+#
+#
+# Merging PR #12345678 into luminous, leaving a detached HEAD (i.e. your local branches are not updated) and adding no label:
+#
+# $ src/script/ptl-tool.py --base luminous --branch HEAD --merge-branch-name luminous 12345678
+# Detaching HEAD onto base: luminous
+# Merging PR #12345678
+# Leaving HEAD detached; no branch anchors your commits
+#
+# Now push to luminous:
+# $ git push upstream HEAD:luminous
+#
+#
+# Merging all PRs labeled 'wip-pdonnell-testing' into master leaving a detached HEAD:
+#
+# $ src/script/ptl-tool.py --base master --branch HEAD --merge-branch-name master --pr-label wip-pdonnell-testing
+# Adding labeled PR #18192 to PR list
+# Will merge PRs: [18192]
+# Detaching HEAD onto base: master
+# Merging PR #18192
+# Leaving HEAD detached; no branch anchors your commits
+
+
+# TODO
+# Look for check failures?
+# redmine issue update: http://www.redmine.org/projects/redmine/wiki/Rest_Issues
+
+import argparse
+import codecs
+import datetime
+import getpass
+import git
+import itertools
+import json
+import logging
+import os
+import re
+import requests
+import sys
+
+from os.path import expanduser
+
+log = logging.getLogger(__name__)
+log.addHandler(logging.StreamHandler())
+log.setLevel(logging.INFO)
+
+BASE_PROJECT = os.getenv("PTL_TOOL_BASE_PROJECT", "ceph")
+BASE_REPO = os.getenv("PTL_TOOL_BASE_REPO", "ceph")
+BASE_REMOTE = os.getenv("PTL_TOOL_BASE_REMOTE", "upstream")
+BASE_PATH = os.getenv("PTL_TOOL_BASE_PATH", "refs/remotes/upstream/")
+GITDIR = os.getenv("PTL_TOOL_GITDIR", ".")
+USER = os.getenv("PTL_TOOL_USER", getpass.getuser())
+with open(expanduser("~/.github.key")) as f:
+ PASSWORD = f.read().strip()
+TEST_BRANCH = os.getenv("PTL_TOOL_TEST_BRANCH", "wip-{user}-testing-%Y%m%d.%H%M%S")
+
+SPECIAL_BRANCHES = ('master', 'luminous', 'jewel', 'HEAD')
+
+INDICATIONS = [
+ re.compile("(Reviewed-by: .+ <[\w@.-]+>)", re.IGNORECASE),
+ re.compile("(Acked-by: .+ <[\w@.-]+>)", re.IGNORECASE),
+ re.compile("(Tested-by: .+ <[\w@.-]+>)", re.IGNORECASE),
+]
+
+# find containing git dir
+git_dir = GITDIR
+max_levels = 6
+while not os.path.exists(git_dir + '/.git'):
+ git_dir += '/..'
+ max_levels -= 1
+ if max_levels < 0:
+ break
+
+CONTRIBUTORS = {}
+NEW_CONTRIBUTORS = {}
+with codecs.open(git_dir + "/.githubmap", encoding='utf-8') as f:
+ comment = re.compile("\s*#")
+ patt = re.compile("([\w-]+)\s+(.*)")
+ for line in f:
+ if comment.match(line):
+ continue
+ m = patt.match(line)
+ CONTRIBUTORS[m.group(1)] = m.group(2)
+
+BZ_MATCH = re.compile("(.*https?://bugzilla.redhat.com/.*)")
+TRACKER_MATCH = re.compile("(.*https?://tracker.ceph.com/.*)")
+
+def build_branch(args):
+    base = args.base
+    branch = datetime.datetime.utcnow().strftime(args.branch).format(user=USER)
+    label = args.label
+    merge_branch_name = args.merge_branch_name
+    if merge_branch_name is False:
+        merge_branch_name = branch
+
+    if label:
+        # Check the label format
+        if re.search(r'\bwip-(.*?)-testing\b', label) is None:
+            log.error("Unknown label '{lblname}'. Label format: wip-<name>-testing".format(lblname=label))
+            sys.exit(1)
+
+        # Check that the label exists in the repo
+        res = requests.get("https://api.github.com/repos/{project}/{repo}/labels/{lblname}".format(lblname=label, project=BASE_PROJECT, repo=BASE_REPO), auth=(USER, PASSWORD))
+        if res.status_code != 200:
+            log.error("Label '{lblname}' not found in the repo".format(lblname=label))
+            sys.exit(1)
+
+    G = git.Repo(args.git)
+
+    # First get the latest base branch and PRs from BASE_REMOTE
+    remote = getattr(G.remotes, BASE_REMOTE)
+    remote.fetch()
+
+    prs = args.prs
+    if args.pr_label is not None:
+        if args.pr_label == '' or args.pr_label.isspace():
+            log.error("--pr-label must have a non-space value")
+            sys.exit(1)
+        payload = {'labels': args.pr_label, 'sort': 'created', 'direction': 'desc'}
+        labeled_prs = requests.get("https://api.github.com/repos/{project}/{repo}/issues".format(project=BASE_PROJECT, repo=BASE_REPO), auth=(USER, PASSWORD), params=payload)
+        if labeled_prs.status_code != 200:
+            log.error("Failed to load labeled PRs: {}".format(labeled_prs))
+            sys.exit(1)
+        labeled_prs = labeled_prs.json()
+        if len(labeled_prs) == 0:
+            log.error("Search for PRs matching label '{}' returned no results!".format(args.pr_label))
+            sys.exit(1)
+        for pr in labeled_prs:
+            if pr['pull_request']:
+                n = pr['number']
+                log.info("Adding labeled PR #{} to PR list".format(n))
+                prs.append(n)
+    log.info("Will merge PRs: {}".format(prs))
+
+    if base == 'HEAD':
+        log.info("Branch base is HEAD; not checking out!")
+    else:
+        log.info("Detaching HEAD onto base: {}".format(base))
+        try:
+            base_path = args.base_path + base
+            # list comprehension rather than filter(): filter() returns a
+            # generator in Python 3 and is not subscriptable
+            base = [r for r in G.refs if r.path == base_path][0]
+        except IndexError:
+            log.error("Branch " + base + " does not exist!")
+            sys.exit(1)
+
+        # So we know that we're not on an old test branch, detach HEAD onto ref:
+        base.checkout()
+
+    for pr in prs:
+        pr = int(pr)
+        log.info("Merging PR #{pr}".format(pr=pr))
+
+        remote_ref = "refs/pull/{pr}/head".format(pr=pr)
+        fi = remote.fetch(remote_ref)
+        if len(fi) != 1:
+            log.error("PR {pr} does not exist?".format(pr=pr))
+            sys.exit(1)
+        tip = fi[0].ref.commit
+
+        pr_req = requests.get("https://api.github.com/repos/ceph/ceph/pulls/{pr}".format(pr=pr), auth=(USER, PASSWORD))
+        if pr_req.status_code != 200:
+            log.error("PR '{pr}' not found: {c}".format(pr=pr, c=pr_req))
+            sys.exit(1)
+
+        message = "Merge PR #%d into %s\n\n* %s:\n" % (pr, merge_branch_name, remote_ref)
+
+        for commit in G.iter_commits(rev="HEAD.."+str(tip)):
+            message = message + ("\t%s\n" % commit.message.split('\n', 1)[0])
+            # Get tracker issues / bzs cited so the PTL can do updates
+            short = commit.hexsha[:8]
+            for m in BZ_MATCH.finditer(commit.message):
+                log.info("[ {sha1} ] BZ cited: {cite}".format(sha1=short, cite=m.group(1)))
+            for m in TRACKER_MATCH.finditer(commit.message):
+                log.info("[ {sha1} ] Ceph tracker cited: {cite}".format(sha1=short, cite=m.group(1)))
+
+        message = message + "\n"
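+        # At this point "message" looks roughly like (illustrative):
+        #
+        #   Merge PR #12345 into <merge_branch_name>
+        #
+        #   * refs/pull/12345/head:
+        #           <first line of each commit being merged>
+        #
+        # The "Reviewed-by:" trailers gathered below are appended before merging.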
+
+        comments = requests.get("https://api.github.com/repos/{project}/{repo}/issues/{pr}/comments".format(pr=pr, project=BASE_PROJECT, repo=BASE_REPO), auth=(USER, PASSWORD))
+        if comments.status_code != 200:
+            log.error("Comments for PR '{pr}' not found: {c}".format(pr=pr, c=comments))
+            sys.exit(1)
+
+        reviews = requests.get("https://api.github.com/repos/{project}/{repo}/pulls/{pr}/reviews".format(pr=pr, project=BASE_PROJECT, repo=BASE_REPO), auth=(USER, PASSWORD))
+        if reviews.status_code != 200:
+            log.error("Reviews for PR '{pr}' not found: {c}".format(pr=pr, c=reviews))
+            sys.exit(1)
+
+        review_comments = requests.get("https://api.github.com/repos/{project}/{repo}/pulls/{pr}/comments".format(pr=pr, project=BASE_PROJECT, repo=BASE_REPO), auth=(USER, PASSWORD))
+        if review_comments.status_code != 200:
+            log.error("Review comments for PR '{pr}' not found: {c}".format(pr=pr, c=review_comments))
+            sys.exit(1)
+
+        indications = set()
+        for comment in [pr_req.json()]+comments.json()+reviews.json()+review_comments.json():
+            body = comment["body"]
+            if body:
+                url = comment["html_url"]
+                for m in BZ_MATCH.finditer(body):
+                    log.info("[ {url} ] BZ cited: {cite}".format(url=url, cite=m.group(1)))
+                for m in TRACKER_MATCH.finditer(body):
+                    log.info("[ {url} ] Ceph tracker cited: {cite}".format(url=url, cite=m.group(1)))
+                for indication in INDICATIONS:
+                    for cap in indication.findall(body):
+                        indications.add(cap)
+
+        new_new_contributors = {}
+        for review in reviews.json():
+            if review["state"] == "APPROVED":
+                user = review["user"]["login"]
+                try:
+                    indications.add("Reviewed-by: "+CONTRIBUTORS[user])
+                except KeyError:
+                    try:
+                        indications.add("Reviewed-by: "+NEW_CONTRIBUTORS[user])
+                    except KeyError:
+                        try:
+                            # assumes Python 3; this was raw_input() in Python 2
+                            name = input("Need name for contributor \"%s\" (use ^D to skip); Reviewed-by: " % user)
+                            name = name.strip()
+                            if len(name) == 0:
+                                continue
+                            NEW_CONTRIBUTORS[user] = name
+                            new_new_contributors[user] = name
+                            indications.add("Reviewed-by: "+name)
+                        except EOFError:
+                            continue
+
+        for indication in indications:
+            message = message + indication + "\n"
+
+        G.git.merge(tip.hexsha, '--no-ff', m=message)
+
+        if new_new_contributors:
+            # Check out the PR, add a commit adding to .githubmap
+            log.info("adding new contributors to githubmap in merge commit")
+            with open(git_dir + "/.githubmap", "a") as f:
+                for c in new_new_contributors:
+                    f.write("%s %s\n" % (c, new_new_contributors[c]))
+            G.index.add([".githubmap"])
+            G.git.commit("--amend", "--no-edit")
+
+        if label:
+            req = requests.post("https://api.github.com/repos/{project}/{repo}/issues/{pr}/labels".format(pr=pr, project=BASE_PROJECT, repo=BASE_REPO), data=json.dumps([label]), auth=(USER, PASSWORD))
+            if req.status_code != 200:
+                log.error("PR #%d could not be labeled %s: %s" % (pr, label, req))
+                sys.exit(1)
+            log.info("Labeled PR #{pr} {label}".format(pr=pr, label=label))
+
+    # If the branch is 'HEAD', leave HEAD detached (but use "master" for commit message)
+    if branch == 'HEAD':
+        log.info("Leaving HEAD detached; no branch anchors your commits")
+    else:
+        created_branch = False
+        try:
+            G.head.reference = G.create_head(branch)
+            log.info("Checked out new branch {branch}".format(branch=branch))
+            created_branch = True
+        except Exception:
+            # the branch already exists; force it onto the new merge tip
+            G.head.reference = G.create_head(branch, force=True)
+            log.info("Checked out branch {branch}".format(branch=branch))
+
+        if created_branch:
+            # tag it for future reference.
+            tag = "testing/%s" % branch
+            git.refs.tag.Tag.create(G, tag)
+            log.info("Created tag %s" % tag)
+
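+# Example invocations (illustrative):
+#   ptl-tool.py --label wip-jdoe-testing 12345 23456   # merge two PRs and label them
+#   ptl-tool.py --pr-label wip-jdoe-testing            # merge all open PRs carrying that label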
+def main():
+    parser = argparse.ArgumentParser(description="Ceph PTL tool")
+    default_base = 'master'
+    default_branch = TEST_BRANCH
+    default_label = ''
+    if len(sys.argv) > 1 and sys.argv[1] in SPECIAL_BRANCHES:
+        argv = sys.argv[2:]
+        default_branch = 'HEAD'  # Leave HEAD detached
+        default_base = default_branch
+        default_label = False
+    else:
+        argv = sys.argv[1:]
+    parser.add_argument('--branch', dest='branch', action='store', default=default_branch, help='branch to create ("HEAD" leaves HEAD detached; i.e. no branch is made)')
+    parser.add_argument('--merge-branch-name', dest='merge_branch_name', action='store', default=False, help='name of the branch for merge messages')
+    parser.add_argument('--base', dest='base', action='store', default=default_base, help='base for branch')
+    parser.add_argument('--base-path', dest='base_path', action='store', default=BASE_PATH, help='ref path prefix prepended to the base branch name')
+    parser.add_argument('--git-dir', dest='git', action='store', default=git_dir, help='git directory')
+    parser.add_argument('--label', dest='label', action='store', default=default_label, help='label to apply to the merged PRs (format: wip-<name>-testing)')
+    parser.add_argument('--pr-label', dest='pr_label', action='store', help='merge all open PRs carrying this label')
+    parser.add_argument('prs', metavar="PR", type=int, nargs='*', help='Pull requests to merge')
+    args = parser.parse_args(argv)
+    return build_branch(args)
+
+if __name__ == "__main__":
+    main()
diff --git a/src/script/run-coverity b/src/script/run-coverity
new file mode 100755
index 00000000..c4254ba3
--- /dev/null
+++ b/src/script/run-coverity
@@ -0,0 +1,33 @@
+#!/bin/sh -ex
+
+export COVDIR="$HOME/cov-analysis"
+if [ ! -d "$COVDIR" ]; then
+    echo "missing $COVDIR; get that from coverity!"
+    exit 1
+fi
+if [ ! -e "$HOME/coverity.build.pass.txt" ]; then
+    echo "missing $HOME/coverity.build.pass.txt"
+    exit 1
+fi
+
+export PATH="$COVDIR/bin:$PATH"
+
+rm -rf build
+./do_cmake.sh
+cd build
+cov-build --dir cov-int make -j$(nproc)   # cov-build is on PATH via $COVDIR/bin
+
+echo Sage Weil sage@newdream.net ceph >> README
+tar czvf project.tgz README cov-int
+rm -f README
+
+version=$(git describe)
+token=$(cat ~/coverity.build.pass.txt)
+curl --form token="$token" \
+    --form email=sage@newdream.net \
+    --form file=@project.tgz \
+    --form version="$version" \
+    --form description="Automated Ceph build from $(hostname)" \
+    https://scan.coverity.com/builds?project=ceph
+
+echo done.
diff --git a/src/script/run-make.sh b/src/script/run-make.sh
new file mode 100755
index 00000000..d5c2e08e
--- /dev/null
+++ b/src/script/run-make.sh
@@ -0,0 +1,159 @@
+#!/usr/bin/env bash
+
+set -e
+
+trap clean_up_after_myself EXIT
+
+ORIGINAL_CCACHE_CONF="$HOME/.ccache/ccache.conf"
+SAVED_CCACHE_CONF="$HOME/.run-make-check-saved-ccache-conf"
+
+function save_ccache_conf() {
+    test -f $ORIGINAL_CCACHE_CONF && cp $ORIGINAL_CCACHE_CONF $SAVED_CCACHE_CONF || true
+}
+
+function restore_ccache_conf() {
+    test -f $SAVED_CCACHE_CONF && mv $SAVED_CCACHE_CONF $ORIGINAL_CCACHE_CONF || true
+}
+
+function clean_up_after_myself() {
+    rm -fr ${CEPH_BUILD_VIRTUALENV:-/tmp}/*virtualenv*
+    restore_ccache_conf
+}
+
+function get_processors() {
+    # get_processors() depends on coreutils nproc.
+    if test -n "$NPROC" ; then
+        echo $NPROC
+    else
+        if test $(nproc) -ge 2 ; then
+            expr $(nproc) / 2
+        else
+            echo 1
+        fi
+    fi
+}
+
+function detect_ceph_dev_pkgs() {
+    local cmake_opts
+    local boost_root=/opt/ceph
+    if test -f $boost_root/include/boost/config.hpp; then
+        cmake_opts+=" -DWITH_SYSTEM_BOOST=ON -DBOOST_ROOT=$boost_root"
+    else
+        cmake_opts+=" -DBOOST_J=$(get_processors)"
+    fi
+    echo "$cmake_opts"
+}
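+# e.g. on a host without a prebuilt boost under /opt/ceph, detect_ceph_dev_pkgs
+# emits " -DBOOST_J=<jobs>" so cmake builds the bundled boost in parallel
+# (illustrative; the value comes from get_processors above)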
+
+function prepare() {
+    local install_cmd
+    local which_pkg="which"
+    source /etc/os-release
+    if test -f /etc/redhat-release ; then
+        if ! type bc > /dev/null 2>&1 ; then
+            echo "Please install bc and re-run."
+            exit 1
+        fi
+        if test "$(echo "$VERSION_ID >= 22" | bc)" -ne 0; then
+            install_cmd="dnf -y install"
+        else
+            install_cmd="yum install -y"
+        fi
+    elif type zypper > /dev/null 2>&1 ; then
+        install_cmd="zypper --gpg-auto-import-keys --non-interactive install --no-recommends"
+    elif type apt-get > /dev/null 2>&1 ; then
+        install_cmd="apt-get install -y"
+        which_pkg="debianutils"
+    fi
+
+    if ! type sudo > /dev/null 2>&1 ; then
+        echo "Please install sudo and re-run. This script assumes it is running"
+        echo "as a normal user with the ability to run commands as root via sudo."
+        exit 1
+    fi
+    if [ -n "$install_cmd" ]; then
+        $DRY_RUN sudo $install_cmd ccache $which_pkg
+    else
+        echo "WARNING: Don't know how to install packages" >&2
+        echo "This probably means distribution $ID is not supported by run-make-check.sh" >&2
+    fi
+
+    if ! type ccache > /dev/null 2>&1 ; then
+        echo "ERROR: ccache could not be installed"
+        exit 1
+    fi
+
+    if test -f ./install-deps.sh ; then
+        export WITH_SEASTAR=1
+        $DRY_RUN source ./install-deps.sh || return 1
+        trap clean_up_after_myself EXIT
+    fi
+
+    cat <<EOM
+Note that the binaries produced by this script do not contain correct time
+and git version information, which may make them unsuitable for debugging
+and production use.
+EOM
+    save_ccache_conf
+    # remove the entropy generated by the date/time embedded in the build
+    $DRY_RUN export SOURCE_DATE_EPOCH="946684800"
+    $DRY_RUN ccache -o sloppiness=time_macros
+    $DRY_RUN ccache -o run_second_cpp=true
+    if [ -n "$JENKINS_HOME" ]; then
+        # Build host has plenty of space available, let's use it to keep
+        # various versions of the built objects. This could increase the cache hit
+        # if the same or similar PRs are running several times
+        $DRY_RUN ccache -o max_size=100G
+    else
+        echo "Current ccache max_size setting:"
+        ccache -p | grep max_size
+    fi
+    $DRY_RUN ccache -sz # Reset the ccache statistics and show the current configuration
+}
+
+function configure() {
+    CMAKE_BUILD_OPTS="$@"
+    CMAKE_BUILD_OPTS+=$(detect_ceph_dev_pkgs)
+    # the caller's args are already in CMAKE_BUILD_OPTS; don't pass "$@" twice
+    $DRY_RUN ./do_cmake.sh $CMAKE_BUILD_OPTS || return 1
+}
+
+function build() {
+    local targets="$@"
+    $DRY_RUN cd build
+    BUILD_MAKEOPTS=${BUILD_MAKEOPTS:-$DEFAULT_MAKEOPTS}
+    test "$BUILD_MAKEOPTS" && echo "make will run with option(s) $BUILD_MAKEOPTS"
+    $DRY_RUN make $BUILD_MAKEOPTS $targets || return 1
+    $DRY_RUN ccache -s # print the ccache statistics to evaluate the efficiency
+}
+
+DEFAULT_MAKEOPTS=${DEFAULT_MAKEOPTS:--j$(get_processors)}
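+# The driver below only runs when the script is executed (not sourced), so the
+# functions above can also be reused from other scripts. Example invocation
+# (illustrative):
+#   src/script/run-make.sh --cmake-args "-DWITH_TESTS=OFF" ceph-osd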
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ # not sourced
+ if [ `uname` = FreeBSD ]; then
+ GETOPT=/usr/local/bin/getopt
+ else
+ GETOPT=getopt
+ fi
+
+ options=$(${GETOPT} --name "$0" --options "" --longoptions "cmake-args:" -- "$@")
+ if [ $? -ne 0 ]; then
+ exit 2
+ fi
+ eval set -- "${options}"
+ while true; do
+ case "$1" in
+ --cmake-args)
+ cmake_args=$2
+ shift 2;;
+ --)
+ shift
+ break;;
+ *)
+ echo "bad option $1" >& 2
+ exit 2;;
+ esac
+ done
+ prepare
+ configure "$cmake_args"
+ build "$@"
+fi
diff --git a/src/script/run_mypy.sh b/src/script/run_mypy.sh
new file mode 100755
index 00000000..3d135328
--- /dev/null
+++ b/src/script/run_mypy.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+
+# Needs to be executed from the src directory.
+# Generates a report at src/mypy_report.txt
+
+python3 -m venv venv
+
+. venv/bin/activate
+
+pip install $(find * -name requirements.txt | awk '{print "-r " $0}') mypy
+
+cat <<EOF > ./mypy.ini
+[mypy]
+strict_optional = True
+no_implicit_optional = True
+ignore_missing_imports = True
+warn_incomplete_stub = True
+check_untyped_defs = True
+show_error_context = True
+EOF
+
+
+echo "pybind:" > mypy_report.txt
+pushd pybind
+mypy --config-file=../mypy.ini *.py | awk '{print "pybind/" $0}' >> ../mypy_report.txt
+popd
+
+echo "MGR Modules:" >> mypy_report.txt
+pushd pybind/mgr
+mypy --config-file=../../mypy.ini $(find * -name '*.py' | grep -v -e venv -e tox -e env -e gyp -e node_modules) | awk '{print "pybind/mgr/" $0}' >> ../../mypy_report.txt
+popd
+
+echo "ceph-volume:" >> mypy_report.txt
+pushd ceph-volume/ceph_volume
+mypy --config-file=../../mypy.ini $(find * -name '*.py' | grep -v -e venv -e tox -e env -e gyp -e node_modules -e tests) | awk '{print "ceph-volume/ceph_volume/" $0}' >> ../../mypy_report.txt
+popd
+
diff --git a/src/script/run_uml.sh b/src/script/run_uml.sh
new file mode 100755
index 00000000..9bff38b2
--- /dev/null
+++ b/src/script/run_uml.sh
@@ -0,0 +1,212 @@
+#!/bin/bash -norc
+
+# Magic startup script for a UML instance. As long as unique
+# instances are started, more than one of them can be concurrently
+# in use on a single system. All their network interfaces are
+# bridged together onto the virtual bridge "virbr0" which is
+# supplied by the "libvirt" package.
+#
+# Note that a DHCP server is started for that interface. It's
+# configured in this file:
+# /etc/libvirt/qemu/networks/default.xml
+# Unfortunately what I see there serves all possible DHCP addresses,
+# so stealing them like we do here isn't really kosher. To fix
+# it, that configuration should change to serve a smaller subset
+# of the available address range.
+#
+# Each instance uses its own tun/tap device, created using the
+# "tunctl" command. The assigned tap device will correspond with
+# the guest id (a small integer representing the instance), i.e.,
+# guest id 1 uses tap1, etc. The tap device is attached to the
+# virtual bridge, which will have its own subnet associated with it.
+# The guest side of that interface will have the same subnet as the
+# bridge interface, with the bottom bits representing (normally) 100
+# more than the guest id. So for subnet 192.168.122.0/24, guest
+# id 1 will use ip 192.168.122.101, guest id 2 will use ip
+# 192.168.122.102, and so on. Because these interfaces are bridged,
+# they can all communicate with each other.
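+#
+# Example (illustrative): with virbr0 on 192.168.122.1/24, "run_uml.sh 2"
+# boots a guest named uml-2 attached to tap2 on the host, with eth0
+# statically configured as 192.168.122.102.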
+
+# You will want to override this by setting and exporting the
+# "CEPH_TOP" environment variable to be the directory that contains
+# the "ceph-client" source tree.
+CEPH_TOP="${CEPH_TOP:-/home/elder/ceph}"
+
+# You may want to change this too, if you want guest UML instances
+# to have a different IP address range. The guest IP will be based
+# on this plus GUEST_ID (defined below).
+GUEST_IP_OFFSET="${GUEST_IP_OFFSET:-100}"
+
+#############################
+
+if [ $# -gt 1 ]; then
+    echo "" >&2
+    echo "Usage: $(basename $0) [guest_id]" >&2
+    echo "" >&2
+    echo "    guest_id is a small integer (default 1)" >&2
+    echo "    (each UML instance needs a distinct guest_id)" >&2
+    echo "" >&2
+    exit 1
+elif [ $# -eq 1 ]; then
+    GUEST_ID="$1"
+else
+    GUEST_ID=1
+fi
+
+# This will be what the guest host calls itself.
+GUEST_HOSTNAME="uml-${GUEST_ID}"
+
+# This is the path to the boot disk image used by UML.
+DISK_IMAGE_A="${CEPH_TOP}/ceph-client/uml.${GUEST_ID}"
+if [ ! -f "${DISK_IMAGE_A}" ]; then
+    echo "root disk image not found (or not a file)" >&2
+    exit 2
+fi
+
+# Hostid 1 uses tun/tap device tap1, hostid 2 uses tap2, etc.
+TAP_ID="${GUEST_ID}"
+# This is the tap device used for this UML instance
+TAP="tap${TAP_ID}"
+
+# This is just used to mount an image temporarily
+TMP_MNT="/tmp/m$$"
+
+# Where to put a config file generated for this tap device
+TAP_IFUPDOWN_CONFIG="/tmp/interface-${TAP}"
+
+# Compute the HOST_IP and BROADCAST address values to use,
+# and assign shell variables with those names to their values.
+# Also compute BITS, which is the network prefix length used.
+# The NETMASK is then computed using that BITS value.
+eval $(
+ip addr show virbr0 | awk '
+/inet / {
+    split($2, a, "/")
+    printf("HOST_IP=%s\n", a[1]);
+    printf("BROADCAST=%s\n", $4);
+    printf("BITS=%s\n", a[2]);
+    exit(0);
+}')
+
+# Use bc to avoid 32-bit wrap when computing netmask
+eval $(
+echo -n "NETMASK="
+bc <<! | fmt | sed 's/ /./g'
+m = 2 ^ 32 - 2 ^ (32 - ${BITS})
+for (p = 24; p >= 0; p = p - 8)
+ m / (2 ^ p) % 256
+!
+)
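+# e.g. BITS=24 yields NETMASK=255.255.255.0; BITS=16 yields 255.255.0.0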
+
+# Now use the netmask and the host IP to compute the subnet address
+# and from that the guest IP address to use.
+eval $(
+gawk '
+function from_quad(addr, a, val, i) {
+    if (split(addr, a, ".") != 4)
+        exit(1); # address not in dotted quad format
+    val = 0;
+    for (i = 1; i <= 4; i++)
+        val = val * 256 + a[i];
+    return val;
+}
+function to_quad(val, addr, i) {
+    addr = "";
+    for (i = 1; i <= 4; i++) {
+        addr = sprintf("%u%s%s", val % 256, i > 1 ? "." : "", addr);
+        val = int(val / 256);
+    }
+    if ((val + 0) != 0)
+        exit(1); # value provided exceeded 32 bits
+    return addr;
+}
+BEGIN {
+    host_ip = from_quad("'${HOST_IP}'");
+    netmask = from_quad("'${NETMASK}'");
+    guest_net_ip = '${GUEST_IP_OFFSET}' + '${GUEST_ID}';
+    if (and(netmask, guest_net_ip))
+        exit(1); # address too big for subnet
+    subnet = and(host_ip, netmask);
+    guest_ip = or(subnet, guest_net_ip);
+    if (guest_ip == host_ip)
+        exit(1); # computed guest ip matches host ip
+
+    printf("SUBNET=%s\n", to_quad(subnet));
+    printf("GUEST_IP=%s\n", to_quad(guest_ip));
+}
+' < /dev/null
+)
+
+############## OK, we now know all our network parameters...
+
+# There is a series of things that need to be done as superuser,
+# so group them all into one big (and sort of nested!) sudo request.
+sudo -s <<EnD_Of_sUdO
+# Mount the boot disk for the UML and set up some configuration
+# files there.
+mkdir -p "${TMP_MNT}"
+mount -o loop "${DISK_IMAGE_A}" "${TMP_MNT}"
+
+# Arrange for loopback and eth0 to load automatically,
+# and for eth0 to have our desired network parameters.
+cat > "${TMP_MNT}/etc/network/interfaces" <<!
+# Used by ifup(8) and ifdown(8). See the interfaces(5) manpage or
+# /usr/share/doc/ifupdown/examples for more information.
+auto lo
+iface lo inet loopback
+auto eth0
+# iface eth0 inet dhcp
+iface eth0 inet static
+ address ${GUEST_IP}
+ netmask ${NETMASK}
+ broadcast ${BROADCAST}
+ gateway ${HOST_IP}
+!
+
+# Have the guest start with an appropriate host name.
+# Also record an entry for it in its own hosts file.
+echo "${GUEST_HOSTNAME}" > "${TMP_MNT}/etc/hostname"
+echo "${GUEST_IP} ${GUEST_HOSTNAME}" >> "${TMP_MNT}/etc/hosts"
+
+# The host will serve as the name server also
+cat > "${TMP_MNT}/etc/resolv.conf" <<!
+nameserver ${HOST_IP}
+!
+
+# OK, done tweaking the boot image.
+sync
+umount "${TMP_MNT}"
+rmdir "${TMP_MNT}"
+
+# Set up a config file for "ifup" and "ifdown" (on the host) to use.
+# All the backslashes below are needed because we're sitting inside
+# a double here-document...
+cat > "${TAP_IFUPDOWN_CONFIG}" <<!
+iface ${TAP} inet manual
+ up brctl addif virbr0 "\\\${IFACE}"
+ up ip link set dev "\\\${IFACE}" up
+ pre-down brctl delif virbr0 "\\\${IFACE}"
+ pre-down ip link del dev "\\\${IFACE}"
+ tunctl_user $(whoami)
+!
+
+# OK, bring up the tap device using our config file
+ifup -i "${TAP_IFUPDOWN_CONFIG}" "${TAP}"
+
+EnD_Of_sUdO
+
+# Finally ready to launch the UML instance.
+./linux \
+ umid="${GUEST_HOSTNAME}" \
+ ubda="${DISK_IMAGE_A}" \
+ eth0="tuntap,${TAP}" \
+ mem=1024M
+
+# When we're done, clean up. Bring down the tap interface and
+# delete the config file.
+#
+# Note that if the above "./linux" crashes, you'll need to run the
+# following commands manually in order to clean up state.
+sudo ifdown -i "${TAP_IFUPDOWN_CONFIG}" "${TAP}"
+sudo rm -f "${TAP_IFUPDOWN_CONFIG}"
+
+exit 0
diff --git a/src/script/sepia_bt.sh b/src/script/sepia_bt.sh
new file mode 100755
index 00000000..f2d74aa8
--- /dev/null
+++ b/src/script/sepia_bt.sh
@@ -0,0 +1,182 @@
+#!/usr/bin/env bash
+
+function die() {
+    echo "$@" >&2
+    exit 1
+}
+
+function usage() {
+    echo "usage: $0 -c core_path [-d distro] [-C directory] [-v]"
+    exit 1
+}
+
+function log() {
+    if [ -n "$verbose" ]; then
+        echo "$*"
+    fi
+}
+
+function get_machine() {
+    local core_path=$1
+    local machine=${core_path%/coredump/*}
+    echo $(basename $machine)
+}
+
+function get_t_dir() {
+    local core_path=$1
+    echo ${core_path%/remote/*}
+}
+
+while getopts "c:C:d:v" opt
+do
+    case $opt in
+        c) core_path=$OPTARG;;
+        C) wd=$OPTARG;;
+        d) codename=$OPTARG;;
+        v) verbose=1;;
+        *) usage;;
+    esac
+done
+
+if [ -z "$core_path" ]; then
+    usage
+fi
+
+sha1=$(strings $core_path | gawk 'BEGIN{ FS = "=" } /^CEPH_REF/{print $2}')
+if [ -z "$sha1" ]; then
+    teuthology_log=$(get_t_dir $core_path)/teuthology.log
+    sha1=$(grep -m1 -A1 "Running command: sudo ceph --version" ${teuthology_log} | tail -n1 | grep -oP "ceph version [^ ]+ \(\K[^\) ]+")
+fi
+
+if [ -z "$codename" ]; then
+    machine=$(get_machine $core_path)
+    teuthology_log=$(get_t_dir $core_path)/teuthology.log
+    if [ ! -r ${teuthology_log} ]; then
+        die "missing distro, and unable to read it from ${teuthology_log}"
+    fi
+    ld=$(grep -m1 -A1 "${machine}:Running.*linux_distribution" ${teuthology_log} | tail -n1 | grep -oP "\(\K[^\)]+")
+    distro=$(echo $ld | gawk -F ", " '{print $1}' | sed s/\'//g)
+    distro=$(echo $distro | tr '[:upper:]' '[:lower:]')
+    distro_ver=$(echo $ld | gawk -F ", " '{print $2}' | sed s/\'//g)
+    codename=$(echo $ld | gawk -F ", " '{print $3}' | sed s/\'//g)
+    if [ "$distro" == "centos linux" ]; then
+        # there is a chance that it's actually something different,
+        # but we take it as centos7 anyway.
+        distro=centos
+        distro_ver=7
+    fi
+else
+    case $codename in
+        xenial)
+            distro=ubuntu
+            distro_ver=16.04
+            ;;
+        trusty)
+            distro=ubuntu
+            distro_ver=14.04
+            ;;
+        centos7)
+            distro=centos
+            distro_ver=7
+            ;;
+        *)
+            die "unknown codename: $codename"
+            ;;
+    esac
+fi
+
+# try to figure out a name for working directory
+if [ -z "$wd" ]; then
+    run=${core_path%/remote/*}
+    job_id=${run#/a/}
+    if [ "$job_id" != "$core_path" ]; then
+        # use the run/job for the working dir
+        wd=$job_id
+    fi
+fi
+
+if [ -z "$wd" ]; then
+    wd=$(basename $core_path)
+    wd=${wd%.*}
+    echo "unable to figure out the working directory, using ${wd}"
+fi
+
+log "creating ${wd}"
+mkdir -p $wd
+cd $wd
+
+prog=$(file $core_path | grep -oP "from '\K[^']+" | cut -d' ' -f1)
+case $prog in
+    ceph_test_*)
+        pkgs="ceph-test librados2"
+        ;;
+    ceph-osd|ceph-mon)
+        pkgs=$prog
+        ;;
+    */python*)
+        prog=$(basename $prog)
+        pkgs=librados2
+        ;;
+    rados)
+        pkgs="ceph-common librados2 libradosstriper1"
+        ;;
+    *)
+        die "unknown prog: $prog"
+        ;;
+esac
+
+flavor=default
+arch=x86_64
+
+release=$(strings $core_path | grep -m1 -oP '/build/ceph-\K[^/]+')
+if [ -z "$release" ]; then
+    teuthology_log=$(get_t_dir $core_path)/teuthology.log
+    release=$(grep -m1 -A1 "Running command: sudo ceph --version" ${teuthology_log} | tail -n1 | grep -oP "ceph version \K[^ ]+")
+fi
+
+case $distro in
+    ubuntu)
+        pkg_path=pool/main/c/ceph/%s_%s-1${codename}_amd64.deb
+        for p in $pkgs; do
+            t="$t $p $p-dbg"
+        done
+        pkgs="$t"
+        ;;
+    centos)
+        pkg_path=${arch}/%s-%s.el7.x86_64.rpm
+        # 11.0.2-1022-g5b25cd3 => 11.0.2-1022.g5b25cd3
+        release=$(echo $release | sed s/-/./2)
+        pkgs="$pkgs ceph-debuginfo"
+        ;;
+    *)
+        die "unknown distro: $distro"
+        ;;
+esac
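+# pkg_path is a printf template filled in per package below; for ubuntu/xenial
+# it would expand to something like (illustrative):
+#   pool/main/c/ceph/ceph-osd-dbg_12.2.1-1xenial_amd64.deb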
+
+query_url="https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=${flavor}&distros=${distro}%2F${distro_ver}%2F${arch}&sha1=${sha1}"
+repo_url=$(curl -L -s "${query_url}" | jq -r '.[0] | .url')
+pkg_url=${repo_url}/${pkg_path}
+log "repo url is ${repo_url}"
+
+for pkg in ${pkgs}; do
+    url=$(printf $pkg_url $pkg $release)
+    log "downloading ${url}"
+    curl -O -L -C - --silent --fail --insecure $url
+    fname=$(basename $url)
+    case $fname in
+        *.deb)
+            ar p $fname data.tar.xz | tar xJv;;
+        *.rpm)
+            rpm2cpio < $fname | cpio -id;;
+        *)
+    esac
+done
+
+cat > preclude.gdb <<EOF
+set sysroot .
+set debug-file-directory ./usr/lib/debug
+set solib-search-path ./usr/lib64
+file ./usr/bin/$prog
+core $core_path
+EOF
+gdb -x preclude.gdb
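+# gdb starts with the core loaded against the downloaded binaries and debug
+# symbols; type "bt" at the (gdb) prompt to get the backtrace.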
diff --git a/src/script/smr_benchmark/linearCopy.sh b/src/script/smr_benchmark/linearCopy.sh
new file mode 100755
index 00000000..416a7e74
--- /dev/null
+++ b/src/script/smr_benchmark/linearCopy.sh
@@ -0,0 +1,91 @@
+#!/usr/bin/env bash
+
+# copy a linear file from srcFile to destination disk in a loop until writeSize MBs is written
+# destinationDisk is a SMR Host Aware Disk eg. /dev/sdb
+
+if [ "$#" -lt 3 ]; then
+ echo "Usage ./linearCopy.sh srcFile destinationDisk writeSize(MB)"
+ exit
+fi
+
+if [ "$(id -u)" != "0" ]; then
+ echo "Please run as sudo user"
+ exit
+fi
+
+srcFile=$1
+destDisk=$2
+writeSize=$3
+verbose=true
+
+if [ -f time ]; then
+    rm -f time
+fi
+
+#chunkSize=4096 # in bytes
+chunkSize=1048576 # in bytes
+fileSize=`stat --printf="%s" $srcFile`
+
+# total number of 'chunkSize' chunks needed to write 'writeSize' MB
+chunksLeft=$(( $(($writeSize * 1048576)) / $chunkSize))
+
+
+echo "fileSize = $fileSize"
+
+if [ "$(($fileSize % 512))" -ne 0 ]; then
+ echo "$srcFile not 512 byte aligned"
+ exit
+fi
+
+if [ "$(($chunkSize % 512))" -ne 0 ]; then
+ echo "$chunkSize not 512 byte aligned"
+ exit
+fi
+
+if [ "$fileSize" -lt "$chunkSize" ]; then
+ echo "filesize $fileSize should be greater than chunkSize $chunkSize"
+ exit
+fi
+
+
+numFileChunks=$(($fileSize / $chunkSize))
+if [ $verbose == true ]; then
+ echo "numFileChunks = $numFileChunks"
+fi
+
+smrLBAStart=33554432 # TODO query from SMR Drive
+#smrLBAStart=37224448
+
+offset=$(( $smrLBAStart / $(( $chunkSize / 512)) ))
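+# e.g. with the default 1 MiB chunkSize: 33554432 / (1048576 / 512) = 16384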
+
+if [ $verbose == true ]; then
+    echo "chunksLeft = $chunksLeft, offset = $offset"
+fi
+
+chunkNum=0
+
+while [ "$chunksLeft" -gt 0 ]; do
+    chunkNum=$(($chunkNum + 1))
+    if [ $verbose == true ]; then
+        echo "CHUNK $chunkNum `date +%H:%M:%S`" >> time
+    fi
+    dd if=$srcFile of=$destDisk seek=$offset bs=$chunkSize 2> tmp
+    grep MB tmp >> time
+    if [ $verbose == true ]; then
+        echo "chunksLeft = $chunksLeft, offset = $offset"
+    fi
+    chunksLeft=$(($chunksLeft - $numFileChunks))
+    offset=$(($offset + $numFileChunks))
+done
+
+if [ -f tmp ]; then
+    rm tmp
+fi
+
+if [ $verbose == false ]; then
+    rm time
+else
+    echo "Time Stamp for Chunk Writes"
+    cat time
+    rm time
+fi
diff --git a/src/script/smr_benchmark/linearSMRCopy.sh b/src/script/smr_benchmark/linearSMRCopy.sh
new file mode 100755
index 00000000..1ff2695c
--- /dev/null
+++ b/src/script/smr_benchmark/linearSMRCopy.sh
@@ -0,0 +1,69 @@
+#! /usr/bin/env bash
+
+# copy a linear file from srcFile to destination SMRDisk in a loop until writeSize MBs is written
+# SMRDisk is the SMR Host Aware / Host Managed Disk eg. /dev/sdb
+
+usage(){
+    echo "Usage: ./linearSMRCopy.sh <srcFile> <SMRDisk> <writeSize (MB)>"
+}
+
+if [ "$#" -lt 3 ]; then
+    usage
+    exit 1
+fi
+
+if [ "$(id -u)" != "0" ]; then
+    echo "Please run as root"
+    exit 1
+fi
+
+srcFile=$1
+SMRDisk=$2
+writeSize=$3
+iosize=10240
+
+# parse the arguments before touching the disk, so the zone reset below
+# targets the disk that was actually passed in rather than a hardcoded one
+if which zbc_open_zone > /dev/null 2>&1 && which zbc_read_zone > /dev/null 2>&1 && which zbc_write_zone > /dev/null 2>&1 ; then
+    echo "libzbc commands present... refreshing zones"
+    # reset all write pointers before starting to write
+    sudo zbc_reset_write_ptr ${SMRDisk} -1
+else
+    echo "libzbc commands not detected. Please install libzbc"
+    exit 1
+fi
+
+numberOfSectors=$(($writeSize * 2048))
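+# writeSize MB * 2048 = number of 512-byte sectors; e.g. 100 MB -> 204800 sectors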
+
+smrZoneStart=33554432 # TODO query this from SMR drive
+
+#dd if=$srcFile of=$destDisk seek=$smrZoneStart bs=512
+
+fileSize=`stat --printf="%s" $srcFile`
+
+if [ "$(($fileSize % 512))" -ne 0 ]; then
+ echo "$srcFile not 512 byte aligned"
+ exit
+fi
+
+znum=64 # TODO query this from SMR Drive
+
+zoneLength=524288 # number of sectors in each zone TODO query from SMR drive
+
+writeOffset=$smrZoneStart
+
+# write the requested number of sectors, looping over the source file as needed
+sectorsLeftToWrite=$numberOfSectors
+
+echo "write begin sectors Left = $sectorsLeftToWrite, writeOffset = $writeOffset zone Num = $znum"
+
+while [ "$sectorsLeftToWrite" -gt 0 ];
+do
+ sudo zbc_open_zone $SMRDisk $znum
+ sudo time zbc_write_zone -f $srcFile -loop $SMRDisk $znum $iosize
+ sudo zbc_close_zone /dev/sdb $znum
+ writeOffset=$(($writeOffset+$zoneLength))
+ znum=$(($znum+1))
+ sectorsLeftToWrite=$(($sectorsLeftToWrite - $zoneLength))
+done
+
+echo "write end sectors Left = $sectorsLeftToWrite, writeOffset = $writeOffset zone Num = $znum"
diff --git a/src/script/strip_trailing_whitespace.sh b/src/script/strip_trailing_whitespace.sh
new file mode 100755
index 00000000..7fa8060a
--- /dev/null
+++ b/src/script/strip_trailing_whitespace.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+sed -i 's/[ \t]*$//' "$1"
+sed -i 's/^    /\t/' "$1"