author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 17:20:00 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 17:20:00 +0000
commit     8daa83a594a2e98f39d764422bfbdbc62c9efd44 (patch)
tree       4099e8021376c7d8c05bdf8503093d80e9c7bad0 /script
parent     Initial commit. (diff)
Adding upstream version 2:4.20.0+dfsg. (upstream/2%4.20.0+dfsg)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'script')
-rwxr-xr-x  script/attr_count_read                       197
-rwxr-xr-x  script/autobuild.py                         1902
-rwxr-xr-x  script/bisect-test.py                        101
-rwxr-xr-x  script/check-shell-scripts.sh                 20
-rwxr-xr-x  script/clean-source-tree.sh                   33
-rwxr-xr-x  script/codespell.sh                           21
-rwxr-xr-x  script/commit_mark.sh                         21
-rwxr-xr-x  script/compare_cc_results.py                  71
-rwxr-xr-x  script/configure_check_unused.pl             124
-rwxr-xr-x  script/ctdb-import.msg-filter.sh              11
-rwxr-xr-x  script/ctdb-import.tree-filter.sh             13
-rw-r--r--  script/ctdb-import.txt                         5
-rwxr-xr-x  script/find_python.sh                          9
-rwxr-xr-x  script/findstatic.pl                          70
-rw-r--r--  script/generate_param.py                     431
-rwxr-xr-x  script/git-hooks/check-trailing-whitespace    17
-rwxr-xr-x  script/git-hooks/pre-commit-hook              17
-rwxr-xr-x  script/git-hooks/pre-commit-script            19
-rwxr-xr-x  script/identity_cc.sh                         10
-rwxr-xr-x  script/release.sh                           1275
-rwxr-xr-x  script/show_test_time                         29
-rwxr-xr-x  script/show_testsuite_time                    51
-rwxr-xr-x  script/traffic_learner                        72
-rwxr-xr-x  script/traffic_replay                        445
-rwxr-xr-x  script/traffic_summary.pl                    707
25 files changed, 5671 insertions, 0 deletions
diff --git a/script/attr_count_read b/script/attr_count_read
new file mode 100755
index 0000000..2f6a4d0
--- /dev/null
+++ b/script/attr_count_read
@@ -0,0 +1,197 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) Catalyst IT Ltd. 2019
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import argparse
+import struct
+import os
+from collections import OrderedDict, Counter
+from pprint import pprint
+
+sys.path.insert(0, "bin/python")
+import tdb
+
+
+def unpack_uint(filename, casefold=True):
+ db = tdb.Tdb(filename)
+ d = {}
+ for k in db:
+ v = struct.unpack("I", db[k])[0]
+ k2 = k.decode('utf-8')
+ if casefold:
+ k2 = k2.lower()
+        if k2 in d: # keys can collide after casefolding
+ d[k2] += v
+ else:
+ d[k2] = v
+ return d
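+
+# For illustration (a sketch with made-up records, not real data): a tdb
+# containing {b"cn": struct.pack("I", 3), b"CN": struct.pack("I", 2)} would
+# yield {"cn": 5} with casefold=True, or {"cn": 3, "CN": 2} with
+# casefold=False.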
+
+
+def unpack_ssize_t_pair(filename, casefold):
+ db = tdb.Tdb(filename)
+ pairs = []
+ for k in db:
+ key = struct.unpack("nn", k)
+ v = struct.unpack("I", db[k])[0]
+ pairs.append((v, key))
+
+ pairs.sort(reverse=True)
+ #print(pairs)
+ return [(k, v) for (v, k) in pairs]
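+
+# For illustration (again a made-up record): a key of struct.pack("nn", 4, 7)
+# with value struct.pack("I", 12) becomes ((4, 7), 12), i.e. "4 attributes
+# requested, 7 returned, seen 12 times"; the most frequent pairs come first.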
+
+
+DATABASES = [
+ ('requested', "debug/attr_counts_requested.tdb", unpack_uint,
+ "The attribute was specifically requested."),
+ ('duplicates', "debug/attr_counts_duplicates.tdb", unpack_uint,
+ "Requested more than once in the same request."),
+ ('empty request', "debug/attr_counts_empty_req.tdb", unpack_uint,
+ "No attributes were requested, but these were returned"),
+ ('null request', "debug/attr_counts_null_req.tdb", unpack_uint,
+ "The attribute list was NULL and these were returned."),
+ ('found', "debug/attr_counts_found.tdb", unpack_uint,
+ "The attribute was specifically requested and it was found."),
+ ('not found', "debug/attr_counts_not_found.tdb", unpack_uint,
+ "The attribute was specifically requested but was not found."),
+ ('unwanted', "debug/attr_counts_unwanted.tdb", unpack_uint,
+ "The attribute was not requested and it was found."),
+ ('star match', "debug/attr_counts_star_match.tdb", unpack_uint,
+ 'The attribute was not specifically requested but "*" was.'),
+ ('req vs found', "debug/attr_counts_req_vs_found.tdb", unpack_ssize_t_pair,
+ "How many attributes were requested versus how many were returned."),
+]
+
+
+def plot_pair_data(name, data, doc, lim=90):
+ # Note we keep the matplotlib import internal to this function for
+ # two reasons:
+ # 1. Some people won't have matplotlib, but might want to run the
+ # script.
+ # 2. The import takes hundreds of milliseconds, which is a
+ # nuisance if you don't want graphs.
+ #
+ # This plot could be improved!
+ import matplotlib.pylab as plt
+ fig, ax = plt.subplots()
+ if lim:
+ data2 = []
+ for p, c in data:
+ if p[0] > lim or p[1] > lim:
+ print("not plotting %s: %s" % (p, c))
+ continue
+ data2.append((p, c))
+ skipped = len(data) - len(data2)
+ if skipped:
+ name += " (excluding %d out of range values)" % skipped
+ data = data2
+ xy, counts = zip(*data)
+ x, y = zip(*xy)
+ bins_x = max(x) + 4
+ bins_y = max(y)
+ ax.set_title(name)
+ ax.scatter(x, y, c=counts)
+ plt.show()
+
+
+def print_pair_data(name, data, doc):
+ print(name)
+ print(doc)
+ t = "%14s | %14s | %14s"
+ print(t % ("requested", "returned", "count"))
+ print(t % (('-' * 14,) * 3))
+
+ for xy, count in data:
+ x, y = xy
+ if x == -2:
+ x = 'NULL'
+ elif x == -4:
+ x = '*'
+ print(t % (x, y, count))
+
+
+def print_counts(count_data):
+ all_attrs = Counter()
+ for c in count_data:
+ all_attrs.update(c[1])
+
+ print("found %d attrs" % len(all_attrs))
+ longest = max(len(x) for x in all_attrs)
+
+ #pprint(all_attrs)
+ rows = OrderedDict()
+ for a, _ in all_attrs.most_common():
+ rows[a] = [a]
+
+ for col_name, counts, doc in count_data:
+ for attr, row in rows.items():
+ d = counts.get(attr, '')
+ row.append(d)
+
+ print("%15s: %s" % (col_name, doc))
+ print()
+
+ t = "%{}s".format(longest)
+ for c in count_data:
+ t += " | %{}s".format(max(len(c[0]), 7))
+
+ h = t % (("attribute",) + tuple(c[0] for c in count_data))
+ print(h)
+ print("-" * len(h))
+
+ for attr, row in rows.items():
+ print(t % tuple(row))
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('LDB_PRIVATE_DIR',
+ help="read attr counts in this directory")
+ parser.add_argument('--plot', action="store_true",
+ help='attempt to draw graphs')
+ parser.add_argument('--no-casefold', action="store_false",
+ default=True, dest="casefold",
+ help='See all the encountered case variants')
+ args = parser.parse_args()
+
+ if not os.path.isdir(args.LDB_PRIVATE_DIR):
+ parser.print_usage()
+ sys.exit(1)
+
+ count_data = []
+ pair_data = []
+ for k, filename, unpacker, doc in DATABASES:
+ filename = os.path.join(args.LDB_PRIVATE_DIR, filename)
+ try:
+ d = unpacker(filename, casefold=args.casefold)
+ except (RuntimeError, IOError) as e:
+ print("could not parse %s: %s" % (filename, e))
+ continue
+ if unpacker is unpack_ssize_t_pair:
+ pair_data.append((k, d, doc))
+ else:
+ count_data.append((k, d, doc))
+
+ for k, v, doc in pair_data:
+ if args.plot:
+ plot_pair_data(k, v, doc)
+ print_pair_data(k, v, doc)
+
+ print()
+ print_counts(count_data)
+
+main()
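+
+# Example invocations (a sketch; the private directory path is hypothetical
+# and must contain the debug/attr_counts_*.tdb databases):
+#   script/attr_count_read /usr/local/samba/private
+#   script/attr_count_read --plot /usr/local/samba/private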
diff --git a/script/autobuild.py b/script/autobuild.py
new file mode 100755
index 0000000..ecec352
--- /dev/null
+++ b/script/autobuild.py
@@ -0,0 +1,1902 @@
+#!/usr/bin/env python3
+# run tests on all Samba subprojects and push to a git tree on success
+# Copyright Andrew Tridgell 2010
+# released under GNU GPL v3 or later
+
+from subprocess import call, check_call, check_output, Popen, PIPE, CalledProcessError
+import os
+import tarfile
+import sys
+import time
+import random
+from optparse import OptionParser
+import smtplib
+import email
+from email.mime.text import MIMEText
+from email.mime.base import MIMEBase
+from email.mime.application import MIMEApplication
+from email.mime.multipart import MIMEMultipart
+from sysconfig import get_path
+import platform
+
+import logging
+
+try:
+ from waflib.Build import CACHE_SUFFIX
+except ImportError:
+ sys.path.insert(0, "./third_party/waf")
+ from waflib.Build import CACHE_SUFFIX
+
+logging.basicConfig(format='%(asctime)s %(message)s')
+logger = logging.getLogger('autobuild')
+logger.setLevel(logging.INFO)
+
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+# This speeds up testing remarkably.
+os.environ['TDB_NO_FSYNC'] = '1'
+
+# allow autobuild to run within git rebase -i
+if "GIT_DIR" in os.environ:
+ del os.environ["GIT_DIR"]
+if "GIT_WORK_TREE" in os.environ:
+ del os.environ["GIT_WORK_TREE"]
+
+def find_git_root():
+ '''get to the top of the git repo'''
+ p = os.getcwd()
+ while p != '/':
+ if os.path.exists(os.path.join(p, ".git")):
+ return p
+ p = os.path.abspath(os.path.join(p, '..'))
+ return None
+
+
+gitroot = find_git_root()
+if gitroot is None:
+ raise Exception("Failed to find git root")
+
+
+def_testbase = os.getenv("AUTOBUILD_TESTBASE", "/memdisk/%s" % os.getenv('USER'))
+
+parser = OptionParser()
+parser.add_option("--tail", help="show output while running", default=False, action="store_true")
+parser.add_option("--keeplogs", help="keep logs", default=False, action="store_true")
+parser.add_option("--nocleanup", help="don't remove test tree", default=False, action="store_true")
+parser.add_option("--skip-dependencies", help="skip to run task dependency tasks", default=False, action="store_true")
+parser.add_option("--testbase", help="base directory to run tests in (default %s)" % def_testbase,
+ default=def_testbase)
+parser.add_option("--full-testbase", help="full base directory to run tests in (default %s/b$PID)" % def_testbase,
+ default=None)
+parser.add_option("--passcmd", help="command to run on success", default=None)
+parser.add_option("--verbose", help="show all commands as they are run",
+ default=False, action="store_true")
+parser.add_option("--rebase", help="rebase on the given tree before testing",
+ default=None, type='str')
+parser.add_option("--pushto", help="push to a git url on success",
+ default=None, type='str')
+parser.add_option("--mark", help="add a Tested-By signoff before pushing",
+ default=False, action="store_true")
+parser.add_option("--fix-whitespace", help="fix whitespace on rebase",
+ default=False, action="store_true")
+parser.add_option("--retry", help="automatically retry if master changes",
+ default=False, action="store_true")
+parser.add_option("--email", help="send email to the given address on failure",
+ type='str', default=None)
+parser.add_option("--email-from", help="send email from the given address",
+ type='str', default="autobuild@samba.org")
+parser.add_option("--email-server", help="send email via the given server",
+ type='str', default='localhost')
+parser.add_option("--always-email", help="always send email, even on success",
+ action="store_true")
+parser.add_option("--daemon", help="daemonize after initial setup",
+ action="store_true")
+parser.add_option("--branch", help="the branch to work on (default=master)",
+ default="master", type='str')
+parser.add_option("--log-base", help="location where the logs can be found (default=cwd)",
+ default=gitroot, type='str')
+parser.add_option("--attach-logs", help="Attach logs to mails sent on success/failure?",
+ default=False, action="store_true")
+parser.add_option("--restrict-tests", help="run as make test with this TESTS= regex",
+ default='')
+parser.add_option("--enable-coverage", dest='enable_coverage',
+ action="store_const", const='--enable-coverage', default='',
+ help="Add --enable-coverage option while configure")
+
+(options, args) = parser.parse_args()
+
+if options.retry:
+ if options.rebase is None:
+ raise Exception('You can only use --retry if you also rebase')
+
+if options.verbose:
+ logger.setLevel(logging.DEBUG)
+
+if options.full_testbase is not None:
+ testbase = options.full_testbase
+else:
+ testbase = "%s/b%u" % (options.testbase, os.getpid())
+test_master = "%s/master" % testbase
+test_prefix = "%s/prefix" % testbase
+test_tmpdir = "%s/tmp" % testbase
+os.environ['TMPDIR'] = test_tmpdir
+
+if options.enable_coverage:
+ LCOV_CMD = "cd ${TEST_SOURCE_DIR} && lcov --capture --directory . --output-file ${LOG_BASE}/${NAME}.info --rc 'geninfo_adjust_src_path=${TEST_SOURCE_DIR}/'"
+else:
+ LCOV_CMD = 'echo "lcov skipped since no --enable-coverage specified"'
+
+if options.enable_coverage:
+ PUBLISH_DOCS = "mkdir -p ${LOG_BASE}/public && mv output/htmldocs ${LOG_BASE}/public/htmldocs"
+else:
+ PUBLISH_DOCS = 'echo "HTML documentation publishing skipped since no --enable-coverage specified"'
+
+CLEAN_SOURCE_TREE_CMD = "cd ${TEST_SOURCE_DIR} && script/clean-source-tree.sh"
+
+
+def check_symbols(sofile, expected_symbols=""):
+ return "objdump --dynamic-syms " + sofile + " | " + \
+ "awk \'$0 !~ /" + expected_symbols + "/ {if ($2 == \"g\" && $3 ~ /D(F|O)/ && $4 ~ /(.bss|.text)/ && $7 !~ /(__gcov_|mangle_path)/) exit 1}\'"
+
+if args:
+    # If we are only running a specific test,
+    # do not sleep randomly before starting it
+ def random_sleep(low, high):
+ return 'sleep 1'
+else:
+ def random_sleep(low, high):
+ return 'sleep {}'.format(random.randint(low, high))
+
+cleanup_list = []
+
+builddirs = {
+ "ctdb": "ctdb",
+ "ldb": "lib/ldb",
+ "tdb": "lib/tdb",
+ "talloc": "lib/talloc",
+ "replace": "lib/replace",
+ "tevent": "lib/tevent",
+ "pidl": "pidl",
+ "docs-xml": "docs-xml"
+}
+
+ctdb_configure_params = " --enable-developer ${PREFIX}"
+samba_configure_params = " ${ENABLE_COVERAGE} ${PREFIX} --with-profiling-data"
+
+samba_libs_envvars = "PYTHONPATH=${PYTHON_PREFIX}:$PYTHONPATH"
+samba_libs_envvars += " PKG_CONFIG_PATH=$PKG_CONFIG_PATH:${PREFIX_DIR}/lib/pkgconfig"
+samba_libs_envvars += " ADDITIONAL_CFLAGS='-Wmissing-prototypes'"
+samba_libs_configure_base = samba_libs_envvars + " ./configure --abi-check ${ENABLE_COVERAGE} --enable-debug -C ${PREFIX}"
+samba_libs_configure_libs = samba_libs_configure_base + " --bundled-libraries=cmocka,popt,NONE"
+samba_libs_configure_bundled_libs = " --bundled-libraries=!talloc,!pytalloc-util,!tdb,!pytdb,!ldb,!pyldb,!pyldb-util,!tevent,!pytevent,!popt"
+samba_libs_configure_samba = samba_libs_configure_base + samba_libs_configure_bundled_libs
+
+
+def format_option(name, value=None):
+ """Format option as str list."""
+ if value is None: # boolean option
+ return [name]
+ if not isinstance(value, list): # single value option
+ value = [value]
+ # repeatable option
+ return ['{}={}'.format(name, item) for item in value]
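+
+# For illustration (sketch; the option names are just examples):
+#   format_option('--verbose')                        -> ['--verbose']
+#   format_option('--include-env', 'ad_dc')           -> ['--include-env=ad_dc']
+#   format_option('--include-env', ['ad_dc', 'none']) -> ['--include-env=ad_dc',
+#                                                         '--include-env=none']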
+
+
+def make_test(
+ cmd='make testonly',
+ INJECT_SELFTEST_PREFIX=1,
+ TESTS='',
+ include_envs=None,
+ exclude_envs=None):
+
+ test_options = []
+ if include_envs:
+ test_options = format_option('--include-env', include_envs)
+ if exclude_envs:
+ test_options = format_option('--exclude-env', exclude_envs)
+ if test_options:
+ # join envs options to original test options
+ TESTS = (TESTS + ' ' + ' '.join(test_options)).strip()
+
+ _options = []
+
+ # Allow getting a full CI with
+ # git push -o ci.variable='AUTOBUILD_FAIL_IMMEDIATELY=0'
+
+ FAIL_IMMEDIATELY = os.getenv("AUTOBUILD_FAIL_IMMEDIATELY", "1")
+
+ if int(FAIL_IMMEDIATELY):
+ _options.append('FAIL_IMMEDIATELY=1')
+ if TESTS:
+ _options.append("TESTS='{}'".format(TESTS))
+
+ if INJECT_SELFTEST_PREFIX:
+ _options.append("TEST_OPTIONS='--with-selftest-prefix={}'".format("${SELFTEST_PREFIX}"))
+ _options.append("--directory='{}'".format("${TEST_SOURCE_DIR}"))
+
+ return ' '.join([cmd] + _options)
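+
+# For illustration (sketch, with AUTOBUILD_FAIL_IMMEDIATELY at its default
+# of "1"): make_test(include_envs=["ad_dc"]) expands to the single command
+#   make testonly FAIL_IMMEDIATELY=1 TESTS='--include-env=ad_dc'
+#     TEST_OPTIONS='--with-selftest-prefix=${SELFTEST_PREFIX}'
+#     --directory='${TEST_SOURCE_DIR}'
+# where the ${...} placeholders are substituted later by builder.start_next().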
+
+
+# When updating this list, also update .gitlab-ci.yml to add the job
+# and to make it a dependency of 'page' for the coverage report.
+
+tasks = {
+ "ctdb": {
+ "sequence": [
+ ("random-sleep", random_sleep(300, 900)),
+ ("configure", "./configure " + ctdb_configure_params),
+ ("make", "make all"),
+ ("install", "make install"),
+ ("test", "make autotest"),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("clean", "make clean"),
+ ],
+ },
+ "docs-xml": {
+ "sequence": [
+ ("random-sleep", random_sleep(300, 900)),
+ ("autoconf", "autoconf"),
+ ("configure", "./configure"),
+ ("make", "make html htmlman"),
+ ("publish-docs", PUBLISH_DOCS),
+ ("clean", "make clean"),
+ ],
+ },
+
+ "samba-def-build": {
+ "git-clone-required": True,
+ "sequence": [
+ ("configure", "./configure.developer" + samba_configure_params),
+ ("make", "make -j"),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("chmod-R-a-w", "chmod -R a-w ."),
+ ],
+ },
+
+ "samba-mit-build": {
+ "git-clone-required": True,
+ "sequence": [
+ ("configure", "./configure.developer --with-system-mitkrb5 --with-experimental-mit-ad-dc" + samba_configure_params),
+ ("make", "make -j"),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("chmod-R-a-w", "chmod -R a-w ."),
+ ],
+ },
+
+ "samba-nt4-build": {
+ "git-clone-required": True,
+ "sequence": [
+ ("configure", "./configure.developer --without-ad-dc --without-ldap --without-ads --without-json" + samba_configure_params),
+ ("make", "make -j"),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("chmod-R-a-w", "chmod -R a-w ."),
+ ],
+ },
+
+ "samba-h5l-build": {
+ "git-clone-required": True,
+ "sequence": [
+ ("configure", "./configure.developer --without-ad-dc --with-system-heimdalkrb5" + samba_configure_params),
+ ("make", "make -j"),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("chmod-R-a-w", "chmod -R a-w ."),
+ ],
+ },
+
+ "samba-without-smb1-build": {
+ "git-clone-required": True,
+ "sequence": [
+ ("configure", "./configure.developer --without-smb1-server --without-ad-dc" + samba_configure_params),
+ ("make", "make -j"),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("chmod-R-a-w", "chmod -R a-w ."),
+ ],
+ },
+
+ "samba-no-opath-build": {
+ "git-clone-required": True,
+ "sequence": [
+ ("configure", "ADDITIONAL_CFLAGS='-DDISABLE_OPATH=1 -DDISABLE_VFS_OPEN_HOW_RESOLVE_NO_SYMLINKS=1 -DDISABLE_PROC_FDS=1' ./configure.developer --without-ad-dc " + samba_configure_params),
+ ("make", "make -j"),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("chmod-R-a-w", "chmod -R a-w ."),
+ ],
+ },
+
+    # We have 'test' before 'install' because 'test' should work without 'install' (it runs all the other envs)
+ "samba": {
+ "sequence": [
+ ("random-sleep", random_sleep(300, 900)),
+ ("configure", "./configure.developer" + samba_configure_params),
+ ("make", "make -j"),
+ ("test", make_test(exclude_envs=[
+ "none",
+ "nt4_dc",
+ "nt4_dc_smb1",
+ "nt4_dc_smb1_done",
+ "nt4_dc_schannel",
+ "nt4_member",
+ "ad_dc",
+ "ad_dc_smb1",
+ "ad_dc_smb1_done",
+ "ad_dc_backup",
+ "ad_dc_ntvfs",
+ "ad_dc_default",
+ "ad_dc_default_smb1",
+ "ad_dc_slowtests",
+ "ad_dc_no_nss",
+ "ad_dc_no_ntlm",
+ "fl2003dc",
+ "fl2008dc",
+ "fl2008r2dc",
+ "ad_member",
+ "ad_member_idmap_rid",
+ "admem_idmap_autorid",
+ "ad_member_idmap_ad",
+ "ad_member_rfc2307",
+ "ad_member_idmap_nss",
+ "ad_member_oneway",
+ "chgdcpass",
+ "vampire_2000_dc",
+ "fl2000dc",
+ "fileserver",
+ "fileserver_smb1",
+ "fileserver_smb1_done",
+ "maptoguest",
+ "simpleserver",
+ "backupfromdc",
+ "restoredc",
+ "renamedc",
+ "offlinebackupdc",
+ "labdc",
+ "preforkrestartdc",
+ "proclimitdc",
+ "promoted_dc",
+ "vampire_dc",
+ "rodc",
+ "ad_dc_default",
+ "ad_dc_default_smb1",
+ "ad_dc_default_smb1_done",
+ "ad_dc_slowtests",
+ "schema_pair_dc",
+ "schema_dc",
+ "clusteredmember",
+ "ad_dc_fips",
+ "ad_member_fips",
+ ])),
+ ("test-slow-none", make_test(cmd='make test', TESTS="--include=selftest/slow-none", include_envs=["none"])),
+ ("lcov", LCOV_CMD),
+ ("install", "make install"),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("clean", "make clean"),
+ ],
+ },
+
+    # We have 'test' before 'install' because 'test' should work without 'install' (it runs all the other envs)
+ "samba-mitkrb5": {
+ "sequence": [
+ ("random-sleep", random_sleep(300, 900)),
+ ("configure", "./configure.developer --with-system-mitkrb5 --with-experimental-mit-ad-dc" + samba_configure_params),
+ ("make", "make -j"),
+ ("test", make_test(exclude_envs=[
+ "none",
+ "nt4_dc",
+ "nt4_dc_smb1",
+ "nt4_dc_smb1_done",
+ "nt4_dc_schannel",
+ "nt4_member",
+ "ad_dc",
+ "ad_dc_smb1",
+ "ad_dc_smb1_done",
+ "ad_dc_backup",
+ "ad_dc_ntvfs",
+ "ad_dc_default",
+ "ad_dc_default_smb1",
+ "ad_dc_default_smb1_done",
+ "ad_dc_slowtests",
+ "ad_dc_no_nss",
+ "ad_dc_no_ntlm",
+ "fl2003dc",
+ "fl2008dc",
+ "fl2008r2dc",
+ "ad_member",
+ "ad_member_idmap_rid",
+ "admem_idmap_autorid",
+ "ad_member_idmap_ad",
+ "ad_member_rfc2307",
+ "ad_member_idmap_nss",
+ "ad_member_oneway",
+ "chgdcpass",
+ "vampire_2000_dc",
+ "fl2000dc",
+ "fileserver",
+ "fileserver_smb1",
+ "fileserver_smb1_done",
+ "maptoguest",
+ "simpleserver",
+ "backupfromdc",
+ "restoredc",
+ "renamedc",
+ "offlinebackupdc",
+ "labdc",
+ "preforkrestartdc",
+ "proclimitdc",
+ "promoted_dc",
+ "vampire_dc",
+ "rodc",
+ "ad_dc_default",
+ "ad_dc_default_smb1",
+ "ad_dc_default_smb1_done",
+ "ad_dc_slowtests",
+ "schema_pair_dc",
+ "schema_dc",
+ "clusteredmember",
+ "ad_dc_fips",
+ "ad_member_fips",
+ ])),
+ ("lcov", LCOV_CMD),
+ ("install", "make install"),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("clean", "make clean"),
+ ],
+ },
+
+ "samba-nt4": {
+ "dependency": "samba-nt4-build",
+ "sequence": [
+ ("random-sleep", random_sleep(300, 900)),
+ ("test", make_test(include_envs=[
+ "nt4_dc",
+ "nt4_dc_smb1",
+ "nt4_dc_smb1_done",
+ "nt4_dc_schannel",
+ "nt4_member",
+ "simpleserver",
+ ])),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ],
+ },
+
+ "samba-fileserver": {
+ "dependency": "samba-h5l-build",
+ "sequence": [
+ ("random-sleep", random_sleep(300, 900)),
+ ("test", make_test(include_envs=[
+ "fileserver",
+ "fileserver_smb1",
+ "fileserver_smb1_done",
+ "maptoguest",
+ "ktest", # ktest is also tested in samba-ktest-mit samba
+ # and samba-mitkrb5 but is tested here against
+ # a system Heimdal
+ ])),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ],
+ },
+
+ "samba-fileserver-without-smb1": {
+ "dependency": "samba-without-smb1-build",
+ "sequence": [
+ ("random-sleep", random_sleep(300, 900)),
+ ("test", make_test(include_envs=["fileserver"])),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ],
+ },
+
+ # This is a full build without the AD DC so we test the build with
+ # MIT Kerberos from the current system. Runtime behaviour is
+ # confirmed via the ktest (static ccache and keytab) environment
+
+    # This environment is also used to confirm we can still build with --with-libunwind
+ "samba-ktest-mit": {
+ "sequence": [
+ ("random-sleep", random_sleep(300, 900)),
+ ("configure", "./configure.developer --without-ad-dc --with-libunwind --with-system-mitkrb5 " + samba_configure_params),
+ ("make", "make -j"),
+ ("test", make_test(include_envs=[
+ "ktest", # ktest is also tested in fileserver, samba and
+ # samba-mitkrb5 but is tested here against a
+ # system MIT krb5
+ ])),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ],
+ },
+
+ "samba-admem": {
+ "dependency": "samba-def-build",
+ "sequence": [
+ ("random-sleep", random_sleep(300, 900)),
+ ("test", make_test(include_envs=[
+ "ad_member",
+ "ad_member_idmap_rid",
+ "admem_idmap_autorid",
+ "ad_member_idmap_ad",
+ "ad_member_rfc2307",
+ "ad_member_idmap_nss",
+ "ad_member_offlogon",
+ ])),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ],
+ },
+
+ "samba-no-opath1": {
+ "dependency": "samba-no-opath-build",
+ "sequence": [
+ ("random-sleep", random_sleep(300, 900)),
+ ("test", make_test(
+ cmd="make testonly DISABLE_OPATH=1",
+ include_envs=[
+ "nt4_dc",
+ "nt4_dc_smb1",
+ "nt4_dc_smb1_done",
+ "nt4_dc_schannel",
+ "nt4_member",
+ "simpleserver",
+ ])),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", "script/clean-source-tree.sh"),
+ ],
+ },
+
+ "samba-no-opath2": {
+ "dependency": "samba-no-opath-build",
+ "sequence": [
+ ("random-sleep", random_sleep(300, 900)),
+ ("test", make_test(
+ cmd="make testonly DISABLE_OPATH=1",
+ include_envs=[
+ "fileserver",
+ "fileserver_smb1",
+ "fileserver_smb1_done",
+ ])),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", "script/clean-source-tree.sh"),
+ ],
+ },
+
+ "samba-ad-dc-1": {
+ "dependency": "samba-def-build",
+ "sequence": [
+ ("random-sleep", random_sleep(1, 1)),
+ ("test", make_test(include_envs=[
+ "ad_dc",
+ "ad_dc_smb1",
+ "ad_dc_smb1_done",
+ "ad_dc_no_nss",
+ "ad_dc_no_ntlm",
+ ])),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ],
+ },
+
+ "samba-ad-dc-2": {
+ "dependency": "samba-def-build",
+ "sequence": [
+ ("random-sleep", random_sleep(1, 1)),
+ ("test", make_test(include_envs=[
+ "vampire_dc",
+ "vampire_2000_dc",
+ "rodc",
+ ])),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ],
+ },
+
+ "samba-ad-dc-3": {
+ "dependency": "samba-def-build",
+ "sequence": [
+ ("random-sleep", random_sleep(1, 1)),
+ ("test", make_test(include_envs=[
+ "promoted_dc",
+ "chgdcpass",
+ "preforkrestartdc",
+ "proclimitdc",
+ ])),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ],
+ },
+
+ "samba-ad-dc-4a": {
+ "dependency": "samba-def-build",
+ "sequence": [
+ ("random-sleep", random_sleep(1, 1)),
+ ("test", make_test(include_envs=[
+ "fl2000dc",
+ "ad_member_oneway",
+ "fl2003dc",
+ ])),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ],
+ },
+ "samba-ad-dc-4b": {
+ "dependency": "samba-def-build",
+ "sequence": [
+ ("random-sleep", random_sleep(1, 1)),
+ ("test", make_test(include_envs=[
+ "fl2008dc",
+ "fl2008r2dc",
+ ])),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ],
+ },
+
+ "samba-ad-dc-5": {
+ "dependency": "samba-def-build",
+ "sequence": [
+ ("random-sleep", random_sleep(1, 1)),
+ ("test", make_test(include_envs=[
+ "ad_dc_default", "ad_dc_default_smb1", "ad_dc_default_smb1_done"])),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ],
+ },
+
+ "samba-ad-dc-6": {
+ "dependency": "samba-def-build",
+ "sequence": [
+ ("random-sleep", random_sleep(1, 1)),
+ ("test", make_test(include_envs=["ad_dc_slowtests", "ad_dc_backup"])),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ],
+ },
+
+ "samba-schemaupgrade": {
+ "dependency": "samba-def-build",
+ "sequence": [
+ ("random-sleep", random_sleep(1, 1)),
+ ("test", make_test(include_envs=["schema_dc", "schema_pair_dc"])),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ],
+ },
+
+    # We split out the ad_dc_ntvfs tests (which are long) so other tests do not wait
+ # This is currently the longest task, so we don't randomly delay it.
+ "samba-ad-dc-ntvfs": {
+ "dependency": "samba-def-build",
+ "sequence": [
+ ("random-sleep", random_sleep(1, 1)),
+ ("test", make_test(include_envs=["ad_dc_ntvfs"])),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ],
+ },
+
+ # Test fips compliance
+ "samba-fips": {
+ "dependency": "samba-mit-build",
+ "sequence": [
+ ("random-sleep", random_sleep(1, 1)),
+ ("test", make_test(include_envs=["ad_dc_fips", "ad_member_fips"])),
+ # TODO: This seems to generate only an empty samba-fips.info ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ],
+ },
+
+ # run the backup/restore testenvs separately as they're fairly standalone
+ # (and CI seems to max out at ~3 different DCs running at once)
+ "samba-ad-back1": {
+ "dependency": "samba-def-build",
+ "sequence": [
+ ("random-sleep", random_sleep(300, 900)),
+ ("test", make_test(include_envs=[
+ "backupfromdc",
+ "restoredc",
+ "renamedc",
+ ])),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ],
+ },
+ "samba-ad-back2": {
+ "dependency": "samba-def-build",
+ "sequence": [
+ ("random-sleep", random_sleep(300, 900)),
+ ("test", make_test(include_envs=[
+ "backupfromdc",
+ "offlinebackupdc",
+ "labdc",
+ ])),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ],
+ },
+
+ "samba-admem-mit": {
+ "dependency": "samba-mit-build",
+ "sequence": [
+ ("random-sleep", random_sleep(1, 1)),
+ ("test", make_test(include_envs=[
+ "ad_member",
+ "ad_member_idmap_rid",
+ "admem_idmap_autorid",
+ "ad_member_idmap_ad",
+ "ad_member_rfc2307",
+ "ad_member_idmap_nss",
+ "ad_member_offlogon",
+ ])),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ],
+ },
+
+ "samba-addc-mit-1": {
+ "dependency": "samba-mit-build",
+ "sequence": [
+ ("random-sleep", random_sleep(1, 1)),
+ ("test", make_test(include_envs=[
+ "ad_dc",
+ "ad_dc_smb1",
+ "ad_dc_smb1_done",
+ "ad_dc_no_nss",
+ "ad_dc_no_ntlm",
+ ])),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ],
+ },
+
+ "samba-addc-mit-4a": {
+ "dependency": "samba-mit-build",
+ "sequence": [
+ ("random-sleep", random_sleep(1, 1)),
+ ("test", make_test(include_envs=[
+ "fl2000dc",
+ "ad_member_oneway",
+ "fl2003dc",
+ ])),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ],
+ },
+ "samba-addc-mit-4b": {
+ "dependency": "samba-mit-build",
+ "sequence": [
+ ("random-sleep", random_sleep(1, 1)),
+ ("test", make_test(include_envs=[
+ "fl2008dc",
+ "fl2008r2dc",
+ ])),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ],
+ },
+
+ "samba-test-only": {
+ "sequence": [
+ ("configure", "./configure.developer --abi-check-disable" + samba_configure_params),
+ ("make", "make -j"),
+ ("test", make_test(TESTS="${TESTS}")),
+ ("lcov", LCOV_CMD),
+ ],
+ },
+
+ # Test cross-compile infrastructure
+ "samba-xc": {
+ "sequence": [
+ ("random-sleep", random_sleep(900, 1500)),
+ ("configure-native", "./configure.developer --with-selftest-prefix=./bin/ab" + samba_configure_params),
+ ("configure-cross-execute", "./configure.developer --out ./bin-xe --cross-compile --cross-execute=script/identity_cc.sh" \
+ " --cross-answers=./bin-xe/cross-answers.txt --with-selftest-prefix=./bin-xe/ab" + samba_configure_params),
+ ("verify-cross-execute-output", "grep '^Checking value of NSIG' ./bin-xe/cross-answers.txt"),
+ ("configure-cross-answers", "./configure.developer --out ./bin-xa --cross-compile" \
+ " --cross-answers=./bin-xe/cross-answers.txt --with-selftest-prefix=./bin-xa/ab" + samba_configure_params),
+ ("compare-results", "script/compare_cc_results.py "
+ "./bin/c4che/default{} "
+ "./bin-xe/c4che/default{} "
+ "./bin-xa/c4che/default{}".format(*([CACHE_SUFFIX]*3))),
+ ("modify-cross-answers", "sed -i.bak -e 's/^\\(Checking value of NSIG:\\) .*/\\1 \"1234\"/' ./bin-xe/cross-answers.txt"),
+ ("configure-cross-answers-modified", "./configure.developer --out ./bin-xa2 --cross-compile" \
+ " --cross-answers=./bin-xe/cross-answers.txt --with-selftest-prefix=./bin-xa2/ab" + samba_configure_params),
+ ("verify-cross-answers", "test $(sed -n -e 's/VALUEOF_NSIG = \\(.*\\)/\\1/p' ./bin-xa2/c4che/default{})" \
+ " = \"'1234'\"".format(CACHE_SUFFIX)),
+ ("invalidate-cross-answers", "sed -i.bak -e '/^Checking value of NSIG/d' ./bin-xe/cross-answers.txt"),
+ ("configure-cross-answers-fail", "./configure.developer --out ./bin-xa3 --cross-compile" \
+ " --cross-answers=./bin-xe/cross-answers.txt --with-selftest-prefix=./bin-xa3/ab" + samba_configure_params + \
+ " ; test $? -ne 0"),
+ ],
+ },
+
+ # test build with -O3 -- catches extra warnings and bugs, tests the ad_dc environments
+ "samba-o3": {
+ "sequence": [
+ ("random-sleep", random_sleep(300, 900)),
+ ("configure", "ADDITIONAL_CFLAGS='-O3 -Wp,-D_FORTIFY_SOURCE=2' ./configure.developer --abi-check-disable" + samba_configure_params),
+ ("make", "make -j"),
+ ("test", make_test(cmd='make test', TESTS="--exclude=selftest/slow-none", include_envs=["none"])),
+ ("quicktest", make_test(cmd='make quicktest', include_envs=["ad_dc", "ad_dc_smb1", "ad_dc_smb1_done"])),
+ ("lcov", LCOV_CMD),
+ ("install", "make install"),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("clean", "make clean"),
+ ],
+ },
+
+ "samba-32bit": {
+ "sequence": [
+ ("random-sleep", random_sleep(300, 900)),
+ ("configure", "./configure.developer --abi-check-disable --disable-warnings-as-errors" + samba_configure_params),
+ ("make", "make -j"),
+ ("nonetest", make_test(cmd='make test', TESTS="--exclude=selftest/slow-none", include_envs=["none"])),
+ ("quicktest", make_test(cmd='make quicktest', include_envs=["ad_dc", "ad_dc_smb1", "ad_dc_smb1_done"])),
+ ("ktest", make_test(cmd='make test', include_envs=["ktest"])),
+ ("install", "make install"),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("clean", "make clean"),
+ ],
+ },
+
+ "samba-ctdb": {
+ "sequence": [
+ ("random-sleep", random_sleep(900, 1500)),
+
+ # make sure we have tdb around:
+ ("tdb-configure", "cd lib/tdb && PYTHONPATH=${PYTHON_PREFIX}:$PYTHONPATH PKG_CONFIG_PATH=$PKG_CONFIG_PATH:${PREFIX_DIR}/lib/pkgconfig ./configure --bundled-libraries=NONE --abi-check --enable-debug -C ${PREFIX}"),
+ ("tdb-make", "cd lib/tdb && make"),
+ ("tdb-install", "cd lib/tdb && make install"),
+
+ # build samba with cluster support (also building ctdb):
+ ("samba-configure",
+ "PYTHONPATH=${PYTHON_PREFIX}:$PYTHONPATH "
+ "PKG_CONFIG_PATH=${PREFIX_DIR}/lib/pkgconfig:${PKG_CONFIG_PATH} "
+ "./configure.developer ${PREFIX} "
+ "--with-selftest-prefix=./bin/ab "
+ "--with-cluster-support "
+ "--without-ad-dc "
+ "--bundled-libraries=!tdb"),
+ ("samba-make", "make"),
+ ("samba-check", "./bin/smbd --configfile=/dev/null -b | grep CLUSTER_SUPPORT"),
+ ("samba-install", "make install"),
+ ("ctdb-check", "test -e ${PREFIX_DIR}/sbin/ctdbd"),
+
+ ("test", make_test(
+ cmd='PYTHONPATH=${PYTHON_PREFIX}:$PYTHONPATH make test',
+ INJECT_SELFTEST_PREFIX=0,
+ include_envs=["clusteredmember"])
+ ),
+
+ # clean up:
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("clean", "make clean"),
+ ("ctdb-clean", "cd ./ctdb && make clean"),
+ ],
+ },
+
+ "samba-libs": {
+ "sequence": [
+ ("random-sleep", random_sleep(300, 900)),
+ ("talloc-configure", "cd lib/talloc && " + samba_libs_configure_libs),
+ ("talloc-make", "cd lib/talloc && make"),
+ ("talloc-install", "cd lib/talloc && make install"),
+
+ ("tdb-configure", "cd lib/tdb && " + samba_libs_configure_libs),
+ ("tdb-make", "cd lib/tdb && make"),
+ ("tdb-install", "cd lib/tdb && make install"),
+
+ ("tevent-configure", "cd lib/tevent && " + samba_libs_configure_libs),
+ ("tevent-make", "cd lib/tevent && make"),
+ ("tevent-install", "cd lib/tevent && make install"),
+
+ ("ldb-configure", "cd lib/ldb && " + samba_libs_configure_libs),
+ ("ldb-make", "cd lib/ldb && make"),
+ ("ldb-install", "cd lib/ldb && make install"),
+
+ ("nondevel-configure", samba_libs_envvars + " ./configure ${PREFIX}"),
+ ("nondevel-make", "make -j"),
+ ("nondevel-check", "./bin/smbd -b | grep WITH_NTVFS_FILESERVER && exit 1; exit 0"),
+ ("nondevel-no-libtalloc", "find ./bin | grep -v 'libtalloc-report' | grep 'libtalloc' && exit 1; exit 0"),
+ ("nondevel-no-libtdb", "find ./bin | grep -v 'libtdb-wrap' | grep 'libtdb' && exit 1; exit 0"),
+ ("nondevel-no-libtevent", "find ./bin | grep -v 'libtevent-util' | grep 'libtevent' && exit 1; exit 0"),
+ ("nondevel-no-libldb", "find ./bin | grep -v 'module' | grep -v 'libldbsamba' | grep 'libldb' && exit 1; exit 0"),
+ ("nondevel-no-samba-nss_winbind", "ldd ./bin/plugins/libnss_winbind.so.2 | grep 'samba' && exit 1; exit 0"),
+ ("nondevel-no-samba-nss_wins", "ldd ./bin/plugins/libnss_wins.so.2 | grep 'samba' && exit 1; exit 0"),
+ ("nondevel-no-samba-libwbclient", "ldd ./bin/shared/libwbclient.so.0 | grep 'samba' && exit 1; exit 0"),
+ ("nondevel-no-samba-pam_winbind", "ldd ./bin/plugins/pam_winbind.so | grep -v 'libtalloc.so.2' | grep 'samba' && exit 1; exit 0"),
+ ("nondevel-no-public-nss_winbind",
+ check_symbols("./bin/plugins/libnss_winbind.so.2", "_nss_winbind_")),
+ ("nondevel-no-public-nss_wins",
+ check_symbols("./bin/plugins/libnss_wins.so.2", "_nss_wins_")),
+ ("nondevel-no-public-libwbclient",
+ check_symbols("./bin/shared/libwbclient.so.0", "wbc")),
+ ("nondevel-no-public-pam_winbind",
+ check_symbols("./bin/plugins/pam_winbind.so", "pam_sm_")),
+ ("nondevel-no-public-winbind_krb5_locator",
+ check_symbols("./bin/plugins/winbind_krb5_locator.so", "service_locator")),
+ ("nondevel-no-public-async_dns_krb5_locator",
+ check_symbols("./bin/plugins/async_dns_krb5_locator.so", "service_locator")),
+ ("nondevel-install", "make -j install"),
+ ("nondevel-dist", "make dist"),
+
+ ("prefix-no-private-libtalloc", "find ${PREFIX_DIR} | grep -v 'libtalloc-report' | grep 'private.*libtalloc' && exit 1; exit 0"),
+ ("prefix-no-private-libtdb", "find ${PREFIX_DIR} | grep -v 'libtdb-wrap' | grep 'private.*libtdb' && exit 1; exit 0"),
+ ("prefix-no-private-libtevent", "find ${PREFIX_DIR} | grep -v 'libtevent-util' | grep 'private.*libtevent' && exit 1; exit 0"),
+ ("prefix-no-private-libldb", "find ${PREFIX_DIR} | grep -v 'module' | grep -v 'libldbsamba' | grep 'private.*libldb' && exit 1; exit 0"),
+ ("prefix-no-samba-nss_winbind", "ldd ${PREFIX_DIR}/lib/libnss_winbind.so.2 | grep 'samba' && exit 1; exit 0"),
+ ("prefix-no-samba-nss_wins", "ldd ${PREFIX_DIR}/lib/libnss_wins.so.2 | grep 'samba' && exit 1; exit 0"),
+ ("prefix-no-samba-libwbclient", "ldd ${PREFIX_DIR}/lib/libwbclient.so.0 | grep 'samba' && exit 1; exit 0"),
+ ("prefix-no-samba-pam_winbind", "ldd ${PREFIX_DIR}/lib/security/pam_winbind.so | grep -v 'libtalloc.so.2' | grep 'samba' && exit 1; exit 0"),
+ ("prefix-no-public-nss_winbind",
+ check_symbols("${PREFIX_DIR}/lib/libnss_winbind.so.2", "_nss_winbind_")),
+ ("prefix-no-public-nss_wins",
+ check_symbols("${PREFIX_DIR}/lib/libnss_wins.so.2", "_nss_wins_")),
+ ("prefix-no-public-libwbclient",
+ check_symbols("${PREFIX_DIR}/lib/libwbclient.so.0", "wbc")),
+ ("prefix-no-public-pam_winbind",
+ check_symbols("${PREFIX_DIR}/lib/security/pam_winbind.so", "pam_sm_")),
+ ("prefix-no-public-winbind_krb5_locator",
+ check_symbols("${PREFIX_DIR}/lib/krb5/winbind_krb5_locator.so",
+ "service_locator")),
+ ("prefix-no-public-async_dns_krb5_locator",
+ check_symbols("${PREFIX_DIR}/lib/krb5/async_dns_krb5_locator.so",
+ "service_locator")),
+
+ # retry with all modules shared
+ ("allshared-distclean", "make distclean"),
+ ("allshared-configure", samba_libs_configure_samba + " --with-shared-modules=ALL"),
+ ("allshared-make", "make -j"),
+ ("allshared-no-libtalloc", "find ./bin | grep -v 'libtalloc-report' | grep 'libtalloc' && exit 1; exit 0"),
+ ("allshared-no-libtdb", "find ./bin | grep -v 'libtdb-wrap' | grep 'libtdb' && exit 1; exit 0"),
+ ("allshared-no-libtevent", "find ./bin | grep -v 'libtevent-util' | grep 'libtevent' && exit 1; exit 0"),
+ ("allshared-no-libldb", "find ./bin | grep -v 'module' | grep -v 'libldbsamba' | grep 'libldb' && exit 1; exit 0"),
+ ("allshared-no-samba-nss_winbind", "ldd ./bin/plugins/libnss_winbind.so.2 | grep 'samba' && exit 1; exit 0"),
+ ("allshared-no-samba-nss_wins", "ldd ./bin/plugins/libnss_wins.so.2 | grep 'samba' && exit 1; exit 0"),
+ ("allshared-no-samba-libwbclient", "ldd ./bin/shared/libwbclient.so.0 | grep 'samba' && exit 1; exit 0"),
+ ("allshared-no-samba-pam_winbind", "ldd ./bin/plugins/pam_winbind.so | grep -v 'libtalloc.so.2' | grep 'samba' && exit 1; exit 0"),
+ ("allshared-no-public-nss_winbind",
+ check_symbols("./bin/plugins/libnss_winbind.so.2", "_nss_winbind_")),
+ ("allshared-no-public-nss_wins",
+ check_symbols("./bin/plugins/libnss_wins.so.2", "_nss_wins_")),
+ ("allshared-no-public-libwbclient",
+ check_symbols("./bin/shared/libwbclient.so.0", "wbc")),
+ ("allshared-no-public-pam_winbind",
+ check_symbols("./bin/plugins/pam_winbind.so", "pam_sm_")),
+ ("allshared-no-public-winbind_krb5_locator",
+ check_symbols("./bin/plugins/winbind_krb5_locator.so", "service_locator")),
+ ("allshared-no-public-async_dns_krb5_locator",
+ check_symbols("./bin/plugins/async_dns_krb5_locator.so", "service_locator")),
+ ],
+ },
+
+ "samba-fuzz": {
+ "sequence": [
+ # build the fuzzers (static) via the oss-fuzz script
+ ("fuzzers-mkdir-prefix", "mkdir -p ${PREFIX_DIR}"),
+ ("fuzzers-build", "OUT=${PREFIX_DIR} LIB_FUZZING_ENGINE= SANITIZER=address CXX= CFLAGS= ADDITIONAL_LDFLAGS='-fuse-ld=bfd' ./lib/fuzzing/oss-fuzz/build_samba.sh --enable-afl-fuzzer"),
+ ],
+ },
+
+ # * Test smbd and smbtorture can build semi-static
+ #
+ # * Test Samba without python still builds.
+ #
+    # When this test fails due to more use of Python, the expectation
+ # is that the newly failing part of the code should be disabled
+ # when --disable-python is set (rather than major work being done
+ # to support this environment).
+ #
+ # The target here is for vendors shipping a minimal smbd.
+ "samba-minimal-smbd": {
+ "sequence": [
+ ("random-sleep", random_sleep(300, 900)),
+
+ # build with all modules static
+ ("allstatic-configure", "./configure.developer " + samba_configure_params + " --with-static-modules=ALL"),
+ ("allstatic-make", "make -j"),
+ ("allstatic-test", make_test(TESTS="samba3.smb2.create.*nt4_dc")),
+ ("allstatic-lcov", LCOV_CMD),
+ ("allstatic-def-check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("allstatic-def-clean", "make clean"),
+
+ # force all libraries as private
+ ("allprivate-def-distclean", "make distclean"),
+ ("allprivate-def-configure", "./configure.developer " + samba_configure_params + " --private-libraries=ALL"),
+ ("allprivate-def-make", "make -j"),
+ # note wrapper libraries need to be public
+ ("allprivate-def-no-public", "ls ./bin/shared | egrep -v '^private$|lib[nprsu][saeoi][smscd].*-wrapper.so$|pam_set_items.so' | wc -l | grep -q '^0'"),
+ ("allprivate-def-only-private-ext", "ls ./bin/shared/private | egrep 'private-samba' | wc -l | grep -q '^0' && exit 1; exit 0"),
+ ("allprivate-def-no-non-private-ext", "ls ./bin/shared/private | egrep -v 'private-samba|^libpypamtest.so$' | wc -l | grep -q '^0'"),
+ ("allprivate-def-test", make_test(TESTS="samba3.smb2.create.*nt4_dc")),
+ ("allprivate-def-lcov", LCOV_CMD),
+ ("allprivate-def-check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("allprivate-def-clean", "make clean"),
+
+ # force all libraries as private with a non default
+ # extension and 2 exceptions
+ ("allprivate-ext-distclean", "make distclean"),
+ ("allprivate-ext-configure", "./configure.developer " + samba_configure_params + " --private-libraries=ALL --private-library-extension=private-library --private-extension-exception=pac,ndr"),
+ ("allprivate-ext-make", "make -j"),
+ # note wrapper libraries need to be public
+ ("allprivate-ext-no-public", "ls ./bin/shared | egrep -v '^private$|lib[nprsu][saeoi][smscd].*-wrapper.so$|pam_set_items.so' | wc -l | grep -q '^0'"),
+ ("allprivate-ext-no-private-default-ext", "ls ./bin/shared/private | grep 'private-samba' | wc -l | grep -q '^0'"),
+ ("allprivate-ext-has-private-ext", "ls ./bin/shared/private | grep 'private-library' | wc -l | grep -q '^0' && exit 1; exit 0"),
+ ("allprivate-ext-libndr-no-private-ext", "ls ./bin/shared/private | grep -v 'private-library' | grep 'libndr' | wc -l | grep -q '^1'"),
+ ("allprivate-ext-libpac-no-private-ext", "ls ./bin/shared/private | grep -v 'private-library' | grep 'libpac' | wc -l | grep -q '^1'"),
+ ("allprivate-ext-test", make_test(TESTS="samba3.smb2.create.*nt4_dc")),
+ ("allprivate-ext-lcov", LCOV_CMD),
+ ("allprivate-ext-check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("allprivate-ext-clean", "make clean"),
+
+ # retry with nonshared smbd and smbtorture
+ ("nonshared-distclean", "make distclean"),
+ ("nonshared-configure", "./configure.developer " + samba_configure_params + " --bundled-libraries=ALL --with-static-modules=ALL --nonshared-binary=smbtorture,smbd/smbd"),
+ ("nonshared-make", "make -j"),
+ ("nonshared-test", make_test(TESTS="samba3.smb2.create.*nt4_dc")),
+ ("nonshared-lcov", LCOV_CMD),
+ ("nonshared-check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("nonshared-clean", "make clean"),
+ ],
+ },
+
+ "samba-nopython": {
+ "sequence": [
+ ("random-sleep", random_sleep(300, 900)),
+
+ ("configure", "./configure.developer ${ENABLE_COVERAGE} ${PREFIX} --with-profiling-data --disable-python --without-ad-dc"),
+ ("make", "make -j"),
+ ("find-python", "script/find_python.sh ${PREFIX}"),
+ ("test", "make test-nopython"),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("clean", "make clean"),
+
+ ("talloc-configure", "cd lib/talloc && " + samba_libs_configure_base + " --bundled-libraries=cmocka,NONE --disable-python"),
+ ("talloc-make", "cd lib/talloc && make"),
+ ("talloc-install", "cd lib/talloc && make install"),
+
+ ("tdb-configure", "cd lib/tdb && " + samba_libs_configure_base + " --bundled-libraries=cmocka,NONE --disable-python"),
+ ("tdb-make", "cd lib/tdb && make"),
+ ("tdb-install", "cd lib/tdb && make install"),
+
+ ("tevent-configure", "cd lib/tevent && " + samba_libs_configure_base + " --bundled-libraries=cmocka,NONE --disable-python"),
+ ("tevent-make", "cd lib/tevent && make"),
+ ("tevent-install", "cd lib/tevent && make install"),
+
+ ("ldb-configure", "cd lib/ldb && " + samba_libs_configure_base + " --bundled-libraries=cmocka,NONE --disable-python"),
+ ("ldb-make", "cd lib/ldb && make"),
+ ("ldb-install", "cd lib/ldb && make install"),
+
+ # retry against installed library packages, but no required modules
+ ("libs-configure", samba_libs_configure_base + samba_libs_configure_bundled_libs + " --disable-python --without-ad-dc --with-static-modules=!FORCED,!DEFAULT --with-shared-modules=!FORCED,!DEFAULT"),
+ ("libs-make", "make -j"),
+ ("libs-install", "make install"),
+ ("libs-check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("libs-clean", "make clean"),
+
+ ],
+ },
+
+ "samba-codecheck": {
+ "sequence": [
+ ("run", "script/check-shell-scripts.sh ."),
+ ("run", "script/codespell.sh ."),
+ ],
+ },
+
+ "ldb": {
+ "sequence": [
+ ("random-sleep", random_sleep(60, 600)),
+ ("configure", "./configure ${ENABLE_COVERAGE} --enable-developer -C ${PREFIX}"),
+ ("make", "make"),
+ ("install", "make install"),
+ ("test", "make test"),
+ ("lcov", LCOV_CMD),
+ ("clean", "make clean"),
+ ("configure-no-lmdb", "./configure ${ENABLE_COVERAGE} --enable-developer --without-ldb-lmdb -C ${PREFIX}"),
+ ("make-no-lmdb", "make"),
+ ("test-no-lmdb", "make test"),
+ ("lcov-no-lmdb", LCOV_CMD),
+ ("install-no-lmdb", "make install"),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("distcheck", "make distcheck"),
+ ("clean", "make clean"),
+ ],
+ },
+
+ "tdb": {
+ "sequence": [
+ ("random-sleep", random_sleep(60, 600)),
+ ("configure", "./configure ${ENABLE_COVERAGE} --enable-developer -C ${PREFIX}"),
+ ("make", "make"),
+ ("install", "make install"),
+ ("test", "make test"),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("distcheck", "make distcheck"),
+ ("clean", "make clean"),
+ ],
+ },
+
+ "talloc": {
+ "sequence": [
+ ("random-sleep", random_sleep(60, 600)),
+ ("configure", "./configure ${ENABLE_COVERAGE} --enable-developer -C ${PREFIX}"),
+ ("make", "make"),
+ ("install", "make install"),
+ ("test", "make test"),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("distcheck", "make distcheck"),
+ ("clean", "make clean"),
+ ],
+ },
+
+ "replace": {
+ "sequence": [
+ ("random-sleep", random_sleep(60, 600)),
+ ("configure", "./configure ${ENABLE_COVERAGE} --enable-developer -C ${PREFIX}"),
+ ("make", "make"),
+ ("install", "make install"),
+ ("test", "make test"),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("distcheck", "make distcheck"),
+ ("clean", "make clean"),
+ ],
+ },
+
+ "tevent": {
+ "sequence": [
+ ("random-sleep", random_sleep(60, 600)),
+ ("configure", "./configure ${ENABLE_COVERAGE} --enable-developer -C ${PREFIX}"),
+ ("make", "make"),
+ ("install", "make install"),
+ ("test", "make test"),
+ ("lcov", LCOV_CMD),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("distcheck", "make distcheck"),
+ ("clean", "make clean"),
+ ],
+ },
+
+ "pidl": {
+ "git-clone-required": True,
+ "sequence": [
+ ("random-sleep", random_sleep(60, 600)),
+ ("configure", "perl Makefile.PL PREFIX=${PREFIX_DIR}"),
+ ("touch", "touch *.yp"),
+ ("make", "make"),
+ ("test", "make test"),
+ ("install", "make install"),
+ ("checkout-yapp-generated", "git checkout lib/Parse/Pidl/IDL.pm lib/Parse/Pidl/Expr.pm"),
+ ("check-clean-tree", CLEAN_SOURCE_TREE_CMD),
+ ("clean", "make clean"),
+ ],
+ },
+
+ # these are useful for debugging autobuild
+ "pass": {
+ "sequence": [
+ ("pass", 'echo passing && /bin/true'),
+ ],
+ },
+ "fail": {
+ "sequence": [
+ ("fail", 'echo failing && /bin/false'),
+ ],
+ },
+}
+
+defaulttasks = list(tasks.keys())
+
+defaulttasks.remove("pass")
+defaulttasks.remove("fail")
+
+# The build tasks will be brought in by the test tasks as needed
+defaulttasks.remove("samba-def-build")
+defaulttasks.remove("samba-nt4-build")
+defaulttasks.remove("samba-mit-build")
+defaulttasks.remove("samba-h5l-build")
+defaulttasks.remove("samba-no-opath-build")
+
+# This is not a normal test, but a task to support manually running
+# one test under autobuild
+defaulttasks.remove("samba-test-only")
+
+# Only built on GitLab CI and not in the default autobuild because it
+# uses too much space (4GB of semi-static binaries)
+defaulttasks.remove("samba-fuzz")
+
+# The FIPS build runs only in GitLab CI on a current Fedora Docker
+# container where a simulated FIPS mode is possible.
+defaulttasks.remove("samba-fips")
+
+# The MIT build runs on a current Fedora where an up to date MIT KDC
+# is already packaged. This avoids needing to backport a current MIT
+# to the default Ubuntu 18.04, particularly during development, and
+# the need to install on the shared sn-devel-184.
+
+defaulttasks.remove("samba-mitkrb5")
+defaulttasks.remove("samba-admem-mit")
+defaulttasks.remove("samba-addc-mit-1")
+defaulttasks.remove("samba-addc-mit-4a")
+defaulttasks.remove("samba-addc-mit-4b")
+
+defaulttasks.remove("samba-32bit")
+
+if os.environ.get("AUTOBUILD_SKIP_SAMBA_O3", "0") == "1":
+ defaulttasks.remove("samba-o3")
+
+
+def do_print(msg):
+ logger.info(msg)
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+def do_debug(msg):
+ logger.debug(msg)
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+
+def run_cmd(cmd, dir=".", show=None, output=False, checkfail=True):
+ if show is None:
+ do_debug("Running: '%s' in '%s'" % (cmd, dir))
+ elif show:
+ do_print("Running: '%s' in '%s'" % (cmd, dir))
+
+ if output:
+ out = check_output([cmd], shell=True, cwd=dir)
+ return out.decode(encoding='utf-8', errors='backslashreplace')
+ elif checkfail:
+ return check_call(cmd, shell=True, cwd=dir)
+ else:
+ return call(cmd, shell=True, cwd=dir)
+
+def rmdir_force(dirname, re_raise=True):
+ try:
+ run_cmd("test -d %s && chmod -R +w %s; rm -rf %s" % (
+ dirname, dirname, dirname), output=True, show=True)
+ except CalledProcessError as e:
+ do_print("Failed: '%s'" % (str(e)))
+ run_cmd("tree %s" % dirname, output=True, show=True)
+ if re_raise:
+ raise
+ return False
+ return True
+
+class builder(object):
+ '''handle build of one directory'''
+
+ def __init__(self, name, definition):
+ self.name = name
+ self.dir = builddirs.get(name, '.')
+ self.tag = self.name.replace('/', '_')
+ self.definition = definition
+ self.sequence = definition["sequence"]
+ self.git_clone_required = False
+ if "git-clone-required" in definition:
+ self.git_clone_required = bool(definition["git-clone-required"])
+ self.proc = None
+ self.done = False
+ self.next = 0
+ self.stdout_path = "%s/%s.stdout" % (gitroot, self.tag)
+ self.stderr_path = "%s/%s.stderr" % (gitroot, self.tag)
+ do_debug("stdout for %s in %s" % (self.name, self.stdout_path))
+ do_debug("stderr for %s in %s" % (self.name, self.stderr_path))
+ run_cmd("rm -f %s %s" % (self.stdout_path, self.stderr_path))
+ self.stdout = open(self.stdout_path, 'w')
+ self.stderr = open(self.stderr_path, 'w')
+ self.stdin = open("/dev/null", 'r')
+ self.builder_dir = "%s/%s" % (testbase, self.tag)
+ self.test_source_dir = self.builder_dir
+ self.cwd = "%s/%s" % (self.builder_dir, self.dir)
+ self.selftest_prefix = "%s/bin/ab" % (self.cwd)
+ self.prefix = "%s/%s" % (test_prefix, self.tag)
+ self.consumers = []
+ self.producer = None
+
+ if self.git_clone_required:
+ assert "dependency" not in definition
+
+ def mark_existing(self):
+ do_debug('%s: Mark as existing dependency' % self.name)
+ self.next = len(self.sequence)
+ self.done = True
+
+ def add_consumer(self, consumer):
+ do_debug("%s: add consumer: %s" % (self.name, consumer.name))
+ consumer.producer = self
+ consumer.test_source_dir = self.test_source_dir
+ self.consumers.append(consumer)
+
+ def start_next(self):
+ if self.producer is not None:
+ if not self.producer.done:
+ do_debug("%s: Waiting for producer: %s" % (self.name, self.producer.name))
+ return
+
+ if self.next == 0:
+ rmdir_force(self.builder_dir)
+ rmdir_force(self.prefix)
+ if self.producer is not None:
+ run_cmd("mkdir %s" % (self.builder_dir), dir=test_master, show=True)
+ elif not self.git_clone_required:
+ run_cmd("cp -R -a -l %s %s" % (test_master, self.builder_dir), dir=test_master, show=True)
+ else:
+ run_cmd("git clone --recursive --shared %s %s" % (test_master, self.builder_dir), dir=test_master, show=True)
+
+ if self.next == len(self.sequence):
+ if not self.done:
+ do_print('%s: Completed OK' % self.name)
+ self.done = True
+ if not options.nocleanup and len(self.consumers) == 0:
+ do_print('%s: Cleaning up' % self.name)
+ rmdir_force(self.builder_dir)
+ rmdir_force(self.prefix)
+ for consumer in self.consumers:
+ if consumer.next != 0:
+ continue
+ do_print('%s: Starting consumer %s' % (self.name, consumer.name))
+ consumer.start_next()
+ if self.producer is not None:
+ self.producer.consumers.remove(self)
+ assert self.producer.done
+ self.producer.start_next()
+ do_print('%s: Remaining consumers %u' % (self.name, len(self.consumers)))
+ return
+ (self.stage, self.cmd) = self.sequence[self.next]
+ self.cmd = self.cmd.replace("${PYTHON_PREFIX}",
+ get_path(name='platlib',
+ scheme="posix_prefix",
+ vars={"base": self.prefix,
+ "platbase": self.prefix}))
+ self.cmd = self.cmd.replace("${PREFIX}", "--prefix=%s" % self.prefix)
+ self.cmd = self.cmd.replace("${PREFIX_DIR}", "%s" % self.prefix)
+ self.cmd = self.cmd.replace("${TESTS}", options.restrict_tests)
+ self.cmd = self.cmd.replace("${TEST_SOURCE_DIR}", self.test_source_dir)
+ self.cmd = self.cmd.replace("${SELFTEST_PREFIX}", self.selftest_prefix)
+ self.cmd = self.cmd.replace("${LOG_BASE}", options.log_base)
+ self.cmd = self.cmd.replace("${NAME}", self.name)
+ self.cmd = self.cmd.replace("${ENABLE_COVERAGE}", options.enable_coverage)
+ do_print('%s: [%s] Running %s in %r' % (self.name, self.stage, self.cmd, self.cwd))
+ self.proc = Popen(self.cmd, shell=True,
+ close_fds=True, cwd=self.cwd,
+ stdout=self.stdout, stderr=self.stderr, stdin=self.stdin)
+ self.next += 1
+
+def expand_dependencies(n):
+ deps = list()
+ if "dependency" in tasks[n]:
+ depname = tasks[n]["dependency"]
+ assert depname in tasks
+ sdeps = expand_dependencies(depname)
+ assert n not in sdeps
+ for sdep in sdeps:
+ deps.append(sdep)
+ deps.append(depname)
+ return deps
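+
+# For illustration (sketch, using the tasks table above):
+# expand_dependencies("samba-nt4") returns ["samba-nt4-build"]; if a
+# dependency itself had a dependency, the deepest prerequisite would be
+# listed first.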
+
+
+class buildlist(object):
+ '''handle build of multiple directories'''
+
+ def __init__(self, tasknames, rebase_url, rebase_branch="master"):
+ self.tail_proc = None
+ self.retry = None
+ if not tasknames:
+ if options.restrict_tests:
+ tasknames = ["samba-test-only"]
+ else:
+ tasknames = defaulttasks
+
+ given_tasknames = tasknames.copy()
+ implicit_tasknames = []
+ for n in given_tasknames:
+ deps = expand_dependencies(n)
+ for dep in deps:
+ if dep in given_tasknames:
+ continue
+ if dep in implicit_tasknames:
+ continue
+ implicit_tasknames.append(dep)
+
+ tasknames = implicit_tasknames.copy()
+ tasknames.extend(given_tasknames)
+ do_debug("given_tasknames: %s" % given_tasknames)
+ do_debug("implicit_tasknames: %s" % implicit_tasknames)
+ do_debug("tasknames: %s" % tasknames)
+ self.tlist = [builder(n, tasks[n]) for n in tasknames]
+
+ if options.retry:
+ rebase_remote = "rebaseon"
+ retry_task = {
+ "git-clone-required": True,
+ "sequence": [
+ ("retry",
+ '''set -e
+ git remote add -t %s %s %s
+ git fetch %s
+ while :; do
+ sleep 60
+ git describe %s/%s > old_remote_branch.desc
+ git fetch %s
+ git describe %s/%s > remote_branch.desc
+ diff old_remote_branch.desc remote_branch.desc
+ done
+ ''' % (
+ rebase_branch, rebase_remote, rebase_url,
+ rebase_remote,
+ rebase_remote, rebase_branch,
+ rebase_remote,
+ rebase_remote, rebase_branch
+ ))]}
+
+ self.retry = builder('retry', retry_task)
+ self.need_retry = False
+
+ if options.skip_dependencies:
+ for b in self.tlist:
+ if b.name in implicit_tasknames:
+ b.mark_existing()
+
+ for b in self.tlist:
+ do_debug("b.name=%s" % b.name)
+ if "dependency" not in b.definition:
+ continue
+ depname = b.definition["dependency"]
+ do_debug("b.name=%s: dependency:%s" % (b.name, depname))
+ for p in self.tlist:
+ if p.name == depname:
+ p.add_consumer(b)
+
+ def kill_kids(self):
+ if self.tail_proc is not None:
+ self.tail_proc.terminate()
+ self.tail_proc.wait()
+ self.tail_proc = None
+ if self.retry is not None:
+ self.retry.proc.terminate()
+ self.retry.proc.wait()
+ self.retry = None
+ for b in self.tlist:
+ if b.proc is not None:
+ run_cmd("killbysubdir %s > /dev/null 2>&1" % b.test_source_dir, checkfail=False)
+ b.proc.terminate()
+ b.proc.wait()
+ b.proc = None
+
+ def wait_one(self):
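+        '''poll the running builders and return the first one that has finished; return None if none are running or a retry was requested'''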
+ while True:
+ none_running = True
+ for b in self.tlist:
+ if b.proc is None:
+ continue
+ none_running = False
+ b.status = b.proc.poll()
+ if b.status is None:
+ continue
+ b.proc = None
+ return b
+ if options.retry:
+ ret = self.retry.proc.poll()
+ if ret is not None:
+ self.need_retry = True
+ self.retry = None
+ return None
+ if none_running:
+ return None
+ time.sleep(0.1)
+
+ def run(self):
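+        '''start all builders and drive them to completion; returns (status, failed_task, failed_stage, failed_tag, message)'''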
+ for b in self.tlist:
+ b.start_next()
+ if options.retry:
+ self.retry.start_next()
+ while True:
+ b = self.wait_one()
+ if options.retry and self.need_retry:
+ self.kill_kids()
+ do_print("retry needed")
+ return (0, None, None, None, "retry")
+ if b is None:
+ break
+ if os.WIFSIGNALED(b.status) or os.WEXITSTATUS(b.status) != 0:
+ self.kill_kids()
+ return (b.status, b.name, b.stage, b.tag, "%s: [%s] failed '%s' with status %d" % (b.name, b.stage, b.cmd, b.status))
+ b.start_next()
+ self.kill_kids()
+ return (0, None, None, None, "All OK")
+
+ def write_system_info(self, filename):
+ with open(filename, 'w') as f:
+ for cmd in ['uname -a',
+ 'lsb_release -a',
+ 'free',
+ 'mount',
+ 'cat /proc/cpuinfo',
+ 'cc --version',
+ 'df -m .',
+ 'df -m %s' % testbase]:
+ try:
+ out = run_cmd(cmd, output=True, checkfail=False)
+ except CalledProcessError as e:
+ out = "<failed: %s>" % str(e)
+ print('### %s' % cmd, file=f)
+ print(out, file=f)
+ print(file=f)
+
+ def tarlogs(self, fname):
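+        '''archive each task's stdout/stderr, autobuild.log (if present) and a freshly written system-info.txt into a gzipped tarball'''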
+ with tarfile.open(fname, "w:gz") as tar:
+ for b in self.tlist:
+ tar.add(b.stdout_path, arcname="%s.stdout" % b.tag)
+ tar.add(b.stderr_path, arcname="%s.stderr" % b.tag)
+ if os.path.exists("autobuild.log"):
+ tar.add("autobuild.log")
+ filename = 'system-info.txt'
+ self.write_system_info(filename)
+ tar.add(filename)
+
+ def remove_logs(self):
+ for b in self.tlist:
+ os.unlink(b.stdout_path)
+ os.unlink(b.stderr_path)
+
+ def start_tail(self):
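+        '''stream the stdout/stderr files of every task to the console with a single "tail -f" subprocess'''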
+ cmd = ["tail", "-f"]
+ for b in self.tlist:
+ cmd.append(b.stdout_path)
+ cmd.append(b.stderr_path)
+ self.tail_proc = Popen(cmd, close_fds=True)
+
+
+def cleanup(do_raise=False):
+ if options.nocleanup:
+ return
+ run_cmd("stat %s || true" % test_tmpdir, show=True)
+ run_cmd("stat %s" % testbase, show=True)
+ do_print("Cleaning up %r" % cleanup_list)
+ for d in cleanup_list:
+ ok = rmdir_force(d, re_raise=False)
+ if ok:
+ continue
+ if os.path.isdir(d):
+ do_print("Killing, waiting and retry")
+ run_cmd("killbysubdir %s > /dev/null 2>&1" % d, checkfail=False)
+ else:
+ do_print("Waiting and retry")
+ time.sleep(1)
+ rmdir_force(d, re_raise=do_raise)
+
+
+def daemonize(logfile):
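+    '''detach into the background via a double fork and redirect all further output to logfile'''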
+ pid = os.fork()
+    if pid == 0:  # First child: become session leader and fork again
+        os.setsid()
+        pid = os.fork()
+        if pid != 0:  # The session leader exits; the grandchild carries on as the daemon
+            os._exit(0)
+    else:  # The original parent exits
+        os._exit(0)
+
+ import resource # Resource usage information.
+ maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
+ if maxfd == resource.RLIM_INFINITY:
+ maxfd = 1024 # Rough guess at maximum number of open file descriptors.
+ for fd in range(0, maxfd):
+ try:
+ os.close(fd)
+ except OSError:
+ pass
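+    # every descriptor is closed above, so this open() reuses fd 0, which is then duplicated onto stdout and stderr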
+ os.open(logfile, os.O_RDWR | os.O_CREAT)
+ os.dup2(0, 1)
+ os.dup2(0, 2)
+
+
+def write_pidfile(fname):
+ '''write a pid file, cleanup on exit'''
+ with open(fname, mode='w') as f:
+ f.write("%u\n" % os.getpid())
+
+
+def rebase_tree(rebase_url, rebase_branch="master"):
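+    '''rebase the test checkout onto rebase_branch from rebase_url, exiting early if there is nothing new to test'''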
+ rebase_remote = "rebaseon"
+ do_print("Rebasing on %s" % rebase_url)
+ run_cmd("git describe HEAD", show=True, dir=test_master)
+ run_cmd("git remote add -t %s %s %s" %
+ (rebase_branch, rebase_remote, rebase_url),
+ show=True, dir=test_master)
+ run_cmd("git fetch %s" % rebase_remote, show=True, dir=test_master)
+ if options.fix_whitespace:
+ run_cmd("git rebase --force-rebase --whitespace=fix %s/%s" %
+ (rebase_remote, rebase_branch),
+ show=True, dir=test_master)
+ else:
+ run_cmd("git rebase --force-rebase %s/%s" %
+ (rebase_remote, rebase_branch),
+ show=True, dir=test_master)
+ diff = run_cmd("git --no-pager diff HEAD %s/%s" %
+ (rebase_remote, rebase_branch),
+ dir=test_master, output=True)
+ if diff == '':
+ do_print("No differences between HEAD and %s/%s - exiting" %
+ (rebase_remote, rebase_branch))
+ sys.exit(0)
+ run_cmd("git describe %s/%s" %
+ (rebase_remote, rebase_branch),
+ show=True, dir=test_master)
+ run_cmd("git describe HEAD", show=True, dir=test_master)
+ run_cmd("git --no-pager diff --stat HEAD %s/%s" %
+ (rebase_remote, rebase_branch),
+ show=True, dir=test_master)
+
+
+def push_to(push_url, push_branch="master"):
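+    '''push the tested HEAD to push_branch at push_url, optionally amending the commit with an autobuild mark first'''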
+ push_remote = "pushto"
+ do_print("Pushing to %s" % push_url)
+ if options.mark:
+ run_cmd("git config --replace-all core.editor script/commit_mark.sh", dir=test_master)
+ run_cmd("git commit --amend -c HEAD", dir=test_master)
+ # the notes method doesn't work yet, as metze hasn't allowed refs/notes/* in master
+ # run_cmd("EDITOR=script/commit_mark.sh git notes edit HEAD", dir=test_master)
+ run_cmd("git remote add -t %s %s %s" %
+ (push_branch, push_remote, push_url),
+ show=True, dir=test_master)
+ run_cmd("git push %s +HEAD:%s" %
+ (push_remote, push_branch),
+ show=True, dir=test_master)
+
+
+def send_email(subject, text, log_tar):
+ if options.email is None:
+ do_print("not sending email because the recipient is not set")
+ do_print("the text content would have been:\n\nSubject: %s\n\n%s" %
+ (subject, text))
+ return
+ outer = MIMEMultipart()
+ outer['Subject'] = subject
+ outer['To'] = options.email
+ outer['From'] = options.email_from
+ outer['Date'] = email.utils.formatdate(localtime=True)
+ outer.preamble = 'Autobuild mails are now in MIME because we optionally attach the logs.\n'
+ outer.attach(MIMEText(text, 'plain', 'utf-8'))
+ if options.attach_logs:
+ with open(log_tar, 'rb') as fp:
+ msg = MIMEApplication(fp.read(), 'gzip', email.encoders.encode_base64)
+ # Set the filename parameter
+ msg.add_header('Content-Disposition', 'attachment', filename=os.path.basename(log_tar))
+ outer.attach(msg)
+ content = outer.as_string()
+ s = smtplib.SMTP(options.email_server)
+ email_user = os.getenv('SMTP_USERNAME')
+ email_password = os.getenv('SMTP_PASSWORD')
+ if email_user is not None:
+ s.starttls()
+ s.login(email_user, email_password)
+
+ s.sendmail(options.email_from, [options.email], content)
+ s.set_debuglevel(1)
+ s.quit()
+
+
+def email_failure(status, failed_task, failed_stage, failed_tag, errstr,
+ elapsed_time, log_base=None, add_log_tail=True):
+ '''send an email to options.email about the failure'''
+ elapsed_minutes = elapsed_time / 60.0
+ if log_base is None:
+ log_base = gitroot
+ text = '''
+Dear Developer,
+
+Your autobuild on %s failed after %.1f minutes
+when trying to test %s with the following error:
+
+ %s
+
+the autobuild has been abandoned. Please fix the error and resubmit.
+
+A summary of the autobuild process is here:
+
+ %s/autobuild.log
+''' % (platform.node(), elapsed_minutes, failed_task, errstr, log_base)
+
+ if options.restrict_tests:
+ text += """
+The build was restricted to tests matching %s\n""" % options.restrict_tests
+
+ if failed_task != 'rebase':
+ text += '''
+You can see logs of the failed task here:
+
+ %s/%s.stdout
+ %s/%s.stderr
+
+or you can get full logs of all tasks in this job here:
+
+ %s/logs.tar.gz
+
+The top commit for the tree that was built was:
+
+%s
+
+''' % (log_base, failed_tag, log_base, failed_tag, log_base, top_commit_msg)
+
+ log_stdout = "%s/%s.stdout" % (gitroot, failed_tag)
+ if add_log_tail and os.access(log_stdout, os.R_OK):
+        with open(log_stdout, 'r') as f:
+            lines = f.readlines()
+        log_tail = "".join(lines[-50:])
+        num_lines = len(lines)
+        log_stderr = "%s/%s.stderr" % (gitroot, failed_tag)
+        if num_lines < 50 and os.access(log_stderr, os.R_OK):
+            # Also include stderr (compile failures) if < 50 lines of stdout
+            with open(log_stderr, 'r') as f2:
+                log_tail += "".join(f2.readlines()[-(50 - num_lines):])
+
+        text += '''
+The last 50 lines of log messages:
+
+%s
+    ''' % log_tail
+
+ logs = os.path.join(gitroot, 'logs.tar.gz')
+ send_email('autobuild[%s] failure on %s for task %s during %s'
+ % (options.branch, platform.node(), failed_task, failed_stage),
+ text, logs)
+
+
+def email_success(elapsed_time, log_base=None):
+ '''send an email to options.email about a successful build'''
+ if log_base is None:
+ log_base = gitroot
+ text = '''
+Dear Developer,
+
+Your autobuild on %s has succeeded after %.1f minutes.
+
+''' % (platform.node(), elapsed_time / 60.)
+
+ if options.restrict_tests:
+ text += """
+The build was restricted to tests matching %s\n""" % options.restrict_tests
+
+ if options.keeplogs:
+ text += '''
+
+you can get full logs of all tasks in this job here:
+
+ %s/logs.tar.gz
+
+''' % log_base
+
+ text += '''
+The top commit for the tree that was built was:
+
+%s
+''' % top_commit_msg
+
+ logs = os.path.join(gitroot, 'logs.tar.gz')
+ send_email('autobuild[%s] success on %s' % (options.branch, platform.node()),
+ text, logs)
+
+
+# get the top commit message, for emails
+top_commit_msg = run_cmd("git log -1", dir=gitroot, output=True)
+
+try:
+ if options.skip_dependencies:
+ run_cmd("stat %s" % testbase, dir=testbase, output=True)
+ else:
+ os.makedirs(testbase)
+except Exception as reason:
+ raise Exception("Unable to create %s : %s" % (testbase, reason))
+cleanup_list.append(testbase)
+
+if options.daemon:
+ logfile = os.path.join(testbase, "log")
+ do_print("Forking into the background, writing progress to %s" % logfile)
+ daemonize(logfile)
+
+write_pidfile(gitroot + "/autobuild.pid")
+
+start_time = time.time()
+
+while True:
+ try:
+ run_cmd("rm -rf %s" % test_tmpdir, show=True)
+ os.makedirs(test_tmpdir)
+ # The waf uninstall code removes empty directories all the way
+ # up the tree. Creating a file in test_tmpdir stops it from
+ # being removed.
+ run_cmd("touch %s" % os.path.join(test_tmpdir,
+ ".directory-is-not-empty"), show=True)
+ run_cmd("stat %s" % test_tmpdir, show=True)
+ run_cmd("stat %s" % testbase, show=True)
+ if options.skip_dependencies:
+ run_cmd("stat %s" % test_master, dir=testbase, output=True)
+ else:
+ run_cmd("git clone --recursive --shared %s %s" % (gitroot, test_master), show=True, dir=gitroot)
+ except Exception:
+ cleanup()
+ raise
+
+ try:
+ if options.rebase is not None:
+ rebase_tree(options.rebase, rebase_branch=options.branch)
+ except Exception:
+ cleanup_list.append(gitroot + "/autobuild.pid")
+ cleanup()
+ elapsed_time = time.time() - start_time
+ email_failure(-1, 'rebase', 'rebase', 'rebase',
+ 'rebase on %s failed' % options.branch,
+ elapsed_time, log_base=options.log_base)
+ sys.exit(1)
+
+ try:
+ blist = buildlist(args, options.rebase, rebase_branch=options.branch)
+ if options.tail:
+ blist.start_tail()
+ (status, failed_task, failed_stage, failed_tag, errstr) = blist.run()
+ if status != 0 or errstr != "retry":
+ break
+ cleanup(do_raise=True)
+ except Exception:
+ cleanup()
+ raise
+
+cleanup_list.append(gitroot + "/autobuild.pid")
+
+do_print(errstr)
+
+blist.kill_kids()
+if options.tail:
+ do_print("waiting for tail to flush")
+ time.sleep(1)
+
+elapsed_time = time.time() - start_time
+if status == 0:
+ if options.passcmd is not None:
+ do_print("Running passcmd: %s" % options.passcmd)
+ run_cmd(options.passcmd, dir=test_master)
+ if options.pushto is not None:
+ push_to(options.pushto, push_branch=options.branch)
+ if options.keeplogs or options.attach_logs:
+ blist.tarlogs("logs.tar.gz")
+ do_print("Logs in logs.tar.gz")
+ if options.always_email:
+ email_success(elapsed_time, log_base=options.log_base)
+ blist.remove_logs()
+ cleanup()
+ do_print(errstr)
+ sys.exit(0)
+
+# something failed, gather a tar of the logs
+blist.tarlogs("logs.tar.gz")
+
+if options.email is not None:
+ email_failure(status, failed_task, failed_stage, failed_tag, errstr,
+ elapsed_time, log_base=options.log_base)
+else:
+ elapsed_minutes = elapsed_time / 60.0
+ print('''
+
+####################################################################
+
+AUTOBUILD FAILURE
+
+Your autobuild[%s] on %s failed after %.1f minutes
+when trying to test %s with the following error:
+
+ %s
+
+the autobuild has been abandoned. Please fix the error and resubmit.
+
+####################################################################
+
+''' % (options.branch, platform.node(), elapsed_minutes, failed_task, errstr))
+
+cleanup()
+do_print(errstr)
+do_print("Logs in logs.tar.gz")
+sys.exit(status)
diff --git a/script/bisect-test.py b/script/bisect-test.py
new file mode 100755
index 0000000..7c5cd63
--- /dev/null
+++ b/script/bisect-test.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python3
+# use git bisect to work out what commit caused a test failure
+# Copyright Andrew Tridgell 2010
+# released under GNU GPL v3 or later
+
+
+from subprocess import call, check_call, Popen, PIPE
+import os
+import tempfile
+import sys
+from optparse import OptionParser
+
+parser = OptionParser()
+parser.add_option("", "--good", help="known good revision (default HEAD~100)", default='HEAD~100')
+parser.add_option("", "--bad", help="known bad revision (default HEAD)", default='HEAD')
+parser.add_option("", "--skip-build-errors", help="skip revisions where make fails",
+ action='store_true', default=False)
+parser.add_option("", "--autogen", help="run autogen before each build", action="store_true", default=False)
+parser.add_option("", "--autogen-command", help="command to use for autogen (default ./autogen.sh)",
+ type='str', default="./autogen.sh")
+parser.add_option("", "--configure", help="run configure.developer before each build",
+ action="store_true", default=False)
+parser.add_option("", "--configure-command", help="the command for configure (default ./configure.developer)",
+ type='str', default="./configure.developer")
+parser.add_option("", "--build-command", help="the command to build the tree (default 'make -j')",
+ type='str', default="make -j")
+parser.add_option("", "--test-command", help="the command to test the tree (default 'make test')",
+ type='str', default="make test")
+parser.add_option("", "--clean", help="run make clean before each build",
+ action="store_true", default=False)
+
+
+(opts, args) = parser.parse_args()
+
+
+def run_cmd(cmd, dir=".", show=True, output=False, checkfail=True):
+ if show:
+ print("Running: '%s' in '%s'" % (cmd, dir))
+ if output:
+ return Popen([cmd], shell=True, stdout=PIPE, cwd=dir).communicate()[0]
+ elif checkfail:
+ return check_call(cmd, shell=True, cwd=dir)
+ else:
+ return call(cmd, shell=True, cwd=dir)
+
+
+def find_git_root():
+ '''get to the top of the git repo'''
+ p = os.getcwd()
+ while p != '/':
+ if os.path.exists(os.path.join(p, ".git")):
+ return p
+ p = os.path.abspath(os.path.join(p, '..'))
+ return None
+
+
+cwd = os.getcwd()
+gitroot = find_git_root()
+
+# create a bisect script
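+# (exit code 125 tells "git bisect run" to skip a revision that cannot be built or tested)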
+f = tempfile.NamedTemporaryFile(delete=False, mode="w+t")
+f.write("set -x\n")
+f.write("cd %s || exit 125\n" % cwd)
+if opts.autogen:
+ f.write("%s || exit 125\n" % opts.autogen_command)
+if opts.configure:
+ f.write("%s || exit 125\n" % opts.configure_command)
+if opts.clean:
+ f.write("make clean || exit 125\n")
+if opts.skip_build_errors:
+ build_err = 125
+else:
+ build_err = 1
+f.write("%s || exit %u\n" % (opts.build_command, build_err))
+f.write("%s || exit 1\n" % opts.test_command)
+f.write("exit 0\n")
+f.close()
+
+
+def cleanup():
+ run_cmd("git bisect reset", dir=gitroot)
+ os.unlink(f.name)
+ sys.exit(-1)
+
+
+# run bisect
+ret = -1
+try:
+ run_cmd("git bisect reset", dir=gitroot, show=False, checkfail=False)
+ run_cmd("git bisect start %s %s --" % (opts.bad, opts.good), dir=gitroot)
+ ret = run_cmd("git bisect run bash %s" % f.name, dir=gitroot, show=True, checkfail=False)
+except KeyboardInterrupt:
+ print("Cleaning up")
+ cleanup()
+except Exception as reason:
+ print("Failed bisect: %s" % reason)
+ cleanup()
+
+run_cmd("git bisect reset", dir=gitroot)
+os.unlink(f.name)
+sys.exit(ret)
diff --git a/script/check-shell-scripts.sh b/script/check-shell-scripts.sh
new file mode 100755
index 0000000..f4f48a3
--- /dev/null
+++ b/script/check-shell-scripts.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+if [ $# -lt 1 ]; then
+ echo "Usage: $(basename "${0}") DIR [SEVERITY]"
+ exit 1
+fi
+
+DIR="${1}"
+SEVERITY="${2:-error}"
+
+shfmt -f "${DIR}" |
+ grep -v -E "(bootstrap|third_party)" |
+ xargs shellcheck \
+ --shell=sh \
+ --external-sources \
+ --check-sourced \
+ --format=gcc \
+ --severity="${SEVERITY}"
+
+exit $?
diff --git a/script/clean-source-tree.sh b/script/clean-source-tree.sh
new file mode 100755
index 0000000..8d7f759
--- /dev/null
+++ b/script/clean-source-tree.sh
@@ -0,0 +1,33 @@
+#!/bin/sh
+#
+
+N=$(git clean -n | wc -l)
+C=$(git diff --stat HEAD | wc -l)
+
+test x"$N" != x"0" && {
+ echo "The tree has $N new uncommitted files!!! see stderr"
+ echo "The tree has $N new uncommitted files!!!" >&2
+
+ echo "git clean -n" >&2
+ git clean -n >&2
+
+ test x"$C" != x"0" && {
+ echo "git diff -p --stat HEAD" >&2
+ git diff -p --stat HEAD >&2
+ }
+
+ exit 1
+}
+
+test x"$C" != x"0" && {
+ echo "The tree has uncommitted changes!!! see stderr"
+ echo "The tree has uncommitted changes!!!" >&2
+
+ echo "git diff -p --stat HEAD" >&2
+ git diff -p --stat HEAD >&2
+
+ exit 1
+}
+
+echo "clean tree"
+exit 0
diff --git a/script/codespell.sh b/script/codespell.sh
new file mode 100755
index 0000000..60e0eba
--- /dev/null
+++ b/script/codespell.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Check code spelling
+
+if [ $# -lt 1 ]; then
+ echo "Usage: $(basename "${0}") DIR"
+ exit 1
+fi
+
+DIR="${1}"
+
+codespell "${DIR}"
+ret=$?
+
+if [ ${ret} -ne 0 ]; then
+ echo
+ echo "Fix code spelling issues. If it detected false positives" \
+ "please update .codespellignore."
+fi
+
+exit ${ret}
diff --git a/script/commit_mark.sh b/script/commit_mark.sh
new file mode 100755
index 0000000..3de6ba7
--- /dev/null
+++ b/script/commit_mark.sh
@@ -0,0 +1,21 @@
+#!/bin/sh
+# add an autobuild message to the HEAD commit
+
+branch=$(git branch --contains HEAD | grep '^\* ' | sed -e 's/^\* //')
+
+if grep -q "^Autobuild\-User($branch): " "$1"; then
+ echo "Already marked as tested for $branch"
+ exit 0
+fi
+
+fullname=$(getent passwd $USER | cut -d: -f5 | cut -d',' -f1)
+mailaddr=$(git config user.email)
+if test -z "$mailaddr"; then
+ mailaddr="$USER@samba.org"
+fi
+cat <<EOF >>"$1"
+
+Autobuild-User($branch): $fullname <$mailaddr>
+Autobuild-Date($branch): $(date) on $(hostname)
+EOF
+exit 0
diff --git a/script/compare_cc_results.py b/script/compare_cc_results.py
new file mode 100755
index 0000000..9bf24ad
--- /dev/null
+++ b/script/compare_cc_results.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python3
+"""Compare the results of native and cross-compiled configure tests
+
+The compared files are called "default.cache.py" and are generated in
+bin/c4che/.
+
+USAGE: compare_cc_results.py CONFIG_1 CONFIG_2 [CONFIG_3 [CONFIG_4 ...]]
+"""
+import sys
+import difflib
+
+exceptions = [
+ 'BUILD_DIRECTORY', 'SELFTEST_PREFIX', 'defines',
+ 'CROSS_COMPILE', 'CROSS_ANSWERS', 'CROSS_EXECUTE',
+ 'LIBSOCKET_WRAPPER_SO_PATH',
+ 'LIBNSS_WRAPPER_SO_PATH',
+ 'LIBPAM_WRAPPER_SO_PATH',
+ 'PAM_SET_ITEMS_SO_PATH',
+ 'LIBUID_WRAPPER_SO_PATH',
+ 'LIBRESOLV_WRAPPER_SO_PATH',
+]
+
+if len(sys.argv) < 3:
+ print(__doc__)
+ sys.exit(1)
+
+base_lines = list()
+base_fname = ''
+
+found_diff = False
+
+for fname in sys.argv[1:]:
+ lines = list()
+ f = open(fname, 'r')
+ for line in f:
+ if line.startswith("cfg_files ="):
+ # waf writes configuration files as absolute paths
+ continue
+ if len(line.split('=', 1)) == 2:
+ key = line.split('=', 1)[0].strip()
+ value = line.split('=', 1)[1].strip()
+ if key in exceptions:
+ continue
+ # using waf with python 3.4 seems to randomly sort dict keys
+ # we can't modify the waf code but we can fake a dict value
+ # string representation as if it were sorted. python 3.6.5
+ # doesn't seem to suffer from this behaviour
+ if value.startswith('{'):
+ import ast
+ amap = ast.literal_eval(value)
+ fakeline = ""
+ for k in sorted(amap.keys()):
+ if not len(fakeline) == 0:
+ fakeline = fakeline + ", "
+ fakeline = fakeline + '\'' + k + '\': \'' + amap[k] + '\''
+ line = key + ' = {' + fakeline + '}'
+ lines.append(line)
+ f.close()
+ if base_fname:
+ diff = list(difflib.unified_diff(base_lines, lines, base_fname, fname))
+ if diff:
+ print('configuration files %s and %s do not match' % (base_fname, fname))
+ for l in diff:
+ sys.stdout.write(l)
+ found_diff = True
+ else:
+ base_fname = fname
+ base_lines = lines
+
+if found_diff:
+ sys.exit(1)
diff --git a/script/configure_check_unused.pl b/script/configure_check_unused.pl
new file mode 100755
index 0000000..52d8dee
--- /dev/null
+++ b/script/configure_check_unused.pl
@@ -0,0 +1,124 @@
+#!/usr/bin/perl
+# Script that finds macros in a configure script that are not
+# used in a set of C files.
+# Copyright Jelmer Vernooij <jelmer@samba.org>, GPL
+#
+# Usage: configure_check_unused.pl configure.in [c-files...]
+
+use strict;
+
+sub autoconf_parse($$$$)
+{
+ my $in = shift;
+ my $defines = shift;
+ my $functions = shift;
+ my $headers = shift;
+
+ open(IN, $in) or die("Can't open $in");
+
+ my $ln = 0;
+
+ foreach(<IN>) {
+ $ln++;
+
+ if(/AC_DEFINE\(([^,]+),/) {
+ $defines->{$1} = "$in:$ln";
+ }
+
+ if(/AC_CHECK_FUNCS\(\[*(.[^],)]+)/) {
+ foreach(split / /, $1) {
+ $functions->{$_} = "$in:$ln";
+ }
+ }
+
+ if(/AC_CHECK_FUNC\(([^,)]+)/) {
+ $functions->{$1} = "$in:$ln";
+ }
+
+ if(/AC_CHECK_HEADERS\(\[*([^],)]+)/) {
+ foreach(split / /, $1) {
+ $headers->{$_} = "$in:$ln";
+ }
+ }
+
+ if(/AC_CHECK_HEADER\(([^,)]+)/) {
+ $headers->{$1} = "$in:$ln";
+ }
+
+ if(/sinclude\(([^,]+)\)/) {
+ autoconf_parse($1, $defines, $functions, $headers);
+ }
+ }
+
+ close IN;
+}
+
+# Return the symbols and headers used by a C file
+sub cfile_parse($$$)
+{
+ my $in = shift;
+ my $symbols = shift;
+ my $headers = shift;
+
+ open(FI, $in) or die("Can't open $in");
+ my $ln = 0;
+ my $line;
+ while($line = <FI>) {
+ $ln++;
+ $_ = $line;
+ if (/\#([ \t]*)include ["<]([^">]+)/) {
+ $headers->{$2} = "$in:$ln";
+ }
+
+ $_ = $line;
+ while(/([A-Za-z0-9_]+)/g) {
+ $symbols->{$1} = "$in:$ln";
+ }
+ }
+ close FI;
+}
+
+my %ac_defines = ();
+my %ac_func_checks = ();
+my %ac_headers = ();
+my %symbols = ();
+my %headers = ();
+
+if (scalar(@ARGV) <= 1) {
+	print("Usage: configure_check_unused.pl configure.in [CFILE...]\n");
+ exit 0;
+}
+
+autoconf_parse(shift(@ARGV), \%ac_defines, \%ac_func_checks, \%ac_headers);
+cfile_parse($_, \%symbols, \%headers) foreach(@ARGV);
+
+(keys %ac_defines) or warn("No defines found in configure.in file, parse error?");
+
+foreach (keys %ac_defines) {
+ if (not defined($symbols{$_})) {
+ print "$ac_defines{$_}: Autoconf-defined $_ is unused\n";
+ }
+}
+
+(keys %ac_func_checks) or warn("No function checks found in configure.in file, parse error?");
+
+foreach (keys %ac_func_checks) {
+ my $def = "HAVE_".uc($_);
+ if (not defined($symbols{$_})) {
+ print "$ac_func_checks{$_}: Autoconf-checked function `$_' is unused\n";
+ } elsif (not defined($symbols{$def})) {
+ print "$ac_func_checks{$_}: Autoconf-define `$def' for function `$_' is unused\n";
+ }
+}
+
+(keys %ac_headers) or warn("No headers found in configure.in file, parse error?");
+
+foreach (keys %ac_headers) {
+ my $def = "HAVE_".uc($_);
+ $def =~ s/[\/\.]/_/g;
+ if (not defined($headers{$_})) {
+ print "$ac_headers{$_}: Autoconf-checked header `$_' is unused\n";
+ } elsif (not defined($symbols{$def})) {
+ print "$ac_headers{$_}: Autoconf-define `$def' for header `$_' is unused\n";
+ }
+}
diff --git a/script/ctdb-import.msg-filter.sh b/script/ctdb-import.msg-filter.sh
new file mode 100755
index 0000000..107d736
--- /dev/null
+++ b/script/ctdb-import.msg-filter.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+#
+
+set -e
+set -u
+
+cat -
+echo ""
+echo "(This used to be ctdb commit ${GIT_COMMIT})"
+
+exit 0
diff --git a/script/ctdb-import.tree-filter.sh b/script/ctdb-import.tree-filter.sh
new file mode 100755
index 0000000..00b6255
--- /dev/null
+++ b/script/ctdb-import.tree-filter.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+#
+
+set -e
+set -u
+
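+# move every top-level entry of the exported tree into a ctdb/ subdirectory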
+lo=$(find -mindepth 1 -maxdepth 1)
+for o in $lo; do
+ mkdir -p ctdb
+ mv $o ctdb/
+done
+
+exit 0
diff --git a/script/ctdb-import.txt b/script/ctdb-import.txt
new file mode 100644
index 0000000..621b24e
--- /dev/null
+++ b/script/ctdb-import.txt
@@ -0,0 +1,5 @@
+ctdb-import.git$ git filter-branch \
+ --tree-filter /path/to/ctdb-import.tree-filter.sh \
+ --msg-filter /path/to/ctdb-import.msg-filter.sh \
+ HEAD
+
diff --git a/script/find_python.sh b/script/find_python.sh
new file mode 100755
index 0000000..5ef8368
--- /dev/null
+++ b/script/find_python.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+if [ $# -lt 1 ]; then
+ echo "$0: <installdir>"
+ exit 1
+fi
+
+installdir=$1
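+# the exit status is the number of *.py files found under the install dir, so 0 means none were installed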
+exit $(find ${installdir} -name \*.py | wc -l)
diff --git a/script/findstatic.pl b/script/findstatic.pl
new file mode 100755
index 0000000..43a4916
--- /dev/null
+++ b/script/findstatic.pl
@@ -0,0 +1,70 @@
+#!/usr/bin/perl -w
+# find a list of fns and variables in the code that could be static
+# usually called with something like this:
+# findstatic.pl `find . -name "*.o"`
+# Andrew Tridgell <tridge@samba.org>
+
+use strict;
+
+# use nm to find the symbols
+my($saved_delim) = $/;
+undef $/;
+my($syms) = `nm -o @ARGV`;
+$/ = $saved_delim;
+
+my(@lines) = split(/\n/s, $syms);
+
+my(%def);
+my(%undef);
+my(%stype);
+
+my(%typemap) = (
+ "T" => "function",
+ "C" => "uninitialised variable",
+ "D" => "initialised variable"
+ );
+
+
+# parse the symbols into defined and undefined
+for (my($i)=0; $i <= $#{@lines}; $i++) {
+ my($line) = $lines[$i];
+ if ($line =~ /(.*):[a-f0-9]* ([TCD]) (.*)/) {
+ my($fname) = $1;
+ my($symbol) = $3;
+ push(@{$def{$fname}}, $symbol);
+ $stype{$symbol} = $2;
+ }
+ if ($line =~ /(.*):\s* U (.*)/) {
+ my($fname) = $1;
+ my($symbol) = $2;
+ push(@{$undef{$fname}}, $symbol);
+ }
+}
+
+# look for defined symbols that are never referenced outside the place they
+# are defined
+foreach my $f (keys %def) {
+ print "Checking $f\n";
+ my($found_one) = 0;
+ foreach my $s (@{$def{$f}}) {
+ my($found) = 0;
+ foreach my $f2 (keys %undef) {
+ if ($f2 ne $f) {
+ foreach my $s2 (@{$undef{$f2}}) {
+ if ($s2 eq $s) {
+ $found = 1;
+ $found_one = 1;
+ }
+ }
+ }
+ }
+ if ($found == 0) {
+ my($t) = $typemap{$stype{$s}};
+ print " '$s' is unique to $f ($t)\n";
+ }
+ }
+ if ($found_one == 0) {
+ print " all symbols in '$f' are unused (main program?)\n";
+ }
+}
+
diff --git a/script/generate_param.py b/script/generate_param.py
new file mode 100644
index 0000000..50f2d12
--- /dev/null
+++ b/script/generate_param.py
@@ -0,0 +1,431 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) 2014 Catalyst.Net Ltd
+#
+# Auto generate param_functions.c
+#
+# ** NOTE! The following LGPL license applies to the ldb
+# ** library. This does NOT imply that all of Samba is released
+# ** under the LGPL
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 3 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import xml.etree.ElementTree as ET
+import optparse
+
+# parse command line arguments
+parser = optparse.OptionParser()
+parser.add_option("-f", "--file", dest="filename",
+ help="input file", metavar="FILE")
+parser.add_option("-o", "--output", dest="output",
+ help='output file', metavar="FILE")
+parser.add_option("--mode", type="choice", metavar="<FUNCTIONS|S3PROTO|LIBPROTO|PARAMDEFS|PARAMTABLE>",
+ choices=["FUNCTIONS", "S3PROTO", "LIBPROTO", "PARAMDEFS", "PARAMTABLE"], default="FUNCTIONS")
+parser.add_option("--scope", metavar="<GLOBAL|LOCAL>",
+ choices=["GLOBAL", "LOCAL"], default="GLOBAL")
+
+(options, args) = parser.parse_args()
+
+if options.filename is None:
+ parser.error("No input file specified")
+if options.output is None:
+ parser.error("No output file specified")
+
+
+def iterate_all(path):
+ """Iterate and yield all the parameters.
+
+ :param path: path to parameters xml file
+ """
+
+ try:
+ with open(path, 'r') as p:
+ out = p.read()
+ except IOError as e:
+ raise Exception("Error opening parameters file")
+
+ # parse the parameters xml file
+ root = ET.fromstring(out)
+ for parameter in root:
+ name = parameter.attrib.get("name")
+ param_type = parameter.attrib.get("type")
+ context = parameter.attrib.get("context")
+ func = parameter.attrib.get("function")
+ synonym = parameter.attrib.get("synonym")
+ removed = parameter.attrib.get("removed")
+ generated = parameter.attrib.get("generated_function")
+ handler = parameter.attrib.get("handler")
+ enumlist = parameter.attrib.get("enumlist")
+ deprecated = parameter.attrib.get("deprecated")
+ synonyms = parameter.findall('synonym')
+
+ if removed == "1":
+ continue
+
+ constant = parameter.attrib.get("constant")
+ substitution = parameter.attrib.get("substitution")
+ parm = parameter.attrib.get("parm")
+ if name is None or param_type is None or context is None:
+ raise Exception("Error parsing parameter: " + name)
+ if func is None:
+ func = name.replace(" ", "_").lower()
+ if enumlist is None:
+ enumlist = "NULL"
+ if handler is None:
+ handler = "NULL"
+ yield {'name': name,
+ 'type': param_type,
+ 'context': context,
+ 'function': func,
+ 'constant': (constant == '1'),
+ 'substitution': (substitution == '1'),
+ 'parm': (parm == '1'),
+ 'synonym' : synonym,
+ 'generated' : generated,
+ 'enumlist' : enumlist,
+ 'handler' : handler,
+ 'deprecated' : deprecated,
+ 'synonyms' : synonyms }
+
+
+# map doc attributes to a section of the generated function
+context_dict = {"G": "_GLOBAL", "S": "_LOCAL"}
+param_type_dict = {
+ "boolean" : "_BOOL",
+ "list" : "_LIST",
+ "string" : "_STRING",
+ "integer" : "_INTEGER",
+ "enum" : "_INTEGER",
+ "char" : "_CHAR",
+ "boolean-auto" : "_INTEGER",
+ "cmdlist" : "_LIST",
+ "bytes" : "_INTEGER",
+ "octal" : "_INTEGER",
+ "ustring" : "_STRING",
+ }
+
+
+def generate_functions(path_in, path_out):
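+    '''write one FN_GLOBAL_*/FN_LOCAL_* macro line per auto-generated parameter accessor'''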
+ f = open(path_out, 'w')
+ try:
+ f.write('/* This file was automatically generated by generate_param.py. DO NOT EDIT */\n\n')
+ for parameter in iterate_all(options.filename):
+            # filter out parametric options
+ if ':' in parameter['name']:
+ continue
+ if parameter['synonym'] == "1":
+ continue
+ if parameter['generated'] == "0":
+ continue
+
+ output_string = "FN"
+ temp = context_dict.get(parameter['context'])
+ if temp is None:
+ raise Exception(parameter['name'] + " has an invalid context " + parameter['context'])
+ output_string += temp
+ if parameter['type'] == "string" or parameter['type'] == "ustring":
+ if parameter['substitution']:
+ output_string += "_SUBSTITUTED"
+ else:
+ output_string += "_CONST"
+ if parameter['parm']:
+ output_string += "_PARM"
+ temp = param_type_dict.get(parameter['type'])
+ if temp is None:
+ raise Exception(parameter['name'] + " has an invalid param type " + parameter['type'])
+ output_string += temp
+ f.write(output_string + "(" + parameter['function'] + ", " + parameter['function'] + ')\n')
+ finally:
+ f.close()
+
+
+mapping = {
+ 'boolean' : 'bool ',
+ 'string' : 'char *',
+ 'integer' : 'int ',
+ 'char' : 'char ',
+ 'list' : 'const char **',
+ 'enum' : 'int ',
+ 'boolean-auto' : 'int ',
+ 'cmdlist' : 'const char **',
+ 'bytes' : 'int ',
+ 'octal' : 'int ',
+ 'ustring' : 'char *',
+ }
+
+
+def make_s3_param_proto(path_in, path_out):
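+    '''write the include-guarded header of prototypes for the generated lp_*() parameter accessors'''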
+ file_out = open(path_out, 'w')
+ try:
+ file_out.write('/* This file was automatically generated by generate_param.py. DO NOT EDIT */\n\n')
+ header = get_header(path_out)
+ file_out.write("#ifndef %s\n" % header)
+ file_out.write("#define %s\n\n" % header)
+ file_out.write("struct share_params;\n")
+ file_out.write("struct loadparm_substitution;\n")
+ for parameter in iterate_all(path_in):
+            # filter out parametric options
+ if ':' in parameter['name']:
+ continue
+ if parameter['synonym'] == "1":
+ continue
+ if parameter['generated'] == "0":
+ continue
+
+ output_string = ""
+ param_type = mapping.get(parameter['type'])
+ if param_type is None:
+                raise Exception(parameter['name'] + " has an invalid param type " + parameter['type'])
+ output_string += param_type
+ output_string += "lp_%s" % parameter['function']
+
+ param = None
+ if parameter['parm']:
+ param = "const struct share_params *p"
+ else:
+ param = "int"
+
+ if parameter['type'] == 'string' or parameter['type'] == 'ustring':
+ if parameter['substitution']:
+ if parameter['context'] == 'G':
+ output_string += '(TALLOC_CTX *ctx, const struct loadparm_substitution *lp_sub);\n'
+ elif parameter['context'] == 'S':
+ output_string += '(TALLOC_CTX *ctx, const struct loadparm_substitution *lp_sub, %s);\n' % param
+ else:
+                        raise Exception(parameter['name'] + " has an invalid context " + parameter['context'])
+ else:
+ if parameter['context'] == 'G':
+ output_string = 'const ' + output_string + '(void);\n'
+ elif parameter['context'] == 'S':
+ output_string = 'const ' + output_string + '(%s);\n' % param
+ else:
+                        raise Exception(parameter['name'] + " has an invalid context " + parameter['context'])
+ else:
+ if parameter['context'] == 'G':
+ output_string += '(void);\n'
+ elif parameter['context'] == 'S':
+ output_string += '(%s);\n' % param
+ else:
+                    raise Exception(parameter['name'] + " has an invalid context " + parameter['context'])
+
+ file_out.write(output_string)
+
+ file_out.write("\n#endif /* %s */\n\n" % header)
+ finally:
+ file_out.close()
+
+
+def make_lib_proto(path_in, path_out):
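+    '''write the list of prototypes for the generated lpcfg_*() parameter accessors'''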
+ file_out = open(path_out, 'w')
+ try:
+ file_out.write('/* This file was automatically generated by generate_param.py. DO NOT EDIT */\n\n')
+ for parameter in iterate_all(path_in):
+            # filter out parametric options
+ if ':' in parameter['name']:
+ continue
+ if parameter['synonym'] == "1":
+ continue
+ if parameter['generated'] == "0":
+ continue
+
+ output_string = ""
+ param_type = mapping.get(parameter['type'])
+ if param_type is None:
+                raise Exception(parameter['name'] + " has an invalid param type " + parameter['type'])
+ output_string += param_type
+
+ output_string += "lpcfg_%s" % parameter['function']
+
+ if parameter['type'] == 'string' or parameter['type'] == 'ustring':
+ if parameter['substitution']:
+ if parameter['context'] == 'G':
+ output_string += '(struct loadparm_context *, const struct loadparm_substitution *lp_sub, TALLOC_CTX *ctx);\n'
+ elif parameter['context'] == 'S':
+ output_string += '(struct loadparm_service *, struct loadparm_service *, TALLOC_CTX *ctx);\n'
+ else:
+ raise Exception(parameter['name'] + " has an invalid context " + parameter['context'])
+ else:
+ if parameter['context'] == 'G':
+ output_string = 'const ' + output_string + '(struct loadparm_context *);\n'
+ elif parameter['context'] == 'S':
+ output_string = 'const ' + output_string + '(struct loadparm_service *, struct loadparm_service *);\n'
+ else:
+                        raise Exception(parameter['name'] + " has an invalid context " + parameter['context'])
+ else:
+ if parameter['context'] == 'G':
+ output_string += '(struct loadparm_context *);\n'
+ elif parameter['context'] == 'S':
+ output_string += '(struct loadparm_service *, struct loadparm_service *);\n'
+ else:
+                    raise Exception(parameter['name'] + " has an invalid context " + parameter['context'])
+
+ file_out.write(output_string)
+ finally:
+ file_out.close()
+
+
+def get_header(path):
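+    '''derive an include-guard macro name of the form __BASENAME_EXT__ from the output file name'''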
+ header = os.path.basename(path).upper()
+ header = header.replace(".", "_").replace("\\", "_").replace("-", "_")
+ return "__%s__" % header
+
+
+def make_param_defs(path_in, path_out, scope):
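+    '''write the member declarations of struct loadparm_global (GLOBAL scope) or struct loadparm_service (LOCAL scope)'''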
+ file_out = open(path_out, 'w')
+ try:
+ file_out.write('/* This file was automatically generated by generate_param.py. DO NOT EDIT */\n\n')
+ header = get_header(path_out)
+ file_out.write("#ifndef %s\n" % header)
+ file_out.write("#define %s\n\n" % header)
+ if scope == "GLOBAL":
+ file_out.write("/**\n")
+            file_out.write(" * This structure describes global (i.e., server-wide) parameters.\n")
+ file_out.write(" */\n")
+            file_out.write("struct loadparm_global\n")
+ file_out.write("{\n")
+ file_out.write("\tTALLOC_CTX *ctx; /* Context for talloced members */\n")
+ elif scope == "LOCAL":
+ file_out.write("/**\n")
+ file_out.write(" * This structure describes a single service.\n")
+ file_out.write(" */\n")
+            file_out.write("struct loadparm_service\n")
+ file_out.write("{\n")
+ file_out.write("\tbool autoloaded;\n")
+
+ for parameter in iterate_all(path_in):
+            # filter out parametric options
+ if ':' in parameter['name']:
+ continue
+ if parameter['synonym'] == "1":
+ continue
+
+ if (scope == "GLOBAL" and parameter['context'] != "G" or
+ scope == "LOCAL" and parameter['context'] != "S"):
+ continue
+
+ output_string = "\t"
+ param_type = mapping.get(parameter['type'])
+ if param_type is None:
+                raise Exception(parameter['name'] + " has an invalid param type " + parameter['type'])
+ output_string += param_type
+
+ output_string += " %s;\n" % parameter['function']
+ file_out.write(output_string)
+
+ file_out.write("LOADPARM_EXTRA_%sS\n" % scope)
+ file_out.write("};\n")
+ file_out.write("\n#endif /* %s */\n\n" % header)
+ finally:
+ file_out.close()
+
+
+type_dict = {
+ "boolean" : "P_BOOL",
+ "boolean-rev" : "P_BOOLREV",
+ "boolean-auto" : "P_ENUM",
+ "list" : "P_LIST",
+ "string" : "P_STRING",
+ "integer" : "P_INTEGER",
+ "enum" : "P_ENUM",
+ "char" : "P_CHAR",
+ "cmdlist" : "P_CMDLIST",
+ "bytes" : "P_BYTES",
+ "octal" : "P_OCTAL",
+ "ustring" : "P_USTRING",
+ }
+
+
+def make_param_table(path_in, path_out):
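+    '''write the parm_table[] array with one entry per parameter and one per synonym, terminated by a NULL label'''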
+ file_out = open(path_out, 'w')
+ try:
+ file_out.write('/* This file was automatically generated by generate_param.py. DO NOT EDIT */\n\n')
+ header = get_header(path_out)
+ file_out.write("#ifndef %s\n" % header)
+ file_out.write("#define %s\n\n" % header)
+
+ file_out.write("struct parm_struct parm_table[] = {\n")
+
+ for parameter in iterate_all(path_in):
+            # filter out parametric options
+ if ':' in parameter['name']:
+ continue
+ if parameter['context'] == 'G':
+ p_class = "P_GLOBAL"
+ else:
+ p_class = "P_LOCAL"
+
+ p_type = type_dict.get(parameter['type'])
+
+ if parameter['context'] == 'G':
+ temp = "GLOBAL"
+ else:
+ temp = "LOCAL"
+ offset = "%s_VAR(%s)" % (temp, parameter['function'])
+
+ enumlist = parameter['enumlist']
+ handler = parameter['handler']
+ synonym = parameter['synonym']
+ deprecated = parameter['deprecated']
+ flags_list = []
+ if synonym == "1":
+ flags_list.append("FLAG_SYNONYM")
+ if deprecated == "1":
+ flags_list.append("FLAG_DEPRECATED")
+ flags = "|".join(flags_list)
+ synonyms = parameter['synonyms']
+
+ file_out.write("\t{\n")
+ file_out.write("\t\t.label\t\t= \"%s\",\n" % parameter['name'])
+ file_out.write("\t\t.type\t\t= %s,\n" % p_type)
+ file_out.write("\t\t.p_class\t= %s,\n" % p_class)
+ file_out.write("\t\t.offset\t\t= %s,\n" % offset)
+ file_out.write("\t\t.special\t= %s,\n" % handler)
+ file_out.write("\t\t.enum_list\t= %s,\n" % enumlist)
+ if flags != "":
+ file_out.write("\t\t.flags\t\t= %s,\n" % flags)
+ file_out.write("\t},\n")
+
+ if synonyms is not None:
+ # for synonyms, we only list the synonym flag:
+ flags = "FLAG_SYNONYM"
+ for syn in synonyms:
+ file_out.write("\t{\n")
+ file_out.write("\t\t.label\t\t= \"%s\",\n" % syn.text)
+ file_out.write("\t\t.type\t\t= %s,\n" % p_type)
+ file_out.write("\t\t.p_class\t= %s,\n" % p_class)
+ file_out.write("\t\t.offset\t\t= %s,\n" % offset)
+ file_out.write("\t\t.special\t= %s,\n" % handler)
+ file_out.write("\t\t.enum_list\t= %s,\n" % enumlist)
+ if flags != "":
+ file_out.write("\t\t.flags\t\t= %s,\n" % flags)
+ file_out.write("\t},\n")
+
+ file_out.write("\n\t{ .label = NULL }\n")
+ file_out.write("};\n")
+ file_out.write("\n#endif /* %s */\n\n" % header)
+ finally:
+ file_out.close()
+
+
+if options.mode == 'FUNCTIONS':
+ generate_functions(options.filename, options.output)
+elif options.mode == 'S3PROTO':
+ make_s3_param_proto(options.filename, options.output)
+elif options.mode == 'LIBPROTO':
+ make_lib_proto(options.filename, options.output)
+elif options.mode == 'PARAMDEFS':
+ make_param_defs(options.filename, options.output, options.scope)
+elif options.mode == 'PARAMTABLE':
+ make_param_table(options.filename, options.output)
diff --git a/script/git-hooks/check-trailing-whitespace b/script/git-hooks/check-trailing-whitespace
new file mode 100755
index 0000000..4dc1a6d
--- /dev/null
+++ b/script/git-hooks/check-trailing-whitespace
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+git diff-index --cached --check HEAD -- :/*.[ch] :/*.p[ylm]
+
+if [ $? != 0 ]; then
+ echo
+ echo "The commit failed because it seems to introduce trailing whitespace"
+ echo "into C, Perl, or Python code."
+ echo
+	echo "If you are sure you want to do this, repeat the commit with the"
+	echo "--no-verify option, like this:"
+ echo
+ echo " git commit --no-verify"
+ exit 1
+fi
+
+exit 0
diff --git a/script/git-hooks/pre-commit-hook b/script/git-hooks/pre-commit-hook
new file mode 100755
index 0000000..1bcb000
--- /dev/null
+++ b/script/git-hooks/pre-commit-hook
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+set -eu
+
+gitdir=$(git rev-parse --show-toplevel)
+if [ $? -ne 0 ]; then
+ echo "git rev-parse --show-toplevel failed"
+ exit 1
+fi
+
+if [ ! -f ${gitdir}/script/git-hooks/pre-commit-script ]; then
+ exit 0
+fi
+
+${gitdir}/script/git-hooks/pre-commit-script || exit $?
+
+exit 0
diff --git a/script/git-hooks/pre-commit-script b/script/git-hooks/pre-commit-script
new file mode 100755
index 0000000..8adb01c
--- /dev/null
+++ b/script/git-hooks/pre-commit-script
@@ -0,0 +1,19 @@
+#!/bin/sh
+
+set -eu
+
+#
+# make emacs/magit work, cf
+# https://github.com/magit/magit/issues/3419
+#
+unset GIT_LITERAL_PATHSPECS
+
+gitdir=$(git rev-parse --show-toplevel)
+if [ $? -ne 0 ]; then
+ echo "git rev-parse --show-toplevel failed"
+ exit 1
+fi
+
+${gitdir}/script/git-hooks/check-trailing-whitespace || exit $?
+
+exit 0
diff --git a/script/identity_cc.sh b/script/identity_cc.sh
new file mode 100755
index 0000000..8b5118e
--- /dev/null
+++ b/script/identity_cc.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+#An "identity cross-execute" script
+#It can be used for testing the cross-build infrastructure
+#as follows:
+#./configure --cross-compile --cross-execute=./script/identity_cc.sh
+#If the build is actually a native build, then the configuration
+#result should be just like running ./configure without --cross-compile.
+
+eval "$@"
diff --git a/script/release.sh b/script/release.sh
new file mode 100755
index 0000000..b144f74
--- /dev/null
+++ b/script/release.sh
@@ -0,0 +1,1275 @@
+#!/bin/bash
+# make a release of Samba or a library
+
+LC_ALL=C
+export LC_ALL
+LANG=C
+export LANG
+LANGUAGE=C
+export LANGUAGE
+
+set -u
+set -e
+umask 0022
+
+CONF_REPO_URL="ssh://git.samba.org/data/git/samba.git"
+CONF_UPLOAD_URL="samba-bugs@download-master.samba.org:/home/data/ftp/pub"
+CONF_DOWNLOAD_URL="https://download.samba.org/pub"
+CONF_HISTORY_URL="https://www.samba.org"
+
+test -d ".git" -o -r ".git" || {
+ echo "Run this script from the top-level directory in the"
+ echo "repository"
+ exit 1
+}
+
+usage()
+{
+ echo "Usage: script/release.sh <PRODUCT> <COMMAND>"
+ echo ""
+ echo "PRODUCT: ldb, talloc, tevent, tdb, samba-rc, samba-stable"
+ echo "COMMAND: fullrelease, create, push, upload, announce"
+ echo ""
+ return 0
+}
+
+test -x "script/release.sh" || {
+ usage
+ echo "Run this script from the top-level directory in the"
+ echo "repository: as 'script/release.sh'"
+ exit 1
+}
+
+check_args()
+{
+ local cmd="$1"
+ local got_args="$2"
+ local take_args="$3"
+
+ test x"${got_args}" = x"${take_args}" || {
+ usage
+ echo "cmd[${cmd}] takes ${take_args} instead of ${got_args}"
+ return 1
+ }
+
+ return 0
+}
+
+min_args()
+{
+ local cmd="$1"
+ local got_args="$2"
+ local min_args="$3"
+
+ test "${got_args}" -ge "${min_args}" || {
+ usage
+ echo "cmd[${cmd}] takes at least ${min_args} instead of ${got_args}"
+ return 1
+ }
+
+ return 0
+}
+
+min_args "$0" "$#" "2"
+
+product="$1"
+globalcmd="$2"
+shift 2
+oldtagname=""
+tagname=""
+patchfile=""
+cmds=""
+next_cmd=""
+
+require_tagname()
+{
+ min_args "${FUNCNAME}" "$#" "1" || return 1
+ local cmd="$1"
+
+ test -n "${tagname}" || {
+ echo "cmd[${cmd}] requires '\${tagname}' variable to be set"
+ return 1
+ }
+
+ local name=$(echo "${tagname}" | cut -d '-' -f1)
+ test x"${name}" = x"${productbase}" || {
+		echo "Invalid tagname[${tagname}]"
+ return 1
+ }
+
+ return 0
+}
+
+cmd_allowed()
+{
+ min_args "${FUNCNAME}" "$#" "2" || return 1
+ local cmd="$1"
+ shift 1
+
+ echo "$@" | grep -q "\<${cmd}\>" || {
+ return 1
+ }
+
+ return 0
+}
+
+verify_samba_rc()
+{
+ check_args "${FUNCNAME}" "$#" "0" || return 1
+
+ test -f VERSION || {
+ echo "VERSION doesn't exist"
+ return 1
+ }
+
+ grep -q 'SAMBA_VERSION_IS_GIT_SNAPSHOT=no' VERSION || {
+ echo "SAMBA_VERSION_IS_GIT_SNAPSHOT is not 'no'"
+ return 1
+ }
+
+ grep -q '^SAMBA_VERSION_RC_RELEASE=' VERSION || {
+ echo "SAMBA_VERSION_RC_RELEASE= missing"
+ return 1
+ }
+
+ grep -q '^SAMBA_VERSION_RC_RELEASE=$' VERSION && {
+ echo "SAMBA_VERSION_RC_RELEASE= missing the rc version"
+ return 1
+ }
+
+ return 0
+}
+
+load_samba_stable_versions()
+{
+ check_args "${FUNCNAME}" "$#" "0" || return 1
+
+ test -n "${version-}" && {
+ return 0
+ }
+
+ local SAMBA_VERSION_MAJOR=$(grep '^SAMBA_VERSION_MAJOR=' VERSION | cut -d '=' -f2 | xargs)
+ local SAMBA_VERSION_MINOR=$(grep '^SAMBA_VERSION_MINOR=' VERSION | cut -d '=' -f2 | xargs)
+ local SAMBA_VERSION_RELEASE=$(grep '^SAMBA_VERSION_RELEASE=' VERSION | cut -d '=' -f2 | xargs)
+
+ version="${SAMBA_VERSION_MAJOR}.${SAMBA_VERSION_MINOR}.${SAMBA_VERSION_RELEASE}"
+ tagname="${productbase}-${version}"
+
+ test ${SAMBA_VERSION_RELEASE} -gt 0 || {
+ return 0
+ }
+
+ oldversion="${SAMBA_VERSION_MAJOR}.${SAMBA_VERSION_MINOR}.$(expr ${SAMBA_VERSION_RELEASE} - 1)"
+ oldtagname="${productbase}-${oldversion}"
+ patchfile="${productbase}-${oldversion}-${version}.diffs"
+
+ return 0
+}
+
+verify_samba_stable()
+{
+ check_args "${FUNCNAME}" "$#" "0" || return 1
+
+ test -f VERSION || {
+ echo "VERSION doesn't exist"
+ return 1
+ }
+
+ grep -q 'SAMBA_VERSION_IS_GIT_SNAPSHOT=no' VERSION || {
+ echo "SAMBA_VERSION_IS_GIT_SNAPSHOT is not 'no'"
+ return 1
+ }
+
+ local VARS=""
+ VARS="${VARS} SAMBA_VERSION_REVISION"
+ VARS="${VARS} SAMBA_VERSION_TP_RELEASE"
+ VARS="${VARS} SAMBA_VERSION_ALPHA_RELEASE"
+ VARS="${VARS} SAMBA_VERSION_BETA_RELEASE"
+ VARS="${VARS} SAMBA_VERSION_PRE_RELEASE"
+ VARS="${VARS} SAMBA_VERSION_RC_RELEASE"
+ VARS="${VARS} SAMBA_VERSION_RELEASE_NICKNAME"
+ VARS="${VARS} SAMBA_VERSION_VENDOR_SUFFIX"
+ VARS="${VARS} SAMBA_VERSION_VENDOR_PATCH"
+ for var in ${VARS}; do
+ grep -q "^${var}" VERSION && {
+ grep -q "^${var}=$" VERSION || {
+ echo "${var} found in stable version"
+ return 1
+ }
+ }
+ done
+
+ load_samba_stable_versions
+
+ test -n "${oldtagname}" || {
+ return 0
+ }
+
+ local verify_out="${TMPDIR}/verify-${oldtagname}.out"
+
+ echo "Verifying oldtagname: ${oldtagname}"
+
+ git tag -v "${oldtagname}" >${verify_out} 2>&1 || {
+ echo "failed to verify old tag[${oldtagname}]"
+ echo ""
+ cat "${verify_out}"
+ return 1
+ }
+
+ grep -q "${GPG_KEYID}" "${verify_out}" || {
+ echo "oldtagname[${oldtagname}] was not generated with GPG_KEYID[${GPG_KEYID}]!"
+ echo ""
+ cat "${verify_out}"
+ return 1
+ }
+
+ echo "Verifying ${oldtagname}.tar.gz and ${oldtagname}.tar.asc"
+
+ test -f "${oldtagname}.tar.gz" || {
+ echo "${oldtagname}.tar.gz does not exist"
+ return 1
+ }
+
+ test -f "${oldtagname}.tar.asc" || {
+ echo "${oldtagname}.tar.asc does not exist"
+ return 1
+ }
+
+ zcat "${oldtagname}.tar.gz" | gpg --verify "${oldtagname}.tar.asc" - 2>${verify_out} || {
+ echo "Failed to verify ${oldtagname}.tar.asc"
+ return 1
+ }
+
+ grep -q "${GPG_KEYID}" "${verify_out}" || {
+ echo "${oldtagname}.tar.asc was not generated with GPG_KEYID[${GPG_KEYID}]!"
+ echo ""
+ cat "${verify_out}"
+ return 1
+ }
+
+ return 0
+}
+
+verify_release()
+{
+ check_args "${FUNCNAME}" "$#" "0" || return 1
+
+ test -n "${verify_fn}" || {
+ echo "verify_fn variable empty"
+ return 1
+ }
+
+ echo "Running ${verify_fn}"
+ ${verify_fn}
+}
+
+create_release()
+{
+ check_args "${FUNCNAME}" "$#" "0" || return 1
+
+ echo "Releasing product ${product}"
+
+ test -n "${tagname}" && {
+ git tag -l "${tagname}" | grep -q "${tagname}" && {
+			echo "tagname[${tagname}] already exists"
+ return 1
+ }
+
+ local _tgzname="${tagname}.tar.gz"
+ test -e "${_tgzname}" && {
+			echo "_tgzname[${_tgzname}] already exists"
+ return 1
+ }
+ }
+
+ echo "Building release tarball"
+ local tgzname=$(make dist 2>&1 | grep ^Created | cut -d' ' -f2)
+ test -f "${tgzname}" || {
+ echo "Failed to create tarball"
+ return 1
+ }
+ CLEANUP_FILES="${CLEANUP_FILES} ${tgzname}"
+
+ local name=$(echo "${tgzname}" | cut -d '-' -f1)
+ test x"${name}" = x"${productbase}" || {
+ echo "Invalid tgzname[${tgzname}]"
+ return 1
+ }
+
+ local _tagname=$(basename ${tgzname} .tar.gz)
+ test -n "${tagname}" && {
+ test x"${_tagname}" = x"${tagname}" || {
+ echo "Invalid tgzname[${tgzname}]"
+ return 1
+ }
+ }
+ tagname="${_tagname}"
+
+ local tarname=$(basename ${tgzname} .gz)
+ echo "Tarball: ${tarname}"
+ gunzip -f ${tgzname} || {
+ echo "Failed to decompress tarball ${tarname}"
+ return 1
+ }
+ test -f "${tarname}" || {
+ echo "Failed to decompress tarball ${tarname}"
+ return 1
+ }
+ CLEANUP_FILES="${CLEANUP_FILES} ${tarname}"
+
+ # tagname is global
+ echo "Tagging as ${tagname}"
+ git tag -u ${GPG_KEYID} -s "${tagname}" -m "${productbase}: tag release ${tagname}" || {
+ return 1
+ }
+ CLEANUP_TAGS="${CLEANUP_TAGS} ${tagname}"
+
+ echo "Signing ${tarname} => ${tarname}.asc"
+ rm -f "${tarname}.asc"
+ gpg --default-key "${GPG_KEYID}" --detach-sign --armor ${tarname} || {
+ return 1
+ }
+ test -f "${tarname}.asc" || {
+ echo "Failed to create signature ${tarname}.asc"
+ return 1
+ }
+ CLEANUP_FILES="${CLEANUP_FILES} ${tarname}.asc"
+ echo "Compressing ${tarname} => ${tgzname}"
+ gzip -f -9 ${tarname}
+ test -f "${tgzname}" || {
+ echo "Failed to compress ${tgzname}"
+ return 1
+ }
+
+ return 0
+}
+
+patch_release()
+{
+ check_args "${FUNCNAME}" "$#" "0" || return 1
+ require_tagname "${FUNCNAME}"
+
+ test -n "${patchfile}" || {
+ return 0
+ }
+
+ local oldpwd=$(pwd)
+ echo "Generating ${patchfile}"
+ (
+ set -e
+ set -u
+ pushd "${TMPDIR}"
+ tar xfz "${oldpwd}/${oldtagname}.tar.gz"
+ tar xfz "${oldpwd}/${tagname}.tar.gz"
+ diff -Npur "${oldtagname}/" "${tagname}/" >"${patchfile}"
+ popd
+ )
+ CLEANUP_FILES="${CLEANUP_FILES} ${patchfile}"
+ mv "${TMPDIR}/${patchfile}" "${patchfile}" || {
+ echo "failed cmd[mv ${TMPDIR}/${patchfile} ${patchfile}]"
+ return 1
+ }
+
+ echo "Signing ${patchfile} => ${patchfile}.asc"
+ rm -f "${patchfile}.asc"
+ CLEANUP_FILES="${CLEANUP_FILES} ${patchfile}.asc"
+ gpg --default-key "${GPG_KEYID}" --detach-sign --armor ${patchfile} || {
+ return 1
+ }
+ test -f "${patchfile}.asc" || {
+ echo "Failed to create signature ${patchfile}.asc"
+ return 1
+ }
+ echo "Compressing ${patchfile} => ${patchfile}.gz"
+ CLEANUP_FILES="${CLEANUP_FILES} ${patchfile}.gz"
+ gzip -f -9 ${patchfile}
+ test -f "${patchfile}.gz" || {
+ echo "Failed to compress ${patchfile}.gz"
+ return 1
+ }
+
+ return 0
+}
+
+whatsnew_release()
+{
+ check_args "${FUNCNAME}" "$#" "0" || return 1
+ require_tagname "${FUNCNAME}"
+
+ echo "extract ${tagname}.WHATSNEW.txt"
+ tar xf ${tagname}.tar.gz --to-stdout ${tagname}/WHATSNEW.txt >${tagname}.WHATSNEW.txt
+ CLEANUP_FILES="${CLEANUP_FILES} ${tagname}.WHATSNEW.txt"
+
+ return 0
+}
+
+check_nopatch()
+{
+ check_args "${FUNCNAME}" "$#" "0" || return 1
+ require_tagname "${FUNCNAME}"
+
+ local verify_out="${TMPDIR}/verify-${oldtagname}.out"
+
+ echo "Verifying tagname: ${tagname}"
+
+ git tag -v "${tagname}" >${verify_out} 2>&1 || {
+ echo "failed to verify tag[${tagname}]"
+ echo ""
+ cat "${verify_out}"
+ return 1
+ }
+ grep -q "${GPG_KEYID}" "${verify_out}" || {
+ echo "tagname[${tagname}] was not generated with GPG_KEYID[${GPG_KEYID}]!"
+ echo ""
+ cat "${verify_out}"
+ return 1
+ }
+
+ echo "Verifying ${tagname}.tar.gz and ${tagname}.tar.asc"
+
+ test -f "${tagname}.tar.gz" || {
+ echo "${tagname}.tar.gz does not exist"
+ return 1
+ }
+
+ test -f "${tagname}.tar.asc" || {
+ echo "${tagname}.tar.asc does not exist"
+ return 1
+ }
+
+ zcat "${tagname}.tar.gz" | gpg --verify "${tagname}.tar.asc" - 2>${verify_out} || {
+ echo "Failed to verify ${tagname}.tar.asc"
+ return 1
+ }
+ grep -q "${GPG_KEYID}" "${verify_out}" || {
+ echo "${tagname}.tar.asc was not generated with GPG_KEYID[${GPG_KEYID}]!"
+ echo ""
+ cat "${verify_out}"
+ return 1
+ }
+
+ ls -la ${tagname}.*
+
+ return 0
+}
+
+check_samba_stable()
+{
+ check_args "${FUNCNAME}" "$#" "0" || return 1
+ require_tagname "${FUNCNAME}"
+
+ load_samba_stable_versions
+
+ local verify_out="${TMPDIR}/verify-${oldtagname}.out"
+
+ echo "Verifying tagname: ${tagname}"
+
+ git tag -v "${tagname}" >${verify_out} 2>&1 || {
+ echo "failed to verify tag[${tagname}]"
+ echo ""
+ cat "${verify_out}"
+ return 1
+ }
+ grep -q "${GPG_KEYID}" "${verify_out}" || {
+ echo "tagname[${tagname}] was not generated with GPG_KEYID[${GPG_KEYID}]!"
+ echo ""
+ cat "${verify_out}"
+ return 1
+ }
+
+ echo "Verifying ${tagname}.tar.gz and ${tagname}.tar.asc"
+
+ test -f "${tagname}.tar.gz" || {
+ echo "${tagname}.tar.gz does not exist"
+ return 1
+ }
+
+ test -f "${tagname}.tar.asc" || {
+ echo "${tagname}.tar.asc does not exist"
+ return 1
+ }
+
+ zcat "${tagname}.tar.gz" | gpg --verify "${tagname}.tar.asc" - 2>${verify_out} || {
+ echo "Failed to verify ${tagname}.tar.asc"
+ return 1
+ }
+ grep -q "${GPG_KEYID}" "${verify_out}" || {
+ echo "${tagname}.tar.asc was not generated with GPG_KEYID[${GPG_KEYID}]!"
+ echo ""
+ cat "${verify_out}"
+ return 1
+ }
+
+ test -n "${patchfile}" || {
+ ls -lart ${tagname}.*
+ return 0
+ }
+
+ echo "Verifying ${patchfile}.gz and ${patchfile}.asc"
+
+ test -f "${patchfile}.gz" || {
+ echo "${patchfile}.gz does not exist"
+ return 1
+ }
+
+ test -f "${patchfile}.asc" || {
+ echo "${patchfile}.asc does not exist"
+ return 1
+ }
+
+ zcat "${patchfile}.gz" | gpg --verify "${patchfile}.asc" - 2>${verify_out} || {
+ echo "Failed to verify ${patchfile}.asc"
+ return 1
+ }
+ grep -q "${GPG_KEYID}" "${verify_out}" || {
+ echo "${patchfile}.asc was not generated with GPG_KEYID[${GPG_KEYID}]!"
+ echo ""
+ cat "${verify_out}"
+ return 1
+ }
+
+ ls -lart ${tagname}.* ${patchfile}.*
+ return 0
+}
+
+check_release()
+{
+ check_args "${FUNCNAME}" "$#" "0" || return 1
+
+ test -n "${check_fn}" || {
+ echo "check_fn variable empty"
+ return 1
+ }
+
+ echo "Running ${check_fn}"
+ ${check_fn}
+}
+
+push_release()
+{
+ check_args "${FUNCNAME}" "$#" "0" || return 1
+ require_tagname "${FUNCNAME}"
+
+ echo "Push git tag ${tagname} to '${repo_url}'"
+ git push "${repo_url}" "refs/tags/${tagname}:refs/tags/${tagname}" || {
+ return 1
+ }
+
+ return 0
+}
+
+upload_nopatch()
+{
+ check_args "${FUNCNAME}" "$#" "0" || return 1
+ require_tagname "${FUNCNAME}"
+
+ echo "Upload ${tagname}.* to '${upload_url}'"
+ rsync -Pav --delay-updates ${tagname}.* "${upload_url}/" || {
+ return 1
+ }
+ rsync ${upload_url}/${tagname}.*
+
+ return 0
+}
+
+upload_samba_stable()
+{
+ check_args "${FUNCNAME}" "$#" "0" || return 1
+ require_tagname "${FUNCNAME}"
+
+ load_samba_stable_versions
+
+ local release_url="${upload_url}samba/stable/"
+ local patch_url="${upload_url}samba/patches/"
+
+ echo "Upload ${tagname}.tar.* to '${release_url}'"
+ ls -lart ${tagname}.tar.*
+ rsync -Pav --delay-updates ${tagname}.tar.* "${release_url}/" || {
+ return 1
+ }
+ rsync ${release_url}/${tagname}.tar.*
+
+ test -n "${patchfile}" || {
+ return 0
+ }
+
+ echo "Upload ${patchfile}.* to '${patch_url}'"
+ ls -lart ${patchfile}.*
+ rsync -Pav --delay-updates ${patchfile}.* "${patch_url}/" || {
+ return 1
+ }
+ rsync ${patch_url}/${patchfile}.*
+
+ return 0
+}
+
+upload_release()
+{
+ check_args "${FUNCNAME}" "$#" "0" || return 1
+
+ test -n "${upload_fn}" || {
+ echo "upload_fn variable empty"
+ return 1
+ }
+
+ echo "Running ${upload_fn}"
+ ${upload_fn}
+}
+
+announcement_samba_rc()
+{
+ check_args "${FUNCNAME}" "$#" "0" || return 1
+ require_tagname "${FUNCNAME}"
+
+ test -f "${tagname}.WHATSNEW.txt" || {
+ echo "${tagname}.WHATSNEW.txt does not exist"
+ return 1
+ }
+
+ local t=""
+ local version=$(echo "${tagname}" | sed -e 's!^samba-!!')
+ local href="#${version}"
+ local series=$(echo "${version}" | cut -d '.' -f1-2)
+ local rc=$(echo "${version}" | sed -e 's!.*rc\([0-9][0-9]*\)!\1!')
+ local rcname="${rc}th"
+ case "${rc}" in
+ 1)
+ rcname="first"
+ ;;
+ 2)
+ rcname="second"
+ ;;
+ 3)
+ rcname="third"
+ ;;
+ 4)
+ rcname="fourth"
+ ;;
+ 5)
+ rcname="fifth"
+ ;;
+ esac
+
+ CLEANUP_FILES="${CLEANUP_FILES} announce.${tagname}.to.txt"
+ {
+ echo "samba-announce@lists.samba.org, samba@lists.samba.org, samba-technical@lists.samba.org"
+ } >announce.${tagname}.to.txt
+
+ CLEANUP_FILES="${CLEANUP_FILES} announce.${tagname}.subject.txt"
+ {
+ echo "[Announce] Samba ${version} Available for Download"
+ } >announce.${tagname}.subject.txt
+
+ CLEANUP_FILES="${CLEANUP_FILES} announce.${tagname}.mail.txt"
+ {
+ cat ${tagname}.WHATSNEW.txt
+ echo ""
+ echo "================"
+ echo "Download Details"
+ echo "================"
+ echo ""
+ echo "The uncompressed tarballs and patch files have been signed"
+ echo "using GnuPG (ID ${GPG_KEYID}). The source code can be downloaded"
+ echo "from:"
+ echo ""
+ echo " ${download_url}"
+ echo ""
+ echo "The release notes are available online at:"
+ echo ""
+ echo " ${download_url}${tagname}.WHATSNEW.txt"
+ echo ""
+ echo "Our Code, Our Bugs, Our Responsibility."
+ echo "(https://bugzilla.samba.org/)"
+ echo ""
+ echo " --Enjoy"
+ echo " The Samba Team"
+ } >announce.${tagname}.mail.txt
+
+ CLEANUP_FILES="${CLEANUP_FILES} announce.${tagname}.mutt-arguments.txt"
+ {
+ echo -n "-i announce.${tagname}.mail.txt "
+ echo -n "-s \"$(cat announce.${tagname}.subject.txt | xargs)\" "
+ echo -n "$(cat announce.${tagname}.to.txt | xargs)"
+ } >announce.${tagname}.mutt-arguments.txt
+
+ local headlinefile="posted_news/@UTCTIME@.${version}.headline.html"
+ CLEANUP_FILES="${CLEANUP_FILES} announce.${tagname}.headline.html"
+ {
+ echo "<!-- BEGIN: ${headlinefile} -->"
+ echo "<li> @UTCDATE@ <a href=\"${href}\">Samba ${version} Available for Download</a></li>"
+ echo "<!-- END: ${headlinefile} -->"
+ } >announce.${tagname}.headline.html
+
+ local bodyfile="posted_news/@UTCTIME@.${version}.body.html"
+ CLEANUP_FILES="${CLEANUP_FILES} announce.${tagname}.body.html"
+ {
+ echo "<!-- BEGIN: ${bodyfile} -->"
+ echo "<h5><a name=\"${version}\">@UTCDATE@</a></h5>"
+ echo "<p class="headline">Samba ${version} Available for Download</p>"
+ echo "<p>"
+ echo "This is the ${rcname} release candidate of the upcoming Samba ${series} release series."
+ echo "</p>"
+ echo "<p>"
+ echo "The uncompressed tarball has been signed using GnuPG (ID ${GPG_KEYID})."
+ echo "The source code can be <a href=\"${download_url}${tagname}.tar.gz\">downloaded now</a>."
+ echo "See <a href=\"${download_url}${tagname}.WHATSNEW.txt\">the release notes for more info</a>."
+ echo "</p>"
+ echo "<!-- END: ${bodyfile} -->"
+ } >announce.${tagname}.body.html
+
+ local webrepo="${TMPDIR}/webrepo"
+
+ mkdir "${webrepo}" || {
+ return 1
+ }
+ git -C "${webrepo}" init || {
+ return 1
+ }
+
+ mkdir -p "$(dirname ${webrepo}/${headlinefile})" || {
+ return 1
+ }
+ cp -a "announce.${tagname}.headline.html" "${webrepo}/${headlinefile}" || {
+ return 1
+ }
+
+ mkdir -p "$(dirname ${webrepo}/${bodyfile})" || {
+ return 1
+ }
+ cp -a "announce.${tagname}.body.html" "${webrepo}/${bodyfile}" || {
+ return 1
+ }
+
+ git -C "${webrepo}" add "${headlinefile}" "${bodyfile}" || {
+ return 1
+ }
+ git -C "${webrepo}" commit --signoff --message "NEWS[${version}]: Samba ${version} Available for Download" || {
+ return 1
+ }
+ CLEANUP_FILES="${CLEANUP_FILES} announce.${tagname}.patch.txt"
+ git -C "${webrepo}" format-patch --stdout -1 HEAD >announce.${tagname}.patch.txt || {
+ return 1
+ }
+
+ CLEANUP_FILES="${CLEANUP_FILES} announce.${tagname}.todo.txt"
+ {
+ ls -lart announce.${tagname}.*
+ echo ""
+ echo "NOTICE:"
+ echo "You need to do the following manual steps in order"
+ echo "to finish the announcement of ${tagname}!"
+ echo ""
+ echo "Change to a samba-web checkout and run"
+ echo " ./announce_samba_release.sh ${version} $(pwd)/announce.${tagname}.patch.txt"
+ echo ""
+ echo "Once the resulting commit is pushed a cron job will update "
+ echo "the content exported by the webserver every 5-10 mins."
+ echo "Check https://www.samba.org"
+ echo ""
+ echo "If the web content is updated, you need to send the announce mail (gpg signed)."
+ echo "- announce.${tagname}.to.txt contains the mail's recipients for the To: header."
+ echo "- announce.${tagname}.subject.txt contains the mail's subject line."
+ echo "- announce.${tagname}.mail.txt contains the content of the mail body."
+ echo "In case your're using mutt, you can use the following shortcut:"
+ echo " eval mutt \$(cat announce.${tagname}.mutt-arguments.txt)"
+ echo ""
+ echo "NOTICE: you're not done yet! Read the above instructions carefully!"
+ echo "See: announce.${tagname}.todo.txt"
+ echo ""
+ } >announce.${tagname}.todo.txt
+
+ ls -lart announce.${tagname}.*
+ return 0
+}
+
+announcement_samba_stable()
+{
+ check_args "${FUNCNAME}" "$#" "0" || return 1
+ require_tagname "${FUNCNAME}"
+
+ load_samba_stable_versions
+
+ test -f "${tagname}.tar.gz" || {
+ echo "${tagname}.tar.gz does not exist"
+ return 1
+ }
+
+ local release_url="${download_url}samba/stable/"
+ local patch_url="${download_url}samba/patches/"
+
+ echo "extract WHATSNEW.txt"
+ tar xf ${tagname}.tar.gz --to-stdout ${tagname}/WHATSNEW.txt >${TMPDIR}/WHATSNEW.txt
+
+ local t=""
+ local oldversion=$(echo "${oldtagname}" | sed -e 's!^samba-!!')
+ local version=$(echo "${tagname}" | sed -e 's!^samba-!!')
+ local href="#${version}"
+ local series=$(echo "${version}" | cut -d '.' -f1-2)
+ local release=$(echo "${version}" | cut -d '.' -f3)
+ local releasename="latest"
+ case "${release}" in
+ 1)
+ releasename="first"
+ ;;
+ *)
+ releasename="latest"
+ ;;
+ esac
+
+ CLEANUP_FILES="${CLEANUP_FILES} announce.${tagname}.to.txt"
+ {
+ echo "samba-announce@lists.samba.org, samba@lists.samba.org, samba-technical@lists.samba.org"
+ } >announce.${tagname}.to.txt
+
+ CLEANUP_FILES="${CLEANUP_FILES} announce.${tagname}.subject.txt"
+ {
+ echo "[Announce] Samba ${version} Available for Download"
+ } >announce.${tagname}.subject.txt
+
+ CLEANUP_FILES="${CLEANUP_FILES} announce.${tagname}.mail.txt"
+ {
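+		# Extract the current release's notes from WHATSNEW.txt: drop the
+		# header lines before the first non-indented line and stop just
+		# before the "Release notes for older releases follow:" marker.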
+ local top=$(cat ${TMPDIR}/WHATSNEW.txt | grep -n '^Release notes for older releases follow:' | head -1 | cut -d ':' -f1)
+ test -n "${top}" || {
+ top=$(cat ${TMPDIR}/WHATSNEW.txt | wc -l)
+ }
+ local skip=$(cat ${TMPDIR}/WHATSNEW.txt | grep -n '^[^ ]' | head -1 | cut -d ':' -f1)
+ local headlimit=$(expr ${top} - 1)
+ local taillimit=$(expr ${headlimit} - \( ${skip} - 1 \))
+
+ echo ""
+ echo ""
+ echo "Release Announcements"
+ echo "---------------------"
+ echo ""
+ head -${headlimit} ${TMPDIR}/WHATSNEW.txt | tail -${taillimit}
+ echo ""
+ echo "================"
+ echo "Download Details"
+ echo "================"
+ echo ""
+ echo "The uncompressed tarballs and patch files have been signed"
+ echo "using GnuPG (ID ${GPG_KEYID}). The source code can be downloaded"
+ echo "from:"
+ echo ""
+ echo " ${release_url}"
+ echo ""
+ echo "The release notes are available online at:"
+ echo ""
+ echo " ${history_url}${tagname}.html"
+ echo ""
+ echo "Our Code, Our Bugs, Our Responsibility."
+ echo "(https://bugzilla.samba.org/)"
+ echo ""
+ echo " --Enjoy"
+ echo " The Samba Team"
+ } >announce.${tagname}.mail.txt
+
+ CLEANUP_FILES="${CLEANUP_FILES} announce.${tagname}.mutt-arguments.txt"
+ {
+ echo -n "-i announce.${tagname}.mail.txt "
+ echo -n "-s \"$(cat announce.${tagname}.subject.txt | xargs)\" "
+ echo -n "$(cat announce.${tagname}.to.txt | xargs)"
+ } >announce.${tagname}.mutt-arguments.txt
+
+ local htmlfile="history/${tagname}.html"
+ CLEANUP_FILES="${CLEANUP_FILES} announce.${tagname}.html"
+ {
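+		# Keep the release notes up to (but not including) the
+		# "Reporting bugs & Development Discussion" section.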
+ local tmp=$(cat ${TMPDIR}/WHATSNEW.txt | grep -n '^Reporting bugs & Development Discussion' | head -1 | cut -d ':' -f1)
+ local lines=$(expr ${tmp} - 2)
+
+ echo '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"'
+ echo ' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
+ echo '<html xmlns="http://www.w3.org/1999/xhtml">'
+
+ echo "<head>"
+ echo "<title>Samba ${version} - Release Notes</title>"
+ echo "</head>"
+
+ echo "<body>"
+ echo "<H2>Samba ${version} Available for Download</H2>"
+
+ echo "<p>"
+ echo "<a href=\"${release_url}${tagname}.tar.gz\">Samba ${version} (gzipped)</a><br>"
+ echo "<a href=\"${release_url}${tagname}.tar.asc\">Signature</a>"
+ echo "</p>"
+
+ test -n "${patchfile}" && {
+ echo "<p>"
+ echo "<a href=\"${patch_url}${patchfile}.gz\">Patch (gzipped) against Samba ${oldversion}</a><br>"
+ echo "<a href=\"${patch_url}${patchfile}.asc\">Signature</a>"
+ echo "</p>"
+ }
+
+ echo "<p>"
+ echo "<pre>"
+ head -${lines} ${TMPDIR}/WHATSNEW.txt | sed \
+ -e 's!&!\&amp;!g' | sed \
+ -e 's!<!\&lt;!g' \
+ -e 's!>!\&gt;!g' \
+ -e 's!ä!\&auml;!g' \
+ -e 's!Ä!\&Auml;!g' \
+ -e 's!ö!\&ouml;!g' \
+ -e 's!Ö!\&Ouml;!g' \
+ -e 's!ü!\&uuml;!g' \
+ -e 's!Ü!\&Uuml;!g' \
+ -e 's!ß!\&szlig;!g' \
+ -e 's!"!\&quot;!g' \
+ -e "s!'!\&apos;!g" |
+ cat
+ echo "</pre>"
+ echo "</p>"
+
+ echo "</body>"
+ echo "</html>"
+ } >announce.${tagname}.html
+
+ local headlinefile="posted_news/@UTCTIME@.${version}.headline.html"
+ CLEANUP_FILES="${CLEANUP_FILES} announce.${tagname}.headline.html"
+ {
+ echo "<!-- BEGIN: ${headlinefile} -->"
+ echo "<li> @UTCDATE@ <a href=\"${href}\">Samba ${version} Available for Download</a></li>"
+ echo "<!-- END: ${headlinefile} -->"
+ } >announce.${tagname}.headline.html
+
+ local bodyfile="posted_news/@UTCTIME@.${version}.body.html"
+ CLEANUP_FILES="${CLEANUP_FILES} announce.${tagname}.body.html"
+ {
+ echo "<!-- BEGIN: ${bodyfile} -->"
+ echo "<h5><a name=\"${version}\">@UTCDATE@</a></h5>"
+ echo "<p class="headline">Samba ${version} Available for Download</p>"
+ echo "<p>"
+ echo "This is the ${releasename} stable release of the Samba ${series} release series."
+ echo "</p>"
+ echo "<p>"
+ echo "The uncompressed tarball has been signed using GnuPG (ID ${GPG_KEYID})."
+ echo "The source code can be <a href=\"${release_url}${tagname}.tar.gz\">downloaded now</a>."
+ test -n "${patchfile}" && {
+ echo "A <a href=\"${patch_url}${patchfile}.gz\">patch against Samba ${oldversion}</a> is also available."
+ }
+ echo "See <a href=\"${history_url}${tagname}.html\">the release notes for more info</a>."
+ echo "</p>"
+ echo "<!-- END: ${bodyfile} -->"
+ } >announce.${tagname}.body.html
+
+ local webrepo="${TMPDIR}/webrepo"
+
+ mkdir "${webrepo}" || {
+ return 1
+ }
+ git -C "${webrepo}" init || {
+ return 1
+ }
+
+ mkdir -p "$(dirname ${webrepo}/${htmlfile})" || {
+ return 1
+ }
+ cp -a "announce.${tagname}.html" "${webrepo}/${htmlfile}" || {
+ return 1
+ }
+
+ mkdir -p "$(dirname ${webrepo}/${headlinefile})" || {
+ return 1
+ }
+ cp -a "announce.${tagname}.headline.html" "${webrepo}/${headlinefile}" || {
+ return 1
+ }
+
+ mkdir -p "$(dirname ${webrepo}/${bodyfile})" || {
+ return 1
+ }
+ cp -a "announce.${tagname}.body.html" "${webrepo}/${bodyfile}" || {
+ return 1
+ }
+
+ git -C "${webrepo}" add "${htmlfile}" "${headlinefile}" "${bodyfile}" || {
+ return 1
+ }
+ git -C "${webrepo}" commit --signoff --message "NEWS[${version}]: Samba ${version} Available for Download" || {
+ return 1
+ }
+ CLEANUP_FILES="${CLEANUP_FILES} announce.${tagname}.patch.txt"
+ git -C "${webrepo}" format-patch --stdout -1 HEAD >announce.${tagname}.patch.txt || {
+ return 1
+ }
+
+ CLEANUP_FILES="${CLEANUP_FILES} announce.${tagname}.todo.txt"
+ {
+ ls -lart announce.${tagname}.*
+ echo ""
+ echo "NOTICE:"
+ echo "You need to do the following manual steps in order"
+ echo "to finish the announcement of ${tagname}!"
+ echo ""
+ echo "Change to a samba-web checkout and run"
+ echo " ./announce_samba_release.sh ${version} $(pwd)/announce.${tagname}.patch.txt"
+ echo ""
+ echo "Once the resulting commit is pushed a cron job will update "
+ echo "the content exported by the webserver every 5-10 mins."
+ echo "Check https://www.samba.org"
+ echo ""
+ echo "If the web content is updated, you need to send the announce mail (gpg signed)."
+ echo "- announce.${tagname}.to.txt contains the mail's recipients for the To: header."
+ echo "- announce.${tagname}.subject.txt contains the mail's subject line."
+ echo "- announce.${tagname}.mail.txt contains the content of the mail body."
+ echo "In case your're using mutt, you can use the following shortcut:"
+ echo " eval mutt \$(cat announce.${tagname}.mutt-arguments.txt)"
+ echo ""
+ echo "NOTICE: you're not done yet! Read the above instructions carefully!"
+ echo "See: announce.${tagname}.todo.txt"
+ echo ""
+ } >announce.${tagname}.todo.txt
+
+ ls -lart announce.${tagname}.*
+ return 0
+}
+
+announcement_release()
+{
+ check_args "${FUNCNAME}" "$#" "0" || return 1
+
+ test -n "${announcement_fn}" || {
+ echo "announcement_fn variable empty"
+ return 1
+ }
+
+ echo "Running ${announcement_fn}"
+ ${announcement_fn}
+}
+
+announce_release()
+{
+ check_args "${FUNCNAME}" "$#" "0" || return 1
+ require_tagname "${FUNCNAME}"
+
+ test -f "announce.${tagname}.todo.txt" || {
+ echo "announce.${tagname}.todo.txt does not exist"
+ return 1
+ }
+
+ cat announce.${tagname}.todo.txt
+ return 0
+}
+
+case "${product}" in
+talloc | tdb | tevent | ldb)
+ test -z "${GPG_USER-}" && {
+ GPG_USER='Samba Library Distribution Key <samba-bugs@samba.org>'
+ }
+
+ test -z "${GPG_KEYID-}" && {
+ GPG_KEYID='4793916113084025'
+ }
+
+ productbase="${product}"
+ srcdir="lib/${product}"
+ repo_url="${CONF_REPO_URL}"
+ upload_url="${CONF_UPLOAD_URL}/${product}/"
+ download_url="${CONF_DOWNLOAD_URL}/${product}/"
+
+ check_fn="check_nopatch"
+ upload_fn="upload_nopatch"
+ fullcmds="create check push upload"
+ ;;
+samba-rc)
+ test -z "${GPG_USER-}" && {
+ GPG_USER='Samba Distribution Verification Key <samba-bugs@samba.org>'
+ }
+
+ test -z "${GPG_KEYID-}" && {
+ GPG_KEYID='AA99442FB680B620'
+ }
+
+ productbase="samba"
+ srcdir="."
+ repo_url="${CONF_REPO_URL}"
+ upload_url="${CONF_UPLOAD_URL}/samba/rc/"
+ download_url="${CONF_DOWNLOAD_URL}/samba/rc/"
+
+ verify_fn="verify_samba_rc"
+ check_fn="check_nopatch"
+ upload_fn="upload_nopatch"
+ announcement_fn="announcement_samba_rc"
+ fullcmds="verify create check whatsnew announcement push upload announce"
+ ;;
+samba-stable)
+ test -z "${GPG_USER-}" && {
+ GPG_USER='Samba Distribution Verification Key <samba-bugs@samba.org>'
+ }
+
+ test -z "${GPG_KEYID-}" && {
+ GPG_KEYID='AA99442FB680B620'
+ }
+
+ productbase="samba"
+ srcdir="."
+ repo_url="${CONF_REPO_URL}"
+ upload_url="${CONF_UPLOAD_URL}/"
+ download_url="${CONF_DOWNLOAD_URL}/"
+ history_url="${CONF_HISTORY_URL}/samba/history/"
+
+ verify_fn="verify_samba_stable"
+ check_fn="check_samba_stable"
+ upload_fn="upload_samba_stable"
+ announcement_fn="announcement_samba_stable"
+ fullcmds="verify create patch check announcement push upload announce"
+ ;;
+TODO-samba-security)
+ test -z "${GPG_USER-}" && {
+ GPG_USER='Samba Distribution Verification Key <samba-bugs@samba.org>'
+ }
+
+ test -z "${GPG_KEYID-}" && {
+ GPG_KEYID='AA99442FB680B620'
+ }
+
+ productbase="samba"
+ srcdir="."
+ repo_url="${CONF_REPO_URL}"
+ upload_url="${CONF_UPLOAD_URL}/"
+ download_url="${CONF_DOWNLOAD_URL}/"
+ history_url="${CONF_HISTORY_URL}/samba/history/"
+
+ verify_fn="verify_samba_stable"
+ check_fn="check_samba_stable"
+ upload_fn="upload_samba_stable"
+ announcement_fn="announcement_samba_security"
+ fullcmds="verify create patch check announcement"
+ next_cmd="push"
+ ;;
+*)
+ usage
+ echo "Unknown product ${product}"
+ exit 1
+ ;;
+esac
+
+pushd ${srcdir} || {
+ echo "srcdir[${srcdir}] does not exist"
+ exit 1
+}
+
+trap_handler()
+{
+ echo ""
+ echo "ERROR: cleaning up"
+ echo ""
+
+ for t in ${CLEANUP_TAGS}; do
+ echo "Removing tag[${t}]"
+ git tag -v "${t}" && {
+ git tag -d "${t}" || {
+ echo "failed to remove tag ${t}"
+ }
+ }
+ done
+
+ for f in ${CLEANUP_FILES}; do
+ echo "Removing file[${f}]"
+ test -f "${f}" && {
+ rm "${f}" || {
+ echo "failed to remove ${f}"
+ }
+ }
+ done
+
+ for d in ${CLEANUP_DIRS}; do
+ echo "Removing dir[${d}]"
+ test -d "${d}" && {
+ rm -rf "${d}" || {
+ echo "failed to remove ${d}"
+ }
+ }
+ done
+}
+
+CLEANUP_TAGS=""
+CLEANUP_FILES=""
+CLEANUP_DIRS=""
+trap trap_handler INT QUIT TERM EXIT
+
+cmd_allowed "${globalcmd}" fullrelease ${fullcmds} || {
+ usage
+ echo "command[${globalcmd}] not supported for product[${product}]"
+ exit 1
+}
+
+case "${globalcmd}" in
+fullrelease)
+ check_args "${globalcmd}" "$#" "0" || exit 1
+ cmds="${fullcmds}"
+ ;;
+create)
+ check_args "${globalcmd}" "$#" "0" || exit 1
+ check_args "create" "$#" "0" || exit 1
+
+ cmds=""
+ cmd_allowed "verify" ${fullcmds} && {
+ cmds="${cmds} verify"
+ }
+ cmds="${cmds} create"
+ cmd_allowed "whatsnew" ${fullcmds} && {
+ cmds="${cmds} whatsnew"
+ }
+ cmd_allowed "patch" ${fullcmds} && {
+ cmds="${cmds} patch"
+ }
+ cmds="${cmds} check"
+ cmd_allowed "announcement" ${fullcmds} && {
+ cmds="${cmds} announcement"
+ }
+ next_cmd="push"
+ ;;
+push)
+ check_args "${globalcmd}" "$#" "1" || exit 1
+ tagname="$1"
+ cmds="check push"
+ next_cmd="upload"
+ ;;
+upload)
+ check_args "${globalcmd}" "$#" "1" || exit 1
+ tagname="$1"
+ cmds="check upload"
+ cmd_allowed "announce" ${fullcmds} && {
+ next_cmd="announce"
+ }
+ ;;
+announce)
+ check_args "${globalcmd}" "$#" "1" || exit 1
+ tagname="$1"
+ cmds="check announce"
+ ;;
+*)
+ usage
+ echo "Unknown command ${globalcmd}"
+ exit 1
+ ;;
+esac
+
+TMPDIR="release.$$"
+CLEANUP_DIRS="${CLEANUP_DIRS} ${TMPDIR}"
+umask 0077
+mkdir "${TMPDIR}"
+umask 0022
+
+for cmd in ${cmds}; do
+ echo "Starting subcommand[${cmd}]"
+ ${cmd}_release || {
+ echo "Failed subcommand[${cmd}]"
+ exit 1
+ }
+ echo "Finished subcommand[${cmd}]"
+done
+
+test -d "${TMPDIR}" && {
+ rm -rf "${TMPDIR}" || {
+ echo "failed to remove ${TMPDIR}"
+ }
+}
+
+test -n "${next_cmd}" && {
+ echo "Continue with '$0 ${product} ${next_cmd} ${tagname}'."
+}
+
+trap - INT QUIT TERM EXIT
+
+exit 0
diff --git a/script/show_test_time b/script/show_test_time
new file mode 100755
index 0000000..70d29d7
--- /dev/null
+++ b/script/show_test_time
@@ -0,0 +1,29 @@
+#!/usr/bin/env python3
+import optparse
+import os.path
+import subprocess
+import sys
+
+parser = optparse.OptionParser()
+parser.add_option("--limit", dest="limit", type=int,
+ help="Limit to this number of output entries.", default=0)
+(opts, args) = parser.parse_args()
+
+durations = {}
+
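+# Convert the subunit v1 stream on stdin to v2 and list each test with its
+# runtime; every output line is expected to look like "<test id> <duration>".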
+cmd = "subunit-1to2 | subunit-ls --times --no-passthrough"
+
+p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=sys.stdin, shell=True)
+for l in p.stdout:
+	l = l.decode('utf-8').strip()
+ (name, duration) = l.rsplit(" ", 1)
+ durations[name] = float(duration)
+
+if opts.limit:
+ print("Top %d tests by run time:" % opts.limit)
+
+for i, (name, length) in enumerate(sorted(
+ durations.items(), key=lambda x: x[1], reverse=True)):
+ if opts.limit and i == opts.limit:
+ break
+ print("%d: %s -> %ds" % (i+1, name, length))
diff --git a/script/show_testsuite_time b/script/show_testsuite_time
new file mode 100755
index 0000000..6e5808a
--- /dev/null
+++ b/script/show_testsuite_time
@@ -0,0 +1,51 @@
+#!/usr/bin/env perl
+use Time::Local ('timegm');
+my $in = STDIN;
+use strict;
+
+my $intest=0;
+my $name;
+my $start=0;
+my $end=0;
+my %hash;
+my $fh;
+my $max=0;
+if ($#ARGV >= 0) {
+ open($fh, "<", $ARGV[0]) || die "can't open ".$ARGV[0];
+} else {
+ $fh = $in;
+}
+if ($#ARGV >= 1) {
+ $max = $ARGV[1];
+ if ($max =~ /\D/) {
+ die "not a decimal number: '$max'";
+ }
+}
+
+print "TOP $max slowest tests\n";
+
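+# Parse a subunit stream: a "testsuite:" line starts a suite, "time:" lines
+# within it provide the start and end timestamps, and a "testsuite-<status>:"
+# line closes the suite and records its duration.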
+while(<$fh>)
+{
+ if (m/^testsuite: (.*)/) {
+ $intest = 1;
+ $name = $1;
+ }
+ if (m/testsuite-\w+:/) {
+ $hash{"$name -> ".($end - $start)} = $end - $start;
+ $intest = 0;
+ $start = 0;
+ }
+ if (m/^time: (\d\d\d\d)-(\d\d)-(\d\d) (\d\d):(\d\d):(\d\d)/ && $intest) {
+ my $ts=timegm($6,$5,$4,$3,$2 - 1,$1 - 1900);
+ if ($start == 0) {
+ $start = $ts;
+ } else {
+ $end = $ts;
+ }
+ }
+}
+my @sorted = sort { $hash{$b}<=>$hash{$a} } keys(%hash);
+$max = @sorted if (($max <= 0) or ($max > @sorted));
+for my $l (@sorted[0..($max - 1)]) {
+ print $l."\n";
+}
diff --git a/script/traffic_learner b/script/traffic_learner
new file mode 100755
index 0000000..303956e
--- /dev/null
+++ b/script/traffic_learner
@@ -0,0 +1,72 @@
+#!/usr/bin/env python3
+# Generate a traffic model from a traffic summary file
+#
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import sys
+import argparse
+
+sys.path.insert(0, "bin/python")
+from samba.emulate import traffic
+
+from samba.logger import get_samba_logger
+logger = get_samba_logger(name=__name__, level=20)
+error = logger.error
+info = logger.info
+
+def main():
+ parser = argparse.ArgumentParser(description=__doc__,
+ formatter_class=argparse.RawDescriptionHelpFormatter)
+ parser.add_argument('-o', '--out',
+ help="write model here")
+ parser.add_argument('--dns-mode', choices=['inline', 'count'],
+ help='how to deal with DNS', default='count')
+ parser.add_argument('SUMMARY_FILE', nargs='*', type=argparse.FileType('r'),
+ default=[sys.stdin],
+ help="read from this file (default STDIN)")
+ args = parser.parse_args()
+
+ if args.out is None:
+ error("No output file was specified to write the model to.")
+ error("Please specify a filename using the --out option.")
+ return 1
+
+ try:
+ outfile = open(args.out, 'w')
+ except IOError as e:
+ error("could not open %s" % args.out)
+ error(e)
+ return 1
+
+    if args.SUMMARY_FILE == [sys.stdin]:
+ info("reading from STDIN...")
+
+ (conversations,
+ interval,
+ duration,
+ dns_counts) = traffic.ingest_summaries(args.SUMMARY_FILE,
+ dns_mode=args.dns_mode)
+
+ model = traffic.TrafficModel()
+ info("learning model")
+ if args.dns_mode == 'count':
+ model.learn(conversations, dns_counts)
+ else:
+ model.learn(conversations)
+
+ model.save(args.out)
+
+sys.exit(main())
diff --git a/script/traffic_replay b/script/traffic_replay
new file mode 100755
index 0000000..60b7adb
--- /dev/null
+++ b/script/traffic_replay
@@ -0,0 +1,445 @@
+#!/usr/bin/env python3
+# Generates samba network traffic
+#
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import sys
+import os
+import optparse
+import tempfile
+import shutil
+import random
+
+sys.path.insert(0, "bin/python")
+
+from samba import gensec, get_debug_level
+from samba.emulate import traffic
+import samba.getopt as options
+from samba.logger import get_samba_logger
+from samba.samdb import SamDB
+from samba.auth import system_session
+
+
+def print_err(*args, **kwargs):
+ print(*args, file=sys.stderr, **kwargs)
+
+
+def main():
+
+ desc = ("Generates network traffic 'conversations' based on a model generated"
+ " by script/traffic_learner. This traffic is sent to <dns-hostname>,"
+ " which is the full DNS hostname of the DC being tested.")
+
+ parser = optparse.OptionParser(
+ "%prog [--help|options] <model-file> <dns-hostname>",
+ description=desc)
+
+ parser.add_option('--dns-rate', type='float', default=0,
+ help='fire extra DNS packets at this rate')
+ parser.add_option('--dns-query-file', dest="dns_query_file",
+                      help='A file containing a list of DNS queries')
+ parser.add_option('-B', '--badpassword-frequency',
+ type='float', default=0.0,
+ help='frequency of connections with bad passwords')
+ parser.add_option('-K', '--prefer-kerberos',
+ action="store_true",
+ help='prefer kerberos when authenticating test users')
+ parser.add_option('-I', '--instance-id', type='int', default=0,
+ help='Instance number, when running multiple instances')
+ parser.add_option('-t', '--timing-data',
+ help=('write individual message timing data here '
+ '(- for stdout)'))
+ parser.add_option('--preserve-tempdir', default=False, action="store_true",
+ help='do not delete temporary files')
+ parser.add_option('-F', '--fixed-password',
+ type='string', default=None,
+ help=('Password used for the test users created. '
+ 'Required'))
+ parser.add_option('-c', '--clean-up',
+ action="store_true",
+ help='Clean up the generated groups and user accounts')
+ parser.add_option('--random-seed', type='int', default=None,
+ help='Use to keep randomness consistent across multiple runs')
+ parser.add_option('--stop-on-any-error',
+ action="store_true",
+ help='abort the whole thing if a child fails')
+ model_group = optparse.OptionGroup(parser, 'Traffic Model Options',
+ 'These options alter the traffic '
+ 'generated by the model')
+ model_group.add_option('-S', '--scale-traffic', type='float',
+ help=('Increase the number of conversations by '
+ 'this factor (or use -T)'))
+ parser.add_option('-T', '--packets-per-second', type=float,
+ help=('attempt this many packets per second '
+ '(alternative to -S)'))
+ parser.add_option('--old-scale',
+ action="store_true",
+ help='emulate the old scale for traffic')
+ model_group.add_option('-D', '--duration', type='float', default=60.0,
+ help=('Run model for this long (approx). '
+ 'Default 60s for models'))
+ model_group.add_option('--latency-timeout', type='float', default=None,
+ help=('Wait this long for last packet to finish'))
+ model_group.add_option('-r', '--replay-rate', type='float', default=1.0,
+ help='Replay the traffic faster by this factor')
+ model_group.add_option('--conversation-persistence', type='float',
+ default=0.0,
+ help=('chance (0 to 1) that a conversation waits '
+ 'when it would have died'))
+ model_group.add_option('--traffic-summary',
+ help=('Generate a traffic summary file and write '
+ 'it here (- for stdout)'))
+ parser.add_option_group(model_group)
+
+ user_gen_group = optparse.OptionGroup(parser, 'Generate User Options',
+ "Add extra user/groups on the DC to "
+ "increase the DB size. These extra "
+ "users aren't used for traffic "
+ "generation.")
+ user_gen_group.add_option('-G', '--generate-users-only',
+ action="store_true",
+ help='Generate the users, but do not replay '
+ 'the traffic')
+ user_gen_group.add_option('-n', '--number-of-users', type='int', default=0,
+ help='Total number of test users to create')
+ user_gen_group.add_option('--number-of-groups', type='int', default=None,
+ help='Create this many groups')
+ user_gen_group.add_option('--average-groups-per-user',
+ type='int', default=0,
+ help='Assign the test users to this '
+ 'many groups on average')
+ user_gen_group.add_option('--group-memberships', type='int', default=0,
+ help='Total memberships to assign across all '
+ 'test users and all groups')
+ user_gen_group.add_option('--max-members', type='int', default=None,
+ help='Max users to add to any one group')
+ parser.add_option_group(user_gen_group)
+
+ sambaopts = options.SambaOptions(parser)
+ parser.add_option_group(sambaopts)
+ parser.add_option_group(options.VersionOptions(parser))
+ credopts = options.CredentialsOptions(parser)
+ parser.add_option_group(credopts)
+
+ # the --no-password credential doesn't make sense for this tool
+ if parser.has_option('-N'):
+ parser.remove_option('-N')
+
+ opts, args = parser.parse_args()
+
+ # First ensure we have reasonable arguments
+
+ if len(args) == 1:
+ model_file = None
+ host = args[0]
+ elif len(args) == 2:
+ model_file, host = args
+ else:
+ parser.print_usage()
+ return
+
+ lp = sambaopts.get_loadparm()
+ debuglevel = get_debug_level()
+ logger = get_samba_logger(name=__name__,
+ verbose=debuglevel > 3,
+ quiet=debuglevel < 1)
+
+ traffic.DEBUG_LEVEL = debuglevel
+ # pass log level down to traffic module to make sure level is controlled
+ traffic.LOGGER.setLevel(logger.getEffectiveLevel())
+
+ if opts.clean_up:
+ logger.info("Removing user and machine accounts")
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp)
+ creds.set_gensec_features(creds.get_gensec_features() | gensec.FEATURE_SEAL)
+ ldb = traffic.openLdb(host, creds, lp)
+ traffic.clean_up_accounts(ldb, opts.instance_id)
+ exit(0)
+
+ if model_file:
+ if not os.path.exists(model_file):
+ logger.error("Model file %s doesn't exist" % model_file)
+ sys.exit(1)
+ # the model-file can be omitted for --generate-users-only and
+    # --clean-up, but it should be specified in all other cases
+ elif not opts.generate_users_only:
+ logger.error("No model file specified to replay traffic from")
+ sys.exit(1)
+
+ if not opts.fixed_password:
+ logger.error(("Please use --fixed-password to specify a password"
+ " for the users created as part of this test"))
+ sys.exit(1)
+
+ if opts.random_seed is not None:
+ random.seed(opts.random_seed)
+
+ creds = credopts.get_credentials(lp)
+ creds.set_gensec_features(creds.get_gensec_features() | gensec.FEATURE_SEAL)
+
+ domain = creds.get_domain()
+ if domain:
+ lp.set("workgroup", domain)
+ else:
+ domain = lp.get("workgroup")
+ if domain == "WORKGROUP":
+ logger.error(("NETBIOS domain does not appear to be "
+ "specified, use the --workgroup option"))
+ sys.exit(1)
+
+ if not opts.realm and not lp.get('realm'):
+ logger.error("Realm not specified, use the --realm option")
+ sys.exit(1)
+
+ if opts.generate_users_only and not (opts.number_of_users or
+ opts.number_of_groups):
+ logger.error(("Please specify the number of users and/or groups "
+ "to generate."))
+ sys.exit(1)
+
+ if opts.group_memberships and opts.average_groups_per_user:
+ logger.error(("--group-memberships and --average-groups-per-user"
+ " are incompatible options - use one or the other"))
+ sys.exit(1)
+
+ if not opts.number_of_groups and opts.average_groups_per_user:
+ logger.error(("--average-groups-per-user requires "
+ "--number-of-groups"))
+ sys.exit(1)
+
+ if opts.number_of_groups and opts.average_groups_per_user:
+ if opts.number_of_groups < opts.average_groups_per_user:
+ logger.error(("--average-groups-per-user can not be more than "
+ "--number-of-groups"))
+ sys.exit(1)
+
+ if not opts.number_of_groups and opts.group_memberships:
+ logger.error("--group-memberships requires --number-of-groups")
+ sys.exit(1)
+
+ if opts.scale_traffic is not None and opts.packets_per_second is not None:
+ logger.error("--scale-traffic and --packets-per-second "
+ "are incompatible. Use one or the other.")
+ sys.exit(1)
+
+ if not opts.scale_traffic and not opts.packets_per_second:
+ logger.info("No packet rate specified. Using --scale-traffic=1.0")
+ opts.scale_traffic = 1.0
+
+ if opts.timing_data not in ('-', None):
+ try:
+ open(opts.timing_data, 'w').close()
+ except IOError:
+ # exception info will be added to log automatically
+ logger.exception(("the supplied timing data destination "
+ "(%s) is not writable" % opts.timing_data))
+            sys.exit(1)
+
+ if opts.traffic_summary not in ('-', None):
+ try:
+ open(opts.traffic_summary, 'w').close()
+ except IOError:
+ # exception info will be added to log automatically
+ if debuglevel > 0:
+ import traceback
+ traceback.print_exc()
+ logger.exception(("the supplied traffic summary destination "
+ "(%s) is not writable" % opts.traffic_summary))
+            sys.exit(1)
+
+ if opts.old_scale:
+ # we used to use a silly calculation based on the number
+ # of conversations; now we use the number of packets and
+ # scale traffic accurately. To roughly compare with older
+ # numbers you use --old-scale which approximates as follows:
+ opts.scale_traffic *= 0.55
+
+ # ingest the model
+ if model_file and not opts.generate_users_only:
+ model = traffic.TrafficModel()
+ try:
+ model.load(model_file)
+ except ValueError:
+ if debuglevel > 0:
+ import traceback
+ traceback.print_exc()
+ logger.error(("Could not parse %s, which does not seem to be "
+ "a model generated by script/traffic_learner."
+ % model_file))
+ sys.exit(1)
+
+ logger.info(("Using the specified model file to "
+ "generate conversations"))
+
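+        # Convert the --scale-traffic factor into an absolute packet rate
+        # using the model; -T/--packets-per-second already gives the rate
+        # directly.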
+ if opts.scale_traffic:
+ packets_per_second = model.scale_to_packet_rate(opts.scale_traffic)
+ else:
+ packets_per_second = opts.packets_per_second
+
+ conversations = \
+ model.generate_conversation_sequences(
+ packets_per_second,
+ opts.duration,
+ opts.replay_rate,
+ opts.conversation_persistence)
+ else:
+ conversations = []
+
+ if opts.number_of_users and opts.number_of_users < len(conversations):
+ logger.error(("--number-of-users (%d) is less than the "
+ "number of conversations to replay (%d)"
+ % (opts.number_of_users, len(conversations))))
+ sys.exit(1)
+
+ number_of_users = max(opts.number_of_users, len(conversations))
+
+ if opts.number_of_groups is None:
+ opts.number_of_groups = max(int(number_of_users / 10), 1)
+
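+    # Upper bound on memberships: every user being in every group.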
+ max_memberships = number_of_users * opts.number_of_groups
+
+ if not opts.group_memberships and opts.average_groups_per_user:
+ opts.group_memberships = opts.average_groups_per_user * number_of_users
+ logger.info(("Using %d group-memberships based on %u average "
+ "memberships for %d users"
+ % (opts.group_memberships,
+ opts.average_groups_per_user, number_of_users)))
+
+ if opts.group_memberships > max_memberships:
+ logger.error(("The group memberships specified (%d) exceeds "
+ "the total users (%d) * total groups (%d)"
+ % (opts.group_memberships, number_of_users,
+ opts.number_of_groups)))
+ sys.exit(1)
+
+ # if no groups were specified by the user, then make sure we create some
+ # group memberships (otherwise it's not really a fair test)
+ if not opts.group_memberships and not opts.average_groups_per_user:
+ opts.group_memberships = min(number_of_users * 5, max_memberships)
+
+ # Get an LDB connection.
+ try:
+ # if we're only adding users, then it's OK to pass a sam.ldb filepath
+ # as the host, which creates the users much faster. In all other cases
+ # we should be connecting to a remote DC
+ if opts.generate_users_only and os.path.isfile(host):
+ ldb = SamDB(url="ldb://{0}".format(host),
+ session_info=system_session(), lp=lp)
+ else:
+ ldb = traffic.openLdb(host, creds, lp)
+ except:
+ logger.error(("\nInitial LDAP connection failed! Did you supply "
+ "a DNS host name and the correct credentials?"))
+ sys.exit(1)
+
+ if opts.generate_users_only:
+ # generate computer accounts for added realism. Assume there will be
+ # some overhang with more computer accounts than users
+ computer_accounts = int(1.25 * number_of_users)
+ traffic.generate_users_and_groups(ldb,
+ opts.instance_id,
+ opts.fixed_password,
+ opts.number_of_users,
+ opts.number_of_groups,
+ opts.group_memberships,
+ opts.max_members,
+ machine_accounts=computer_accounts,
+ traffic_accounts=False)
+ sys.exit()
+
+ tempdir = tempfile.mkdtemp(prefix="samba_tg_")
+ logger.info("Using temp dir %s" % tempdir)
+
+ traffic.generate_users_and_groups(ldb,
+ opts.instance_id,
+ opts.fixed_password,
+ number_of_users,
+ opts.number_of_groups,
+ opts.group_memberships,
+ opts.max_members,
+ machine_accounts=len(conversations),
+ traffic_accounts=True)
+
+ accounts = traffic.generate_replay_accounts(ldb,
+ opts.instance_id,
+ len(conversations),
+ opts.fixed_password)
+
+ statsdir = traffic.mk_masked_dir(tempdir, 'stats')
+
+ if opts.traffic_summary:
+ if opts.traffic_summary == '-':
+ summary_dest = sys.stdout
+ else:
+ summary_dest = open(opts.traffic_summary, 'w')
+
+ logger.info("Writing traffic summary")
+ summaries = []
+ for c in traffic.seq_to_conversations(conversations):
+ summaries += c.replay_as_summary_lines()
+
+ summaries.sort()
+ for (time, line) in summaries:
+ print(line, file=summary_dest)
+
+ exit(0)
+
+ traffic.replay(conversations,
+ host,
+ lp=lp,
+ creds=creds,
+ accounts=accounts,
+ dns_rate=opts.dns_rate,
+ dns_query_file=opts.dns_query_file,
+ duration=opts.duration,
+ latency_timeout=opts.latency_timeout,
+ badpassword_frequency=opts.badpassword_frequency,
+ prefer_kerberos=opts.prefer_kerberos,
+ statsdir=statsdir,
+ domain=domain,
+ base_dn=ldb.domain_dn(),
+ ou=traffic.ou_name(ldb, opts.instance_id),
+ tempdir=tempdir,
+ stop_on_any_error=opts.stop_on_any_error,
+ domain_sid=ldb.get_domain_sid(),
+ instance_id=opts.instance_id)
+
+ if opts.timing_data == '-':
+ timing_dest = sys.stdout
+ elif opts.timing_data is None:
+ timing_dest = None
+ else:
+ timing_dest = open(opts.timing_data, 'w')
+
+ logger.info("Generating statistics")
+ traffic.generate_stats(statsdir, timing_dest)
+
+ if not opts.preserve_tempdir:
+ logger.info("Removing temporary directory")
+ shutil.rmtree(tempdir)
+ else:
+ # delete the empty directories anyway. There are thousands of
+ # them and they're EMPTY.
+ for d in os.listdir(tempdir):
+ if d.startswith('conversation-'):
+ path = os.path.join(tempdir, d)
+ try:
+ os.rmdir(path)
+ except OSError as e:
+ logger.info("not removing %s (%s)" % (path, e))
+
+main()
diff --git a/script/traffic_summary.pl b/script/traffic_summary.pl
new file mode 100755
index 0000000..295a320
--- /dev/null
+++ b/script/traffic_summary.pl
@@ -0,0 +1,707 @@
+#! /usr/bin/perl
+#
+# Summarise tshark pdml output into a form suitable for the load test tool
+#
+# Copyright (C) Catalyst.Net Ltd 2017
+#
+# Catalyst.Net's contribution was written by Gary Lockyer
+# <gary@catalyst.net.nz>.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+use warnings;
+use strict;
+
+use Getopt::Long;
+use Pod::Usage;
+
+BEGIN {
+ unless (eval "require XML::Twig") {
+ warn "traffic_summary requires the perl module XML::Twig\n" .
+ "on Ubuntu/Debian releases run\n".
+ " sudo apt install libxml-twig-perl \n".
+ "or install from CPAN\n".
+ "\nThe reported error was:\n$@";
+ exit(1);
+ }
+}
+
+
+my %ip_map; # Map of IP address to sequence number
+my $ip_sequence = 0; # count of unique IP addresses seen
+
+
+my $timestamp; # Packet timestamp
+my $stream; # Wireshark stream number
+my $ip_proto; # IP protocol (IANA protocol number)
+my $source; # source IP address
+my $dest; # destination address
+my $proto; # application protocol name
+my $description; # protocol specific description
+my %proto_data; # protocol specific data captured for the current packet
+my $malformed_packet; # Indicates the current packet has errors
+my $ldap_filter; # cleaned ldap filter
+my $ldap_attributes; # attributes requested in an ldap query
+
+
+
+# Dispatch table mapping the wireshark variables of interest to the
+# functions responsible for processing them
+my %field_dispatch_table = (
+ 'timestamp' => \&timestamp,
+ 'ip.src' => \&ip_src,
+ 'ipv6.src' => \&ip_src,
+ 'ip.dst' => \&ip_dst,
+ 'ipv6.dst' => \&ip_dst,
+ 'ip.proto' => \&ip_proto,
+ 'udp.stream' => \&stream,
+ 'tcp.stream' => \&stream,
+ 'dns.flags.opcode' => \&field_data,
+ 'dns.flags.response' => \&field_data,
+ 'netlogon.opnum' => \&field_data,
+ 'kerberos.msg_type' => \&field_data,
+ 'smb.cmd' => \&field_data,
+ 'smb2.cmd' => \&field_data,
+ 'ldap.protocolOp' => \&field_data,
+ 'gss-api.OID' => \&field_data,
+ 'ldap.gssapi_encrypted_payload' => \&field_data,
+ 'ldap.baseObject' => \&field_data,
+ 'ldap.scope' => \&field_data,
+ 'ldap.AttributeDescription' => \&ldap_attribute,
+ 'ldap.modification_element' => \&ldap_add_modify,
+ 'ldap.AttributeList_item_element' => \&ldap_add_modify,
+ 'ldap.operation' => \&field_data,
+ 'ldap.authentication' => \&field_data,
+ 'lsarpc.opnum' => \&field_data,
+ 'samr.opnum' => \&field_data,
+ 'dcerpc.pkt_type' => \&field_data,
+ 'epm.opnum' => \&field_data,
+ 'dnsserver.opnum' => \&field_data,
+ 'drsuapi.opnum' => \&field_data,
+ 'browser.command' => \&field_data,
+ 'smb_netlogon.command' => \&field_data,
+ 'srvsvc.opnum' => \&field_data,
+ 'nbns.flags.opcode' => \&field_data,
+ 'nbns.flags.response' => \&field_data,
+ '_ws.expert.message' => \&field_data,
+);
+
+# Dispatch table mapping protocols to the routine responsible for formatting
+# their output. Protocols not in this table are ignored.
+#
+my %proto_dispatch_table = (
+ 'dns' => sub { return format_opcode( 'dns.flags.response')},
+ 'rpc_netlogon' => sub { return format_opcode( 'netlogon.opnum')},
+ 'kerberos' => \&format_kerberos,
+ 'smb' => sub { return format_opcode( 'smb.cmd')},
+ 'smb2' => sub { return format_opcode( 'smb2.cmd')},
+ 'ldap' => \&format_ldap,
+ 'cldap' => \&format_ldap,
+ 'lsarpc' => sub { return format_opcode( 'lsarpc.opnum')},
+ 'samr' => sub { return format_opcode( 'samr.opnum')},
+ 'dcerpc' => sub { return format_opcode( 'dcerpc.pkt_type')},
+ 'epm' => sub { return format_opcode( 'epm.opnum')},
+ 'dnsserver' => sub { return format_opcode( 'dnsserver.opnum')},
+ 'drsuapi' => sub { return format_opcode( 'drsuapi.opnum')},
+ 'browser' => sub { return format_opcode( 'browser.command')},
+ 'smb_netlogon' => sub { return format_opcode( 'smb_netlogon.command')},
+ 'srvsvc' => sub { return format_opcode( 'srvsvc.opnum')},
+ 'nbns' => sub { return format_opcode( 'nbns.flags.response')},
+);
+
+# XPath entry to extract the kerberos cname
+my $kerberos_cname_path =
+ 'packet/proto/field[@name = "kerberos.as_req_element"]'
+ . '/field[@name = "kerberos.req_body_element"]'
+ . '/field[@name = "kerberos.cname_element"]'
+ . '/field[@name = "kerberos.name_string"]'
+ . '/field[@name = "kerberos.KerberosString"]';
+
+# XPath entry to extract the ldap filter
+my $ldap_filter_path =
+ 'field[@name = "ldap.searchRequest_element"]/field';
+
+
+# Create an XML Twig parser and register the event handlers.
+#
+my $t = XML::Twig->new(
+ start_tag_handlers => {
+ 'packet' => \&packet_start,
+ },
+ twig_handlers => {
+ 'packet' => \&packet,
+ 'proto' => \&protocol,
+ 'field' => \&field,
+ $kerberos_cname_path => \&kerberos_cname,
+ $ldap_filter_path => \&ldap_filter,
+ },
+);
+
+#------------------------------------------------------------------------------
+# Main loop
+#
+#------------------------------------------------------------------------------
+my $help = 0;
+GetOptions( 'help|h' => \$help) or pod2usage(2);
+pod2usage(1) if $help;
+
+if (@ARGV) {
+ foreach my $file (@ARGV) {
+ eval {
+ $t->parsefile( $file);
+ };
+ if ($@) {
+ print STDERR "Unable to process $file, ".
+ "did you run tshark with the -T pdml option?";
+ }
+ }
+} else {
+ pod2usage(1) if -t STDIN;
+ eval {
+ $t->parse( \*STDIN);
+ };
+ if ($@) {
+ print STDERR "Unable to process input, ".
+ "are you running tshark with the -T pdml option?";
+ }
+}
+
+
+#------------------------------------------------------------------------------
+# New packet detected reset the globals
+#------------------------------------------------------------------------------
+sub packet_start
+{
+ my ($t, $packet) = @_;
+ $timestamp = "";
+ $stream = "";
+ $ip_proto = "";
+ $source = "";
+ $dest = "";
+ $description = undef;
+ %proto_data = ();
+ $malformed_packet = undef;
+ $ldap_filter = "";
+ $ldap_attributes = "";
+}
+
+#------------------------------------------------------------------------------
+# Complete packet element parsed from the XML feed
+# output the protocol summary if required
+#------------------------------------------------------------------------------
+sub packet
+{
+ my ($t, $packet) = @_;
+
+ my $data;
+ if (exists $proto_dispatch_table{$proto}) {
+ if ($malformed_packet) {
+ $data = "\t\t** Malformed Packet ** " . ($proto_data{'_ws.expert.message.show'} || '');
+ } else {
+ my $rsub = $proto_dispatch_table{$proto};
+ $data = &$rsub();
+ }
+ print "$timestamp\t$ip_proto\t$stream\t$source\t$dest\t$proto\t$data\n";
+ }
+ $t->purge;
+}
+
+#------------------------------------------------------------------------------
+# Complete protocol element parsed from the XML input
+# Update the protocol name
+#------------------------------------------------------------------------------
+sub protocol
+{
+ my ($t, $protocol) = @_;
+ # Tag a packet as malformed if the protocol is _ws.malformed
+ # and the hide attribute is not 'yes'
+ if ($protocol->{att}->{name} eq '_ws.malformed'
+ && !($protocol->{att}->{hide} && $protocol->{att}->{hide} eq 'yes')
+ ) {
+ $malformed_packet = 1;
+ }
+ # Don't set the protocol name if it's a wireshark malformed
+ # protocol entry, or the packet was truncated during capture
+ my $p = $protocol->{att}->{name};
+ if ($p ne '_ws.malformed' && $p ne '_ws.short') {
+ $proto = $p;
+ }
+}
+
+
+#------------------------------------------------------------------------------
+# Complete field element parsed, extract any data of interest
+#------------------------------------------------------------------------------
+sub field
+{
+ my ($t, $field) = @_;
+ my $name = $field->{att}->{name};
+
+ # Only process the field if it has a corresponding entry in
+ # %field_dispatch_table
+ if (exists $field_dispatch_table{$name}) {
+ my $rsub = $field_dispatch_table{$name};
+ &$rsub( $field);
+ }
+}
+
+#------------------------------------------------------------------------------
+# Process a timestamp field element
+#------------------------------------------------------------------------------
+sub timestamp
+{
+ my ($field) = @_;
+ $timestamp = $field->{att}->{value};
+}
+
+#------------------------------------------------------------------------------
+# Process a wireshark stream element, used to group a sequence of requests
+# and responses between two IP addresses
+#------------------------------------------------------------------------------
+sub stream
+{
+ my ($field) = @_;
+ $stream = $field->{att}->{show};
+}
+
+#------------------------------------------------------------------------------
+# Process a source ip address field, mapping the IP address to its
+# corresponding sequence number.
+#------------------------------------------------------------------------------
+sub ip_src
+{
+ my ($field) = @_;
+ $source = map_ip( $field);
+}
+
+#------------------------------------------------------------------------------
+# Process a destination ip address field, mapping the IP address to its
+# corresponding sequence number.
+#------------------------------------------------------------------------------
+sub ip_dst
+{
+ my ($field) = @_;
+ $dest = map_ip( $field);
+}
+
+#------------------------------------------------------------------------------
+# Process an ip protocol element, extracting IANA protocol number
+#------------------------------------------------------------------------------
+sub ip_proto
+{
+ my ($field) = @_;
+ $ip_proto = $field->{att}->{value};
+}
+
+
+
+#------------------------------------------------------------------------------
+# Extract an ldap attribute and append it to ldap_attributes
+#------------------------------------------------------------------------------
+sub ldap_attribute
+{
+ my ($field) = @_;
+ my $attribute = $field->{att}->{show};
+
+ if (defined $attribute) {
+ $ldap_attributes .= "," if $ldap_attributes;
+ $ldap_attributes .= $attribute;
+ }
+}
+
+#------------------------------------------------------------------------------
+# Process a field element, extract the value, show and showname attributes
+# and store them in the %proto_data hash.
+#
+#------------------------------------------------------------------------------
+sub field_data
+{
+ my ($field) = @_;
+ my $name = $field->{att}->{name};
+ $proto_data{$name.'.value'} = $field->{att}->{value};
+ $proto_data{$name.'.show'} = $field->{att}->{show};
+ $proto_data{$name.'.showname'} = $field->{att}->{showname};
+}
+
+#------------------------------------------------------------------------------
+# Process a kerberos cname element, if the cname ends with a $ it's a machine
+# name. Otherwise it's a user name.
+#
+#------------------------------------------------------------------------------
+sub kerberos_cname
+{
+ my ($t, $field) = @_;
+ my $cname = $field->{att}->{show};
+ my $type;
+ if( $cname =~ /\$$/) {
+ $type = 'machine';
+ } else {
+ $type = 'user';
+ }
+ $proto_data{'kerberos.cname.type'} = $type;
+}
+
+
+#------------------------------------------------------------------------------
+# Process an ldap filter, remove the values but keep the attribute names
+#------------------------------------------------------------------------------
+sub ldap_filter
+{
+ my ($t, $field) = @_;
+ if ( $field->{att}->{show} && $field->{att}->{show} =~ /^Filter:/) {
+ my $filter = $field->{att}->{show};
+
+ # extract and save the objectClass to keep the value
+ my @object_classes;
+ while ( $filter =~ m/\((objectClass=.*?)\)/g) {
+ push @object_classes, $1;
+ }
+
+ # extract and save objectCategory and the top level value
+ my @object_categories;
+ while ( $filter =~ m/(\(objectCategory=.*?,|\(objectCategory=.*?\))/g
+ ) {
+ push @object_categories, $1;
+ }
+
+ # Remove all the values from the attributes
+ # Input
+ # Filter: (nCName=DC=DomainDnsZones,DC=sub1,DC=ad,DC=rh,DC=at,DC=net)
+ # Output
+ # (nCName)
+ $filter =~ s/^Filter:\s*//; # Remove the 'Filter: ' prefix
+ $filter =~ s/=.*?\)/\)/g; # Remove from the = to the first )
+
+ # Now restore the parts of objectClass and objectCategory that are being
+ # retained
+ #
+ for my $cat (@object_categories) {
+ $filter =~ s/\(objectCategory\)/$cat/;
+ }
+
+ for my $class (@object_classes) {
+ $filter =~ s/\(objectClass\)/($class)/;
+ }
+
+ $ldap_filter = $filter;
+ } else {
+ # Ok not an ldap filter so call the default field handler
+ field( $t, $field);
+ }
+}
+
+
+#------------------------------------------------------------------------------
+# Extract the attributes from ldap modification and add requests
+#------------------------------------------------------------------------------
+sub ldap_add_modify
+{
+ my ($field) = @_;
+ my $type = $field->first_child('field[@name="ldap.type"]');
+    my $attribute;
+    $attribute = $type->{att}->{show} if $type;
+ if (defined $attribute) {
+ $ldap_attributes .= "," if $ldap_attributes;
+ $ldap_attributes .= $attribute;
+ }
+}
+#------------------------------------------------------------------------------
+# Map an IP address to a unique sequence number. Assigning it a sequence number
+# if one has not already been assigned.
+#
+#------------------------------------------------------------------------------
+sub map_ip
+{
+ my ($field) = @_;
+ my $ip = $field->{att}->{show};
+ if ( !exists( $ip_map{$ip})) {
+ $ip_sequence++;
+ $ip_map{$ip} = $ip_sequence;
+ }
+ return $ip_map{$ip};
+}
+
+#------------------------------------------------------------------------------
+# Format a protocol operation code for output.
+#
+#------------------------------------------------------------------------------
+sub format_opcode
+{
+ my ($name) = @_;
+ my $operation = $proto_data{$name.'.show'};
+ my $description = $proto_data{$name.'.showname'} || '';
+
+ # Strip off the common prefix text, and the trailing (n).
+ # This tidies up most but not all descriptions.
+ $description =~ s/^[^:]*?: ?// if $description;
+ $description =~ s/^Message is a // if $description;
+ $description =~ s/\(\d+\)\s*$// if $description;
+ $description =~ s/\s*$// if $description;
+
+ return "$operation\t$description";
+}
+
+#------------------------------------------------------------------------------
+# Format ldap protocol details for output
+#------------------------------------------------------------------------------
+sub format_ldap
+{
+ my ($name) = @_;
+ if ( exists( $proto_data{'ldap.protocolOp.show'})
+ || exists( $proto_data{'gss-api.OID.show'})
+ ) {
+ my $operation = $proto_data{'ldap.protocolOp.show'};
+ my $description = $proto_data{'ldap.protocolOp.showname'} || '';
+ my $oid = $proto_data{'gss-api.OID.show'} || '';
+ my $base_object = $proto_data{'ldap.baseObject.show'} || '';
+ my $scope = $proto_data{'ldap.scope.show'} || '';
+
+ # Now extract operation specific data
+ my $extra;
+ my $extra_desc;
+ $operation = '' if !defined $operation;
+ if ($operation eq 6) {
+ # Modify operation
+ $extra = $proto_data{'ldap.operation.show'};
+ $extra_desc = $proto_data{'ldap.operation.showname'};
+ } elsif ($operation eq 0) {
+ # Bind operation
+ $extra = $proto_data{'ldap.authentication.show'};
+ $extra_desc = $proto_data{'ldap.authentication.showname'};
+ }
+ $extra = '' if !defined $extra;
+ $extra_desc = '' if !defined $extra_desc;
+
+
+ # strip the values out of the base object
+ if ($base_object) {
+ $base_object =~ s/^<//; # leading '<' if present
+ $base_object =~ s/>$//; # trailing '>' if present
+ $base_object =~ s/=.*?,/,/g; # from = up to the next comma
+ $base_object =~ s/=.*?$//; # from = up to the end of string
+ }
+
+ # strip off the leading prefix on the extra_description
+ # and the trailing (n);
+ $extra_desc =~ s/^[^:]*?: ?// if $extra_desc;
+ $extra_desc =~ s/\(\d+\)\s*$// if $extra_desc;
+ $extra_desc =~ s/\s*$// if $extra_desc;
+
+ # strip off the common prefix on the description
+ # and the trailing (n);
+ $description =~ s/^[^:]*?: ?// if $description;
+ $description =~ s/\(\d+\)\s*$// if $description;
+ $description =~ s/\s*$// if $description;
+
+ return "$operation\t$description\t$scope\t$base_object"
+ ."\t$ldap_filter\t$ldap_attributes\t$extra\t$extra_desc\t$oid";
+ } else {
+ return "\t*** Unknown ***";
+ }
+}
+
+#------------------------------------------------------------------------------
+# Format kerberos protocol details for output.
+#------------------------------------------------------------------------------
+sub format_kerberos
+{
+ my $msg_type = $proto_data{'kerberos.msg_type.show'} || '';
+ my $cname_type = $proto_data{'kerberos.cname.type'} || '';
+ my $description = $proto_data{'kerberos.msg_type.showname'} || '';
+
+ # Tidy up the description
+ $description =~ s/^[^:]*?: ?// if $description;
+ $description =~ s/\(\d+\)\s*$// if $description;
+ $description =~ s/\s*$// if $description;
+ return "$msg_type\t$description\t$cname_type";
+}
+
+=pod
+
+=head1 NAME
+
+traffic_summary.pl - summarise tshark pdml output
+
+=head1 USAGE
+
+B<traffic_summary.pl> [FILE...]
+
+Summarise Samba network traffic from tshark pdml output. Produces a
+tab-separated (TSV) summary of Samba activity.
+
+To process unencrypted traffic
+
+ tshark -r capture.file -T pdml | traffic_summary.pl
+
+To process encrypted kerberos traffic
+
+ tshark -r capture.file -K krb5.keytab -o kerberos.decrypt:true -T pdml | traffic_summary.pl
+
+To display more detailed documentation, including details of the output format
+
+ perldoc traffic_summary.pl
+
+ NOTE: tshark pdml output is very verbose, so it's better to pipe the tshark
+ output directly to traffic_summary, rather than generating
+ intermediate pdml format files.
+
+=head1 OPTIONS
+
+ B<--help> Display usage message and exit.
+
+=head1 DESCRIPTION
+
+Summarises tshark pdml output into a format suitable for load analysis
+and input into load generation tools.
+
+It reads the pdml input from stdin or the list of files passed on the command line.
+
+
+=head2 Output format
+
+ The output is tab-delimited fields, one line per summarised packet.
+
+=head3 Fields
+
+ B<timestamp> Packet timestamp
+ B<IP protocol> The IANA protocol number
+ B<Wireshark Stream Number> Calculated by wireshark to group related requests and responses
+ B<Source IP> The unique sequence number for the source IP address
+ B<Destination IP> The unique sequence number for the destination IP address
+ B<protocol> The protocol name
+ B<opcode> The protocol operation code
+ B<Description> The protocol or operation description
+ B<extra> Extra protocol specific data, may be more than one field
+
+
+=head2 IP address mapping
+
+ Rather than capturing and printing the raw IP addresses, each unique IP
+ address seen is assigned a sequence number: the first IP address seen will
+ be 1, the second 2, and so on.
+
+=head2 Packets collected
+
+ Packets containing the following protocol records are summarised:
+ dns
+ rpc_netlogon
+ kerberos
+ smb
+ smb2
+ ldap
+ cldap
+ lsarpc
+ samr
+ dcerpc
+ epm
+ dnsserver
+ drsuapi
+ browser
+ smb_netlogon
+ srvsvc
+ nbns
+
+ Any other packets are ignored.
+
+ In addition to the standard elements, extra data is returned for the
+ following protocol records.
+
+=head3 kerberos
+
+ cname_type machine cname ends with a $
+ user cname does not end with a $
+
+=head3 ldap
+
+ scope Query Scope
+ 0 - Base
+ 1 - One level
+ 2 - sub tree
+ base_object ldap base object
+ ldap_filter the ldap filter, attribute names are retained but the values
+ are removed.
+ ldap_attributes ldap attributes, only the names are retained; any values are
+ discarded, with the following two exceptions:
+ objectClass all the attribute values are retained
+ objectCategory the top level value is retained
+ i.e. everything from the = to the first ,
+
+=head3 ldap modifyRequest
+
+ In addition to the standard ldap fields, the modification type is also captured
+
+ modify_operator for modifyRequests this contains the modify operation
+ 0 - add
+ 1 - delete
+ 2 - replace
+ modify_description a description of the operation if available
+
+=head3 ldap bindRequest
+
+ In addition to the standard ldap fields, details of the authentication
+ type are captured
+
+ authentication type 0 - Simple
+ 3 - SASL
+ description Description of the authentication mechanism
+ oid GSS-API OIDs
+ 1.2.840.113554.1.2.2 - Kerberos v5
+ 1.2.840.48018.1.2.2 - Kerberos V5
+ (incorrect, used by old Windows versions)
+ 1.3.6.1.5.5.2 - SPNEGO
+ 1.3.6.1.5.2.5 - IAKERB
+ 1.3.6.1.4.1.311.2.2.10 - NTLM SSP
+ 1.3.6.1.5.5.14 - SCRAM-SHA-1
+ 1.3.6.1.5.5.18 - SCRAM-SHA-256
+ 1.3.6.1.5.5.15.1.1.* - GSS-EAP
+ 1.3.6.1.5.2.7 - PKU2U
+ 1.3.6.1.5.5.1.1 - SPKM-1
+ 1.3.6.1.5.5.1.2 - SPKM-2
+ 1.3.6.1.5.5.1.3 - SPKM-3
+ 1.3.6.1.5.5.9 - LIPKEY
+ 1.2.752.43.14.2 - NETLOGON
+
+=head1 DEPENDENCIES
+
+ tshark
+ XML::Twig      (libxml-twig-perl on Ubuntu/Debian, or from CPAN)
+ Getopt::Long
+ Pod::Usage
+
+
+=head1 Diagnostics
+
+=head2 ** Unknown **
+
+Unable to determine the operation being performed; for ldap this typically
+indicates a kerberos-encrypted operation.
+
+=head2 ** Malformed Packet **
+
+tshark indicated that the packet was malformed; for ldap this usually
+indicates TLS-encrypted traffic.
+
+=head1 LICENSE AND COPYRIGHT
+
+ Copyright (C) Catalyst.Net Ltd 2017
+
+ Catalyst.Net's contribution was written by Gary Lockyer
+ <gary@catalyst.net.nz>.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+=cut