diff options
Diffstat (limited to 'source4/scripting/bin')
26 files changed, 6460 insertions, 0 deletions
diff --git a/source4/scripting/bin/enablerecyclebin b/source4/scripting/bin/enablerecyclebin new file mode 100755 index 0000000..3477f90 --- /dev/null +++ b/source4/scripting/bin/enablerecyclebin @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 +# +# enabled the Recycle Bin optional feature +# +import optparse +import sys + +# Find right directory when running from source tree +sys.path.insert(0, "bin/python") + +import samba +from samba import getopt as options, Ldb +from ldb import SCOPE_BASE +import sys +import ldb +from samba.auth import system_session + +parser = optparse.OptionParser("enablerecyclebin <URL>") +sambaopts = options.SambaOptions(parser) +parser.add_option_group(sambaopts) +credopts = options.CredentialsOptions(parser) +parser.add_option_group(credopts) +parser.add_option_group(options.VersionOptions(parser)) + +opts, args = parser.parse_args() +opts.dump_all = True + +if len(args) != 1: + parser.print_usage() + sys.exit(1) + +url = args[0] + +lp_ctx = sambaopts.get_loadparm() + +creds = credopts.get_credentials(lp_ctx) +sam_ldb = Ldb(url, session_info=system_session(), credentials=creds, lp=lp_ctx) + +# get the rootDSE +res = sam_ldb.search(base="", expression="", scope=SCOPE_BASE, attrs=["configurationNamingContext"]) +rootDse = res[0] + +configbase=rootDse["configurationNamingContext"] + +# enable the feature +msg = ldb.Message() +msg.dn = ldb.Dn(sam_ldb, "") +msg["enableOptionalFeature"] = ldb.MessageElement( + "CN=Partitions," + str(configbase) + ":766ddcd8-acd0-445e-f3b9-a7f9b6744f2a", + ldb.FLAG_MOD_ADD, "enableOptionalFeature") +res = sam_ldb.modify(msg) + +print("Recycle Bin feature enabled") diff --git a/source4/scripting/bin/findprovisionusnranges b/source4/scripting/bin/findprovisionusnranges new file mode 100755 index 0000000..b05b5ce --- /dev/null +++ b/source4/scripting/bin/findprovisionusnranges @@ -0,0 +1,78 @@ +#!/usr/bin/env python3 +# +# Helper for determining USN ranges created of modified by provision and +# upgradeprovision. 
+# Copyright (C) Matthieu Patou <mat@matws.net> 2009-2011 +# +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# +import sys +import optparse +sys.path.insert(0, "bin/python") + +from samba.credentials import DONT_USE_KERBEROS +from samba.auth import system_session +from samba import Ldb +import ldb + +import samba.getopt as options +from samba import param +from samba.upgradehelpers import get_paths, print_provision_ranges, findprovisionrange +from samba.ndr import ndr_unpack +from samba.dcerpc import misc + +parser = optparse.OptionParser("findprovisionusnranges [options]") +sambaopts = options.SambaOptions(parser) +parser.add_option_group(sambaopts) +parser.add_option_group(options.VersionOptions(parser)) +parser.add_option("--storedir", type="string", help="Directory where to store result files") +credopts = options.CredentialsOptions(parser) +parser.add_option_group(credopts) +opts = parser.parse_args()[0] +lp = sambaopts.get_loadparm() +smbconf = lp.configfile + +creds = credopts.get_credentials(lp) +creds.set_kerberos_state(DONT_USE_KERBEROS) +session = system_session() +paths = get_paths(param, smbconf=smbconf) +basedn="DC=" + lp.get("realm").replace(".",",DC=") +samdb = Ldb(paths.samdb, session_info=session, credentials=creds,lp=lp) + +res = samdb.search(base="", scope=ldb.SCOPE_BASE, attrs=["dsServiceName"]) + +invocation = None +if res and len(res) == 1 and 
res[0]["dsServiceName"] != None: + dn = ldb.Dn(samdb, str(res[0]["dsServiceName"])) + res = samdb.search(base=str(dn), scope=ldb.SCOPE_BASE, attrs=["invocationId"], + controls=["search_options:1:2"]) + + if res and len(res) == 1 and res[0]["invocationId"]: + invocation = str(ndr_unpack(misc.GUID, res[0]["invocationId"][0])) + else: + print("Unable to find invocation ID") + sys.exit(1) +else: + print("Unable to find attribute dsServiceName in rootDSE") + sys.exit(1) + +minobj = 5 +(hash_id, nb_obj) = findprovisionrange(samdb, basedn) +print("Here is a list of changes that modified more than %d objects in 1 minute." % minobj) +print("Usually changes made by provision and upgradeprovision are those who affect a couple" + " of hundred of objects or more") +print("Total number of objects: %d\n" % nb_obj) + +print_provision_ranges(hash_id, minobj, opts.storedir, str(paths.samdb), invocation) diff --git a/source4/scripting/bin/gen_error_common.py b/source4/scripting/bin/gen_error_common.py new file mode 100644 index 0000000..aa71afa --- /dev/null +++ b/source4/scripting/bin/gen_error_common.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python3 + +# +# Unix SMB/CIFS implementation. +# +# Utility methods for generating error codes from a file. +# +# Copyright (C) Noel Power <noel.power@suse.com> 2014 +# Copyright (C) Catalyst IT Ltd. 2017 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+# + +# error data model +class ErrorDef: + def __init__(self): + self.err_code = None + self.err_define = None + self.err_string = "" + self.linenum = "" + +def escapeString( input ): + output = input.replace('"','\\"') + output = output.replace("\\<","\\\\<") + output = output.replace('\t',"") + return output + +# Parse error descriptions from a file which is the content +# of an HTML table. +# The file must be formatted as: +# [error code hex] +# [error name short] +# [error description] +# Blank lines are allowed and errors do not have to have a +# description. +# Returns a list of ErrorDef objects. +def parseErrorDescriptions( file_contents, isWinError, transformErrorFunction ): + errors = [] + count = 0 + for line in file_contents: + if line is None or line == '\t' or line == "" or line == '\n': + continue + content = line.strip().split(None,1) + # start new error definition ? + if line.startswith("0x"): + newError = ErrorDef() + newError.err_code = int(content[0],0) + # escape the usual suspects + if len(content) > 1: + newError.err_string = escapeString(content[1]) + newError.linenum = count + newError.isWinError = isWinError + errors.append(newError) + else: + if len(errors) == 0: + continue + err = errors[-1] + if err.err_define is None: + err.err_define = transformErrorFunction(content[0]) + else: + if len(content) > 0: + desc = escapeString(line.strip()) + if len(desc): + if err.err_string == "": + err.err_string = desc + else: + err.err_string = err.err_string + " " + desc + count = count + 1 + print("parsed %d lines generated %d error definitions"%(count,len(errors))) + return errors + diff --git a/source4/scripting/bin/gen_hresult.py b/source4/scripting/bin/gen_hresult.py new file mode 100755 index 0000000..6a75c37 --- /dev/null +++ b/source4/scripting/bin/gen_hresult.py @@ -0,0 +1,228 @@ +#!/usr/bin/env python3 + +# +# Unix SMB/CIFS implementation. 
+# +# HRESULT Error definitions +# +# Copyright (C) Noel Power <noel.power@suse.com> 2014 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + + +import sys, os.path, io, string + +# parsed error data +Errors = [] + +# error data model +class ErrorDef: + + def __init__(self): + self.err_code = "" + self.err_define = None + self.err_string = "" + self.isWinError = False + self.linenum = "" + +def escapeString( input ): + output = input.replace('"','\\"') + output = output.replace("\\<","\\\\<") + output = output.replace('\t',"") + return output + +def parseErrorDescriptions( input_file, isWinError ): + # read in the data + fileContents = open(input_file,"r") + count = 0; + for line in fileContents: + content = line.strip().split(None,1) + # start new error definition ? 
+ if len(content) == 0: + continue + if line.startswith("0x"): + newError = ErrorDef() + newError.err_code = content[0] + # escape the usual suspects + if len(content) > 1: + newError.err_string = escapeString(content[1]) + newError.linenum = count + newError.isWinError = isWinError + Errors.append(newError) + else: + if len(Errors) == 0: + print("Error parsing file as line %d"%count) + sys.exit() + err = Errors[-1] + if err.err_define is None: + err.err_define = "HRES_" + content[0] + else: + if len(content) > 0: + desc = escapeString(line.strip()) + if len(desc): + if err.err_string == "": + err.err_string = desc + else: + err.err_string = err.err_string + " " + desc + count = count + 1 + fileContents.close() + print("parsed %d lines generated %d error definitions"%(count,len(Errors))) + +def write_license(out_file): + out_file.write("/*\n") + out_file.write(" * Unix SMB/CIFS implementation.\n") + out_file.write(" *\n") + out_file.write(" * HRESULT Error definitions\n") + out_file.write(" *\n") + out_file.write(" * Copyright (C) Noel Power <noel.power@suse.com> 2014\n") + out_file.write(" *\n") + out_file.write(" * This program is free software; you can redistribute it and/or modify\n") + out_file.write(" * it under the terms of the GNU General Public License as published by\n") + out_file.write(" * the Free Software Foundation; either version 3 of the License, or\n") + out_file.write(" * (at your option) any later version.\n") + out_file.write(" *\n") + out_file.write(" * This program is distributed in the hope that it will be useful,\n") + out_file.write(" * but WITHOUT ANY WARRANTY; without even the implied warranty of\n") + out_file.write(" * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n") + out_file.write(" * GNU General Public License for more details.\n") + out_file.write(" *\n") + out_file.write(" * You should have received a copy of the GNU General Public License\n") + out_file.write(" * along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n") + out_file.write(" */\n") + out_file.write("\n") + +def generateHeaderFile(out_file): + write_license(out_file) + out_file.write("#ifndef _HRESULT_H_\n") + out_file.write("#define _HRESULT_H_\n\n") + macro_magic = "#if defined(HAVE_IMMEDIATE_STRUCTURES)\n" + macro_magic += "typedef struct {uint32_t h;} HRESULT;\n" + macro_magic += "#define HRES_ERROR(x) ((HRESULT) { x })\n" + macro_magic += "#define HRES_ERROR_V(x) ((x).h)\n" + macro_magic += "#else\n" + macro_magic += "typedef uint32_t HRESULT;\n" + macro_magic += "#define HRES_ERROR(x) (x)\n" + macro_magic += "#define HRES_ERROR_V(x) (x)\n" + macro_magic += "#endif\n" + macro_magic += "\n" + macro_magic += "#define HRES_IS_OK(x) (HRES_ERROR_V(x) == 0)\n" + macro_magic += "#define HRES_IS_EQUAL(x,y) (HRES_ERROR_V(x) == HRES_ERROR_V(y))\n" + + out_file.write(macro_magic) + out_file.write("\n\n") + out_file.write("/*\n") + out_file.write(" * The following error codes are autogenerated from [MS-ERREF]\n") + out_file.write(" * see http://msdn.microsoft.com/en-us/library/cc704587.aspx\n") + out_file.write(" */\n") + out_file.write("\n") + + for err in Errors: + line = "#define {0:49} HRES_ERROR({1})\n".format(err.err_define ,err.err_code) + out_file.write(line) + out_file.write("\nconst char *hresult_errstr_const(HRESULT err_code);\n") + out_file.write("\nconst char *hresult_errstr(HRESULT err_code);\n") + out_file.write("\n#define FACILITY_WIN32 0x0007\n") + out_file.write("#define WIN32_FROM_HRESULT(x) (HRES_ERROR_V(x) == 0 ? 
HRES_ERROR_V(x) : ~((FACILITY_WIN32 << 16) | 0x80000000) & HRES_ERROR_V(x))\n") + out_file.write("#define HRESULT_IS_LIKELY_WERR(x) ((HRES_ERROR_V(x) & 0xFFFF0000) == 0x80070000)\n") + out_file.write("#define HRESULT_FROM_WERROR(x) (HRES_ERROR(0x80070000 | W_ERROR_V(x)))\n") + out_file.write("\n\n\n#endif /*_HRESULT_H_*/") + + +def generateSourceFile(out_file): + write_license(out_file) + out_file.write("#include \"includes.h\"\n") + out_file.write("#include \"hresult.h\"\n") + out_file.write("/*\n") + out_file.write(" * The following error codes and descriptions are autogenerated from [MS-ERREF]\n") + out_file.write(" * see http://msdn.microsoft.com/en-us/library/cc704587.aspx\n") + out_file.write(" */\n") + out_file.write("\n") + out_file.write("static const struct {\n") + out_file.write(" HRESULT error_code;\n") + out_file.write(" const char *error_str;\n") + out_file.write(" const char *error_message;\n") + out_file.write("} hresult_errs[] = {\n") + + for err in Errors: + out_file.write(" {\n") + if err.isWinError: + out_file.write(" HRESULT_FROM_WIN32(%s),\n"%err.err_define) + out_file.write(" \"HRESULT_FROM_WIN32(%s)\",\n"%err.err_define) + else: + out_file.write(" %s,\n"%err.err_define) + out_file.write(" \"%s\",\n"%err.err_define) + out_file.write(" \"%s\"\n"%err.err_string) + out_file.write(" },\n") + out_file.write("};\n") + out_file.write("\n") + out_file.write("const char *hresult_errstr_const(HRESULT err_code)\n") + out_file.write("{\n"); + out_file.write(" const char *result = NULL;\n") + out_file.write(" int i;\n") + out_file.write(" for (i = 0; i < ARRAY_SIZE(hresult_errs); ++i) {\n") + out_file.write(" if (HRES_IS_EQUAL(err_code, hresult_errs[i].error_code)) {\n") + out_file.write(" result = hresult_errs[i].error_message;\n") + out_file.write(" break;\n") + out_file.write(" }\n") + out_file.write(" }\n") + out_file.write(" /* convert & check win32 error space? 
*/\n") + out_file.write(" if (result == NULL && HRESULT_IS_LIKELY_WERR(err_code)) {\n") + out_file.write(" WERROR wErr = W_ERROR(WIN32_FROM_HRESULT(err_code));\n") + out_file.write(" result = get_friendly_werror_msg(wErr);\n") + out_file.write(" }\n") + out_file.write(" return result;\n") + out_file.write("};\n") + out_file.write("\n") + out_file.write("const char *hresult_errstr(HRESULT err_code)\n") + out_file.write("{\n"); + out_file.write(" static char msg[22];\n") + out_file.write(" int i;\n") + out_file.write("\n") + out_file.write(" for (i = 0; i < ARRAY_SIZE(hresult_errs); i++) {\n") + out_file.write(" if (HRES_IS_EQUAL(err_code, hresult_errs[i].error_code)) {\n") + out_file.write(" return hresult_errs[i].error_str;\n") + out_file.write(" }\n") + out_file.write(" }\n") + out_file.write(" snprintf(msg, sizeof(msg), \"HRES code 0x%08x\", HRES_ERROR_V(err_code));\n") + out_file.write(" return msg;\n") + out_file.write("};\n") + +# Very simple script to generate files hresult.c & hresult.h +# The script simply takes a text file as input, format of input file is +# very simple and is just the content of a html table ( such as that found +# in http://msdn.microsoft.com/en-us/library/cc704587.aspx ) copied and +# pasted into a text file + +def main (): + input_file1 = None; + filename = "hresult" + headerfile_name = filename + ".h" + sourcefile_name = filename + ".c" + if len(sys.argv) > 1: + input_file1 = sys.argv[1] + else: + print("usage: %s winerrorfile"%(sys.argv[0])) + sys.exit() + + parseErrorDescriptions(input_file1, False) + out_file = open(headerfile_name,"w") + generateHeaderFile(out_file) + out_file.close() + out_file = open(sourcefile_name,"w") + generateSourceFile(out_file) + +if __name__ == '__main__': + + main() diff --git a/source4/scripting/bin/gen_ntstatus.py b/source4/scripting/bin/gen_ntstatus.py new file mode 100755 index 0000000..b4a9bfc --- /dev/null +++ b/source4/scripting/bin/gen_ntstatus.py @@ -0,0 +1,148 @@ +#!/usr/bin/env python3 + +# 
+# Unix SMB/CIFS implementation. +# +# HRESULT Error definitions +# +# Copyright (C) Noel Power <noel.power@suse.com> 2014 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +import sys, os.path, io, string +from gen_error_common import parseErrorDescriptions, ErrorDef + +def generateHeaderFile(out_file, errors): + out_file.write("/*\n") + out_file.write(" * Descriptions for errors generated from\n") + out_file.write(" * [MS-ERREF] http://msdn.microsoft.com/en-us/library/cc704588.aspx\n") + out_file.write(" */\n\n") + out_file.write("#ifndef _NTSTATUS_GEN_H\n") + out_file.write("#define _NTSTATUS_GEN_H\n") + for err in errors: + line = "#define %s NT_STATUS(%#x)\n" % (err.err_define, err.err_code) + out_file.write(line) + out_file.write("\n#endif /* _NTSTATUS_GEN_H */\n") + +def generateSourceFile(out_file, errors): + out_file.write("/*\n") + out_file.write(" * Names for errors generated from\n") + out_file.write(" * [MS-ERREF] http://msdn.microsoft.com/en-us/library/cc704588.aspx\n") + out_file.write(" */\n") + + out_file.write("static const nt_err_code_struct nt_errs[] = \n") + out_file.write("{\n") + for err in errors: + out_file.write("\t{ \"%s\", %s },\n" % (err.err_define, err.err_define)) + out_file.write("{ 0, NT_STATUS(0) }\n") + out_file.write("};\n") + + out_file.write("\n/*\n") + out_file.write(" * Descriptions for errors generated from\n") + out_file.write(" * 
[MS-ERREF] http://msdn.microsoft.com/en-us/library/cc704588.aspx\n") + out_file.write(" */\n") + + out_file.write("static const nt_err_code_struct nt_err_desc[] = \n") + out_file.write("{\n") + for err in errors: + # Account for the possibility that some errors may not have descriptions + if err.err_string == "": + continue + out_file.write("\t{ N_(\"%s\"), %s },\n"%(err.err_string, err.err_define)) + out_file.write("{ 0, NT_STATUS(0) }\n") + out_file.write("};") + +def generatePythonFile(out_file, errors): + out_file.write("/*\n") + out_file.write(" * New descriptions for existing errors generated from\n") + out_file.write(" * [MS-ERREF] http://msdn.microsoft.com/en-us/library/cc704588.aspx\n") + out_file.write(" */\n") + out_file.write("#include <Python.h>\n") + out_file.write("#include \"python/py3compat.h\"\n") + out_file.write("#include \"includes.h\"\n\n") + # This is needed to avoid a missing prototype error from the C + # compiler. There is never a prototype for this function, it is a + # module loaded by python with dlopen() and found with dlsym(). 
+ out_file.write("static struct PyModuleDef moduledef = {\n") + out_file.write("\tPyModuleDef_HEAD_INIT,\n") + out_file.write("\t.m_name = \"ntstatus\",\n") + out_file.write("\t.m_doc = \"NTSTATUS error defines\",\n") + out_file.write("\t.m_size = -1,\n") + out_file.write("};\n\n") + out_file.write("MODULE_INIT_FUNC(ntstatus)\n") + out_file.write("{\n") + out_file.write("\tPyObject *m;\n\n") + out_file.write("\tm = PyModule_Create(&moduledef);\n"); + out_file.write("\tif (m == NULL)\n"); + out_file.write("\t\treturn NULL;\n\n"); + for err in errors: + line = """\tPyModule_AddObject(m, \"%s\", + \t\tPyLong_FromUnsignedLongLong(NT_STATUS_V(%s)));\n""" % (err.err_define, err.err_define) + out_file.write(line) + out_file.write("\n"); + out_file.write("\treturn m;\n"); + out_file.write("}\n"); + +def transformErrorName( error_name ): + if error_name.startswith("STATUS_"): + error_name = error_name.replace("STATUS_", "", 1) + elif error_name.startswith("RPC_NT_"): + error_name = error_name.replace("RPC_NT_", "RPC_", 1) + elif error_name.startswith("EPT_NT_"): + error_name = error_name.replace("EPT_NT_", "EPT_", 1) + return "NT_STATUS_" + error_name + +# Very simple script to generate files nterr_gen.c & ntstatus_gen.h. +# These files contain generated definitions. +# This script takes four inputs: +# [1]: The name of the text file which is the content of an HTML table +# (e.g. the one found at http://msdn.microsoft.com/en-us/library/cc231200.aspx) +# copied and pasted. 
+# [2]: The name of the output generated header file with NTStatus #defines +# [3]: The name of the output generated source file with C arrays +# [4]: The name of the output generated python file +def main (): + input_file = None; + + if len(sys.argv) == 5: + input_file = sys.argv[1] + gen_headerfile_name = sys.argv[2] + gen_sourcefile_name = sys.argv[3] + gen_pythonfile_name = sys.argv[4] + else: + print("usage: %s winerrorfile headerfile sourcefile pythonfile" % (sys.argv[0])) + sys.exit() + + # read in the data + file_contents = io.open(input_file, "rt", encoding='utf8') + + errors = parseErrorDescriptions(file_contents, False, transformErrorName) + + print("writing new header file: %s" % gen_headerfile_name) + out_file = io.open(gen_headerfile_name, "wt", encoding='utf8') + generateHeaderFile(out_file, errors) + out_file.close() + print("writing new source file: %s" % gen_sourcefile_name) + out_file = io.open(gen_sourcefile_name, "wt", encoding='utf8') + generateSourceFile(out_file, errors) + out_file.close() + print("writing new python file: %s" % gen_pythonfile_name) + out_file = io.open(gen_pythonfile_name, "wt", encoding='utf8') + generatePythonFile(out_file, errors) + out_file.close() + +if __name__ == '__main__': + + main() diff --git a/source4/scripting/bin/gen_output.py b/source4/scripting/bin/gen_output.py new file mode 100755 index 0000000..8f5239f --- /dev/null +++ b/source4/scripting/bin/gen_output.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 + +# Copyright (C) Catalyst IT Ltd. 2017 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +""" +A data generator to help tests. + +Generate large output to stdout by repeating input data. +Usage: + + python gen_output.py --data @ --repeat 1024 --retcode 1 + +The above command will output @ x 1024 (1K) and exit with 1. +""" + +import sys +import argparse + +parser = argparse.ArgumentParser(description='Generate output data') + +parser.add_argument( + '--data', type=str, default='$', + help='Characters used to generate data by repeating them' +) + +parser.add_argument( + '--repeat', type=int, default=1024 * 1024, + help='How many times to repeat the data' +) + +parser.add_argument( + '--retcode', type=int, default=0, + help='Specify the exit code for this script' +) + +args = parser.parse_args() + +sys.stdout.write(args.data * args.repeat) + +sys.exit(args.retcode) diff --git a/source4/scripting/bin/gen_werror.py b/source4/scripting/bin/gen_werror.py new file mode 100755 index 0000000..1ac9d33 --- /dev/null +++ b/source4/scripting/bin/gen_werror.py @@ -0,0 +1,149 @@ +#!/usr/bin/env python3 + +# +# Unix SMB/CIFS implementation. +# +# WERROR error definition generation +# +# Copyright (C) Catalyst.Net Ltd. 2017 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+# + +import sys, os.path, io, string +from gen_error_common import parseErrorDescriptions, ErrorDef + +def generateHeaderFile(out_file, errors): + out_file.write("/*\n") + out_file.write(" * Descriptions for errors generated from\n") + out_file.write(" * [MS-ERREF] https://msdn.microsoft.com/en-us/library/cc231199.aspx\n") + out_file.write(" */\n\n") + out_file.write("#ifndef _WERR_GEN_H\n") + out_file.write("#define _WERR_GEN_H\n") + for err in errors: + line = "#define %s W_ERROR(%s)\n" % (err.err_define, hex(err.err_code)) + out_file.write(line) + out_file.write("\n#endif /* _WERR_GEN_H */\n") + +def generateSourceFile(out_file, errors): + out_file.write("#include \"werror.h\"\n") + + out_file.write("/*\n") + out_file.write(" * Names for errors generated from\n") + out_file.write(" * [MS-ERREF] https://msdn.microsoft.com/en-us/library/cc231199.aspx\n") + out_file.write(" */\n") + + out_file.write("static const struct werror_code_struct dos_errs[] = \n") + out_file.write("{\n") + for err in errors: + out_file.write("\t{ \"%s\", %s },\n" % (err.err_define, err.err_define)) + out_file.write("{ 0, W_ERROR(0) }\n") + out_file.write("};\n") + + out_file.write("\n/*\n") + out_file.write(" * Descriptions for errors generated from\n") + out_file.write(" * [MS-ERREF] https://msdn.microsoft.com/en-us/library/cc231199.aspx\n") + out_file.write(" */\n") + + out_file.write("static const struct werror_str_struct dos_err_strs[] = \n") + out_file.write("{\n") + for err in errors: + # Account for the possibility that some errors may not have descriptions + if err.err_string == "": + continue + out_file.write("\t{ %s, \"%s\" },\n"%(err.err_define, err.err_string)) + out_file.write("\t{ W_ERROR(0), 0 }\n") + out_file.write("};") + +def generatePythonFile(out_file, errors): + out_file.write("/*\n") + out_file.write(" * Errors generated from\n") + out_file.write(" * [MS-ERREF] https://msdn.microsoft.com/en-us/library/cc231199.aspx\n") + out_file.write(" */\n") + 
out_file.write("#include <Python.h>\n") + out_file.write("#include \"python/py3compat.h\"\n") + out_file.write("#include \"includes.h\"\n\n") + # This is needed to avoid a missing prototype error from the C + # compiler. There is never a prototype for this function, it is a + # module loaded by python with dlopen() and found with dlsym(). + out_file.write("static struct PyModuleDef moduledef = {\n") + out_file.write("\tPyModuleDef_HEAD_INIT,\n") + out_file.write("\t.m_name = \"werror\",\n") + out_file.write("\t.m_doc = \"WERROR defines\",\n") + out_file.write("\t.m_size = -1,\n") + out_file.write("};\n\n") + out_file.write("MODULE_INIT_FUNC(werror)\n") + out_file.write("{\n") + out_file.write("\tPyObject *m;\n\n") + out_file.write("\tm = PyModule_Create(&moduledef);\n"); + out_file.write("\tif (m == NULL)\n"); + out_file.write("\t\treturn NULL;\n\n"); + for err in errors: + line = """\tPyModule_AddObject(m, \"%s\", + \t\tPyLong_FromUnsignedLongLong(W_ERROR_V(%s)));\n""" % (err.err_define, err.err_define) + out_file.write(line) + out_file.write("\n"); + out_file.write("\treturn m;\n"); + out_file.write("}\n"); + +def transformErrorName( error_name ): + if error_name.startswith("WERR_"): + error_name = error_name.replace("WERR_", "", 1) + elif error_name.startswith("ERROR_"): + error_name = error_name.replace("ERROR_", "", 1) + return "WERR_" + error_name.upper() + +# Script to generate files werror_gen.h, doserr_gen.c and +# py_werror.c. +# +# These files contain generated definitions for WERRs and +# their descriptions/names. +# +# This script takes four inputs: +# [1]: The name of the text file which is the content of an HTML table +# (e.g. the one found at https://msdn.microsoft.com/en-us/library/cc231199.aspx) +# copied and pasted. 
+# [2]: [[output werror_gen.h]] +# [3]: [[output doserr_gen.c]] +# [4]: [[output py_werror.c]] +def main(): + if len(sys.argv) == 5: + input_file_name = sys.argv[1] + gen_headerfile_name = sys.argv[2] + gen_sourcefile_name = sys.argv[3] + gen_pythonfile_name = sys.argv[4] + else: + print("usage: %s winerrorfile headerfile sourcefile pythonfile" % sys.argv[0]) + sys.exit() + + input_file = io.open(input_file_name, "rt", encoding='utf8') + errors = parseErrorDescriptions(input_file, True, transformErrorName) + input_file.close() + + print("writing new header file: %s" % gen_headerfile_name) + out_file = io.open(gen_headerfile_name, "wt", encoding='utf8') + generateHeaderFile(out_file, errors) + out_file.close() + print("writing new source file: %s" % gen_sourcefile_name) + out_file = io.open(gen_sourcefile_name, "wt", encoding='utf8') + generateSourceFile(out_file, errors) + out_file.close() + print("writing new python file: %s" % gen_pythonfile_name) + out_file = io.open(gen_pythonfile_name, "wt", encoding='utf8') + generatePythonFile(out_file, errors) + out_file.close() + +if __name__ == '__main__': + + main() diff --git a/source4/scripting/bin/get-descriptors b/source4/scripting/bin/get-descriptors new file mode 100755 index 0000000..6e69222 --- /dev/null +++ b/source4/scripting/bin/get-descriptors @@ -0,0 +1,154 @@ +#!/usr/bin/env python3 +# +# Unix SMB/CIFS implementation. 
+# A script to compare differences of security descriotors between +# a remote host and the local Ldb +# Needs the local domain, the remote domain, IP of the remote host +# Username and password for the remote domain, must be at least +# Domain Administrator +# +# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2008 +# Copyright (C) Nadezhda Ivanova <nadezhda.ivanova@postpath.com> 2009 +# +# Based on the original in EJS: +# Copyright (C) Andrew Tridgell <tridge@samba.org> 2005 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+# + +import optparse +import sys +import base64 + +sys.path.insert(0, "bin/python") + +import samba +from samba.auth import system_session +import samba.getopt as options +from samba.ndr import ndr_pack, ndr_unpack +from samba.dcerpc import security +from samba import Ldb +from samba.samdb import SamDB +from ldb import SCOPE_SUBTREE, SCOPE_BASE + +parser = optparse.OptionParser("get-descriptors [options]") +sambaopts = options.SambaOptions(parser) +credopts = options.CredentialsOptions(parser) +parser.add_option_group(credopts) + +parser.add_option("--local-domain", type="string", metavar="LOCALDOMAIN", + help="set local domain") +parser.add_option("--remote-domain", type="string", metavar="REMOTEDOMAIN", + help="set remote domain") +parser.add_option("--host", type="string", metavar="HOST", + help="Ip of the remote host used for comparison") +parser.add_option("--as-ldif", help="Output in LDIF format", action="store_true") + +lp = sambaopts.get_loadparm() +creds = credopts.get_credentials(lp) + +opts = parser.parse_args()[0] + +if not opts.host or not opts.localdomain or not opts.remote_domain: + parser.print_usage() + sys.exit(1) + +class DescrGetter: + + def __init__(self, localdomain, remotedomain): + self.samdb = SamDB(session_info=system_session(), lp=lp, options=["modules:paged_searches"]) + self.remote_ldb= Ldb("ldap://" + opts.host + ":389", credentials=creds, lp=lp, + options=["modules:paged_searches"]) + self.local_domain = localdomain.replace(".", ",DC=") + self.local_domain = "DC=" + self.local_domain + self.remote_domain = remotedomain.replace(".", ",DC=") + self.remote_domain = "DC=" + self.remote_domain + self.local_map = {} + self.remote_map = {} + + def get_domain_local_sid(self): + res = self.samdb.search(base=self.local_domain,expression="(objectClass=*)", scope=SCOPE_BASE) + self.local_sid = ndr_unpack( security.dom_sid,res[0]["objectSid"][0]) + + def get_domain_remote_sid(self): + res = self.remote_ldb.search(base=self.remote_domain, 
expression="(objectClass=*)", scope=SCOPE_BASE) + self.remote_sid = ndr_unpack( security.dom_sid,res[0]["objectSid"][0]) + + def add_to_ldif(self, dn, descr): + ldif_entry = ["dn: " + dn, + "changetype: modify", + "replace: nTSecurityDescriptor", + "nTSecurityDescriptor:: " + base64.b64encode(ndr_pack(descr)).decode('utf8')] + + for line in ldif_entry: + length = 79 + if len(line) <= length + 1: + print(line) + else: + for i in range(len(line) / length + 1): + if i == 0: + l = line[i * length:((i + 1) * length)] + else: + l = " " + line[(i * length):((i + 1) * length)] + print(l) + print("\n") + + def write_as_sddl(self, dn, descr): + print(dn) + print(descr + "\n") + + def read_descr_by_base(self, search_base): + res = self.samdb.search(base=search_base + self.local_domain, expression="(objectClass=*)", scope=SCOPE_SUBTREE, attrs=["nTSecurityDescriptor"]) + for entry in res: + dn = entry["dn"].__str__().replace(self.local_domain, "") + + if "nTSecurityDescriptor" in entry: + desc_obj = ndr_unpack(security.descriptor, entry["nTSecurityDescriptor"][0]) + self.local_map[dn] = desc_obj + + res = self.remote_ldb.search(base=search_base + self.remote_domain, expression="(objectClass=*)", scope=SCOPE_SUBTREE, attrs=["nTSecurityDescriptor"]) + for entry in res: + dn = entry["dn"].__str__().replace(self.remote_domain, "") + + if "nTSecurityDescriptor" in entry: + desc_obj = ndr_unpack(security.descriptor, entry["nTSecurityDescriptor"][0]) + self.remote_map[dn] = desc_obj + + def read_desc(self): + self.read_descr_by_base("CN=Schema,CN=Configuration,") + self.read_descr_by_base("CN=Configuration,") + self.read_descr_by_base("") + + def write_desc_to_ldif(self): + key_list_local = self.local_map.keys() + key_list_remote = self.remote_map.keys() + for key in key_list_remote: + if key in key_list_local: + sddl = self.remote_map[key].as_sddl(self.remote_sid) + sddl_local = self.local_map[key].as_sddl(self.local_sid) + if sddl != sddl_local: + descr = 
security.descriptor.from_sddl(sddl, self.local_sid) + if opts.as_ldif: + self.add_to_ldif(key + self.local_domain, descr) + else: + self.write_as_sddl(key, descr.as_sddl(self.local_sid)) + + def run(self): + self.get_domain_local_sid() + self.get_domain_remote_sid() + self.read_desc() + self.write_desc_to_ldif() + +desc = DescrGetter(opts.local_domain, opts.remote_domain) +desc.run() diff --git a/source4/scripting/bin/ktpass.sh b/source4/scripting/bin/ktpass.sh new file mode 100755 index 0000000..a165816 --- /dev/null +++ b/source4/scripting/bin/ktpass.sh @@ -0,0 +1,122 @@ +#!/bin/sh +# vim: expandtab +# +# Copyright (C) Matthieu Patou <mat@matws.net> 2010 +# +# +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ +name="ktpass.sh" +TEMP=$(getopt -o h --long princ:,pass:,out:,host:,ptype:,enc:,path-to-ldbsearch: \ + -n "$name" -- "$@") +eval set -- "$TEMP" + +usage() +{ + echo -ne "$name --out <keytabfile> --princ <principal> --pass <password>|*\n" + echo -ne " [--host hostname] [--enc <encryption>]\n" + echo -ne " [--ptype <type>] [--path-to-ldbsearch <path>]\n" + echo -ne "\nEncoding should be one of:\n" + echo -ne " * des-cbc-crc\n" + echo -ne " * des-cbc-md5\n" + echo -ne " * rc4-hmac (default)\n" + echo -ne " * aes128-cts\n" + echo -ne " * aes256-cts\n" + exit 0 +} +while true; do + case "$1" in + --out) + outfile=$2 + shift 2 + ;; + --princ) + princ=$2 + shift 2 + ;; + --pass) + pass=$2 + shift 2 + ;; + --host) + host=$2 + shift 2 + ;; + --ptype) shift 2 ;; + --enc) + enc=$2 + shift 2 + ;; + --path-to-ldbsearch) + path="$2/" + shift 2 + ;; + -h) usage ;; + --) + shift + break + ;; + *) + echo "Internal error!" + exit 1 + ;; + esac +done +#RC4-HMAC-NT|AES256-SHA1|AES128-SHA +if [ -z "$enc" ]; then + enc="rc4-hmac" +fi +if [ -z "$path" ]; then + path=$(dirname $0)/../bin/ + if [ ! 
-f ${path}ldbsearch ]; then + path=$(dirname $0)/../../bin/ + fi +fi +if [ -z "$outfile" -o -z "$princ" -o -z "$pass" ]; then + echo "At least one mandatory parameter (--out, --princ, --pass) was not specified" + usage +fi +if [ -z $host ]; then + host=$(hostname) +fi + +kvno=$(${path}ldbsearch -H ldap://$host "(|(samaccountname=$princ)(serviceprincipalname=$princ)(userprincipalname=$princ))" msds-keyversionnumber -k 1 -N 2>/dev/null | grep -i msds-keyversionnumber) +if [ x"$kvno" = x"" ]; then + echo -ne "Unable to find kvno for principal $princ\n" + echo -ne " check that you are authentified with kerberos\n" + exit 1 +else + kvno=$(echo $kvno | sed 's/^.*: //') +fi + +if [ "$pass" = "*" ]; then + echo -n "Enter password for $princ: " + stty -echo + read pass + stty echo + echo "" +fi + +ktutil >/dev/null <<EOF +add_entry -password -p $princ -k $kvno -e $enc +$pass +wkt $outfile +EOF + +if [ $? -eq 0 ]; then + echo "Keytab file $outfile created with success" +else + echo "Error while creating the keytab file $outfile" +fi diff --git a/source4/scripting/bin/machineaccountccache b/source4/scripting/bin/machineaccountccache new file mode 100755 index 0000000..5e6d3c5 --- /dev/null +++ b/source4/scripting/bin/machineaccountccache @@ -0,0 +1,30 @@ +#!/usr/bin/env python3 +import optparse +import sys + +# Find right directory when running from source tree +sys.path.insert(0, "bin/python") + + +import samba +from samba import getopt as options +from samba.credentials import Credentials +parser = optparse.OptionParser("machineaccountccache <ccache name>") +sambaopts = options.SambaOptions(parser) +parser.add_option_group(sambaopts) +parser.add_option_group(options.VersionOptions(parser)) +opts, args = parser.parse_args() + +if len(args) != 1: + parser.print_usage() + sys.exit(1) + +ccachename = args[0] + +lp_ctx = sambaopts.get_loadparm() + +creds = Credentials() + +creds.guess(lp_ctx) +creds.set_machine_account(lp_ctx) +creds.get_named_ccache(lp_ctx, ccachename) diff 
--git a/source4/scripting/bin/machineaccountpw b/source4/scripting/bin/machineaccountpw
new file mode 100755
index 0000000..eab773e
--- /dev/null
+++ b/source4/scripting/bin/machineaccountpw
@@ -0,0 +1,42 @@
#!/usr/bin/env python3
# Print the machine account password stored in the secrets database.
import optparse
import sys

# Find right directory when running from source tree
sys.path.insert(0, "bin/python")


import samba
from samba import getopt as options
from samba import NTSTATUSError
from samba.credentials import Credentials
parser = optparse.OptionParser("machineaccountpw")
sambaopts = options.SambaOptions(parser)
parser.add_option_group(sambaopts)
parser.add_option_group(options.VersionOptions(parser))
opts, args = parser.parse_args()

# This tool takes no positional arguments.
if len(args) != 0:
    parser.print_usage()
    sys.exit(1)

try:
    lp_ctx = sambaopts.get_loadparm()
except RuntimeError as error:
    print("Unable to load smb.conf %s: %s" % (sambaopts.get_loadparm_path(),
          error),
          file=sys.stderr)
    sys.exit(1)

creds = Credentials()

creds.guess(lp_ctx)
try:
    creds.set_machine_account(lp_ctx)
except NTSTATUSError as error:
    # error.args[1] is the NT status message text
    print("Failed to find a stored machine account credential on this system: %s" \
          % error.args[1],
          file=sys.stderr)
    sys.exit(1)

print(creds.get_password())
diff --git a/source4/scripting/bin/nsupdate-gss b/source4/scripting/bin/nsupdate-gss
new file mode 100755
index 0000000..509220d
--- /dev/null
+++ b/source4/scripting/bin/nsupdate-gss
@@ -0,0 +1,352 @@
#!/usr/bin/perl -w
# update a win2000 DNS server using gss-tsig
# tridge@samba.org, October 2002

# jmruiz@animatika.net
# updated, 2004-Enero

# tridge@samba.org, September 2009
# added --verbose, --noverify, --ntype and --nameserver

# See draft-ietf-dnsext-gss-tsig-02, RFC2845 and RFC2930

use strict;
use lib "GSSAPI";
use Net::DNS;
use GSSAPI;
use Getopt::Long;

# Command-line option state (defaults below)
my $opt_wipe = 0;
my $opt_add = 0;
my $opt_noverify = 0;
my $opt_verbose = 0;
my $opt_help = 0;
my $opt_nameserver;
my $opt_realm;
my $opt_ntype = "A";

#
# main program
GetOptions (
    'h|help|?' => \$opt_help,
    'wipe' => \$opt_wipe,
    'realm=s' => \$opt_realm,
    'nameserver=s' => \$opt_nameserver,
    'ntype=s' => \$opt_ntype,
    'add' => \$opt_add,
    'noverify' => \$opt_noverify,
    'verbose' => \$opt_verbose
    );

#########################################
# display help text
sub ShowHelp()
{
    print "
 nsupdate with gssapi
 Copyright (C) tridge\@samba.org

 Usage: nsupdate-gss [options] HOST DOMAIN TARGET TTL

 Options:
 --wipe wipe all records for this name
 --add add to any existing records
 --ntype=TYPE specify name type (default A)
 --nameserver=server specify a specific nameserver
 --noverify don't verify the MIC of the reply
 --verbose show detailed steps

";
    exit(0);
}

if ($opt_help) {
    ShowHelp();
}

# Exactly four positional arguments are required: HOST DOMAIN TARGET TTL
if ($#ARGV != 3) {
    ShowHelp();
}


my $host = $ARGV[0];
my $domain = $ARGV[1];
my $target = $ARGV[2];
my $ttl = $ARGV[3];
# TSIG algorithm name used by Windows for GSS-signed updates
my $alg = "gss.microsoft.com";



#######################################################################
# signing callback function for TSIG module
sub gss_sign($$)
{
    my $key = shift;
    my $data = shift;
    my $sig;
    # get_mic() fills $sig with the GSSAPI message integrity code for $data
    $key->get_mic(0, $data, $sig);
    return $sig;
}



#####################################################################
# write a string into a file
sub FileSave($$)
{
    my($filename) = shift;
    my($v) = shift;
    local(*FILE);
    open(FILE, ">$filename") || die "can't open $filename";
    print FILE $v;
    close(FILE);
}


#######################################################################
# verify a TSIG signature from a DNS server reply
#
sub sig_verify($$)
{
    my $context = shift;
    my $packet = shift;

    # The TSIG record is the first (and only) record in the additional section
    my $tsig = ($packet->additional)[0];
    $opt_verbose && print "calling sig_data\n";
    my $sigdata = $tsig->sig_data($packet);

    $opt_verbose && print "sig_data_done\n";

    # NOTE(review): reaches into the RR's internals via $tsig->{"mac"} —
    # depends on Net::DNS implementation details; confirm against the
    # installed Net::DNS version.
    return $context->verify_mic($sigdata, $tsig->{"mac"}, 0);
}


#######################################################################
# find the nameserver for the domain
#
sub find_nameserver($)
{
    my $server_name = shift;
    # Talk only to this server, no recursion
    return Net::DNS::Resolver->new(
        nameservers => [$server_name],
        recurse => 0,
        debug => 0);
}


#######################################################################
# find a server name for a domain - currently uses the NS record
sub find_server_name($)
{
    my $domain = shift;
    my $res = Net::DNS::Resolver->new;
    my $srv_query = $res->query("$domain.", "NS");
    if (!defined($srv_query)) {
        return undef;
    }
    my $server_name;
    # keeps the *last* NS record found
    foreach my $rr (grep { $_->type eq 'NS' } $srv_query->answer) {
        $server_name = $rr->nsdname;
    }
    return $server_name;
}

#######################################################################
# Negotiate a TKEY (RFC2930) with the nameserver, establishing a GSSAPI
# security context that is later used to TSIG-sign the dynamic update.
#
sub negotiate_tkey($$$$)
{

    my $nameserver = shift;
    my $domain = shift;
    my $server_name = shift;
    my $key_name = shift;

    my $status;

    my $context = GSSAPI::Context->new;
    my $name = GSSAPI::Name->new;

    # use a principal name of dns/server@REALM
    $opt_verbose &&
        print "Using principal dns/" . $server_name . "@" . uc($opt_realm) . "\n";
    $status = $name->import($name, "dns/" . $server_name . "@" . uc($opt_realm));
    if (! $status) {
        print "import name: $status\n";
        return undef;
    }

    my $flags =
        GSS_C_REPLAY_FLAG | GSS_C_MUTUAL_FLAG |
        GSS_C_SEQUENCE_FLAG | GSS_C_CONF_FLAG |
        GSS_C_INTEG_FLAG;


    $status = GSSAPI::Cred::acquire_cred(undef, 120, undef, GSS_C_INITIATE,
                                         my $cred, my $oidset, my $time);

    if (! $status) {
        print "acquire_cred: $status\n";
        return undef;
    }

    $opt_verbose && print "creds acquired\n";

    # call gss_init_sec_context()
    $status = $context->init($cred, $name, undef, $flags,
                             0, undef, "", undef, my $tok,
                             undef, undef);
    if (! $status) {
        print "init_sec_context: $status\n";
        return undef;
    }

    $opt_verbose && print "init done\n";

    my $gss_query = Net::DNS::Packet->new("$key_name", "TKEY", "IN");

    # note that Windows2000 uses a SPNEGO wrapping on GSSAPI data sent to the nameserver.
    # I tested using the gen_negTokenTarg() call from Samba 3.0 and it does work, but
    # for this utility it is better to use plain GSSAPI/krb5 data so as to reduce the
    # dependence on external libraries. If we ever want to sign DNS packets using
    # NTLMSSP instead of krb5 then the SPNEGO wrapper could be used

    $opt_verbose && print "calling RR new\n";

    # NOTE(review): $a is Perl's sort() package global, not a lexical —
    # works, but shadows the sort variable.
    $a = Net::DNS::RR->new(
        Name => "$key_name",
        Type => "TKEY",
        TTL => 0,
        Class => "ANY",
        mode => 3,
        algorithm => $alg,
        inception => time,
        expiration => time + 24*60*60,
        key => $tok,
        other_data => "",
        );

    $gss_query->push("answer", $a);

    my $reply = $nameserver->send($gss_query);

    if (!defined($reply) || $reply->header->{'rcode'} ne 'NOERROR') {
        print "failed to send TKEY\n";
        return undef;
    }

    # The server's GSSAPI token comes back in the TKEY answer RR
    my $key2 = ($reply->answer)[0]->{"key"};

    # call gss_init_sec_context() again. Strictly speaking
    # we should loop until this stops returning CONTINUE
    # but I'm a lazy bastard
    $status = $context->init($cred, $name, undef, $flags,
                             0, undef, $key2, undef, $tok,
                             undef, undef);
    if (! $status) {
        print "init_sec_context step 2: $status\n";
        return undef;
    }

    if (!$opt_noverify) {
        $opt_verbose && print "verifying\n";

        # check the signature on the TKEY reply
        # (a verification failure is reported but deliberately tolerated —
        # note the commented-out "return undef" below)
        my $rc = sig_verify($context, $reply);
        if (! $rc) {
            print "Failed to verify TKEY reply: $rc\n";
#           return undef;
        }

        $opt_verbose && print "verifying done\n";
    }

    return $context;
}


#######################################################################
# MAIN
#######################################################################

if (!$opt_realm) {
    $opt_realm = $domain;
}

# find the name of the DNS server
if (!$opt_nameserver) {
    $opt_nameserver = find_server_name($domain);
    if (!defined($opt_nameserver)) {
        print "Failed to find a DNS server name for $domain\n";
        exit 1;
    }
}
$opt_verbose && print "Using DNS server name $opt_nameserver\n";

# connect to the nameserver
my $nameserver = find_nameserver($opt_nameserver);
if (!defined($nameserver) || $nameserver->{'errorstring'} ne 'NOERROR') {
    print "Failed to connect to nameserver for domain $domain\n";
    exit 1;
}


# use a long random key name
my $key_name = int(rand 10000000000000);

# negotiate a TKEY key
my $gss_context = negotiate_tkey($nameserver, $domain, $opt_nameserver, $key_name);
if (!defined($gss_context)) {
    print "Failed to negotiate a TKEY\n";
    exit 1;
}
$opt_verbose && print "Negotiated TKEY $key_name\n";

# construct a signed update: prerequisite that the zone exists, then
# delete existing records unless --add, and add the new one unless --wipe
my $update = Net::DNS::Update->new($domain);

$update->push("pre", yxdomain("$domain"));
if (!$opt_add) {
    $update->push("update", rr_del("$host.$domain. $opt_ntype"));
}
if (!$opt_wipe) {
    $update->push("update", rr_add("$host.$domain. $ttl $opt_ntype $target"));
}

# TSIG record signed via the GSSAPI context established above
my $sig = Net::DNS::RR->new(
    Name => $key_name,
    Type => "TSIG",
    TTL => 0,
    Class => "ANY",
    Algorithm => $alg,
    Time_Signed => time,
    Fudge => 36000,
    Mac_Size => 0,
    Mac => "",
    Key => $gss_context,
    Sign_Func => \&gss_sign,
    Other_Len => 0,
    Other_Data => "",
    Error => 0,
    mode => 3,
    );

$update->push("additional", $sig);

# send the dynamic update
my $update_reply = $nameserver->send($update);

if (! defined($update_reply)) {
    print "No reply to dynamic update\n";
    exit 1;
}

# make sure it worked
my $result = $update_reply->header->{"rcode"};

($opt_verbose || $result ne 'NOERROR') && print "Update gave rcode $result\n";

if ($result ne 'NOERROR') {
    exit 1;
}

exit 0;
diff --git a/source4/scripting/bin/rebuildextendeddn b/source4/scripting/bin/rebuildextendeddn
new file mode 100755
index 0000000..d5c0ecb
--- /dev/null
+++ b/source4/scripting/bin/rebuildextendeddn
@@ -0,0 +1,135 @@
#!/usr/bin/env python3
#
# Unix SMB/CIFS implementation.
# Extended attributes (re)building
# Copyright (C) Matthieu Patou <mat@matws.net> 2009
#
# Based on provision a Samba4 server by
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2008
# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2008
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
import optparse
import os
import sys
# Find right directory when running from source tree
sys.path.insert(0, "bin/python")

import samba
from samba.credentials import DONT_USE_KERBEROS
from samba.auth import system_session
from samba import Ldb
from ldb import SCOPE_SUBTREE, SCOPE_BASE
import ldb
import samba.getopt as options
from samba import param
from samba.provision import ProvisionNames, provision_paths_from_lp
from samba.schema import get_dnsyntax_attributes, get_linked_attributes

parser = optparse.OptionParser("rebuildextendeddn [options]")
sambaopts = options.SambaOptions(parser)
parser.add_option_group(sambaopts)
parser.add_option_group(options.VersionOptions(parser))
credopts = options.CredentialsOptions(parser)
parser.add_option_group(credopts)
parser.add_option("--targetdir", type="string", metavar="DIR",
                  help="Set target directory")

opts = parser.parse_args()[0]

def message(text):
    """print a message if quiet is not set."""
    # NOTE(review): no --quiet option is defined on this parser, so
    # opts.quiet would raise AttributeError if message() were ever called;
    # it is unused in this script.
    if not opts.quiet:
        print(text)

# NOTE(review): opts.interactive is set but never read in this script.
if len(sys.argv) == 1:
    opts.interactive = True

lp = sambaopts.get_loadparm()
smbconf = lp.configfile

creds = credopts.get_credentials(lp)

creds.set_kerberos_state(DONT_USE_KERBEROS)

session = system_session()


def get_paths(targetdir=None,smbconf=None):
    """Return the provision paths derived from smb.conf.

    If targetdir is given, an etc/smb.conf below it is created/used;
    otherwise smbconf (or the compiled-in default path) must exist.
    Exits via sys.exit(1) when no smb.conf can be found.
    """
    if targetdir is not None:
        if (not os.path.exists(os.path.join(targetdir, "etc"))):
            os.makedirs(os.path.join(targetdir, "etc"))
        smbconf = os.path.join(targetdir, "etc", "smb.conf")
    if smbconf is None:
        smbconf = param.default_path()

    if not os.path.exists(smbconf):
        print("Unable to find smb.conf .. "+smbconf, file=sys.stderr)
        parser.print_usage()
        sys.exit(1)

    lp = param.LoadParm()
    lp.load(smbconf)
    paths = provision_paths_from_lp(lp,"foo")
    return paths



def rebuild_en_dn(credentials,session_info,paths):
    """Rewrite every linked/DN-syntax attribute value in the sam.ldb so the
    extended-DN components are rebuilt, then verify each rewrite by
    searching the value back."""
    lp = param.LoadParm()
    lp.load(paths.smbconf)
    names = ProvisionNames()
    names.domain = lp.get("workgroup")
    names.realm = lp.get("realm")
    names.rootdn = "DC=" + names.realm.replace(".",",DC=")

    attrs = ["dn" ]
    dn = ""
    sam_ldb = Ldb(paths.samdb, session_info=session_info, credentials=credentials,lp=lp)
    attrs2 = ["schemaNamingContext"]
    res2 = sam_ldb.search(expression="(objectClass=*)",base="", scope=SCOPE_BASE, attrs=attrs2)
    # Collect every linked attribute and every attribute with DN syntax
    # from the schema — these are the ones carrying extended DNs.
    attrs.extend(get_linked_attributes(ldb.Dn(sam_ldb,str(res2[0]["schemaNamingContext"])),sam_ldb).keys())
    attrs.extend(get_dnsyntax_attributes(ldb.Dn(sam_ldb,str(res2[0]["schemaNamingContext"])),sam_ldb))
    sam_ldb.transaction_start()
    # search_options:1:2 searches all partitions
    res = sam_ldb.search(expression="(cn=*)", scope=SCOPE_SUBTREE, attrs=attrs,controls=["search_options:1:2"])
    mod = ""
    for i in range (0,len(res)):
        #print >>sys.stderr,res[i].dn
        dn = res[i].dn
        for att in res[i]:
            if ( (att != "dn" and att != "cn") and not (res[i][att] is None) ):
                # Re-write the attribute with the same (string) values; the
                # extended-DN module recomputes GUID/SID components on store.
                m = ldb.Message()
                m.dn = ldb.Dn(sam_ldb, str(dn))
                saveatt = []
                for j in range (0,len( res[i][att])):
                    mod = mod +att +": "+str(res[i][att][j])+"\n"
                    saveatt.append(str(res[i][att][j]))
                m[att] = ldb.MessageElement(saveatt, ldb.FLAG_MOD_REPLACE, att)
                sam_ldb.modify(m)
                # Verify the value survived the rewrite
                res3 = sam_ldb.search(expression="(&(distinguishedName=%s)(%s=*))"%(dn,att),scope=SCOPE_SUBTREE, attrs=[att],controls=["search_options:1:2"])
                if( len(res3) == 0 or (len(res3[0][att])!= len(saveatt))):
                    print(str(dn) + " has no attr " +att+ " or a wrong value",
                          file=sys.stderr)
                    for satt in saveatt:
                        print("%s = %s" % (att, satt),
                              file=sys.stderr)
                    # NOTE(review): after transaction_cancel() the loop keeps
                    # running and the unconditional transaction_commit() below
                    # will then fail — confirm intended error handling.
                    sam_ldb.transaction_cancel()
    sam_ldb.transaction_commit()


paths = get_paths(targetdir=opts.targetdir, smbconf=smbconf)

rebuild_en_dn(creds,session,paths)

diff --git a/source4/scripting/bin/renamedc b/source4/scripting/bin/renamedc
new file mode 100755
index 0000000..e5e8a2c
--- /dev/null
+++ b/source4/scripting/bin/renamedc
@@ -0,0 +1,191 @@
#!/usr/bin/env python3
# vim: expandtab
#
# Copyright (C) Matthieu Patou <mat@matws.net> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.


import optparse
import sys
# Allow to run from s4 source directory (without installing samba)
sys.path.insert(0, "bin/python")

import ldb
import samba
import samba.getopt as options
import os

from samba.credentials import DONT_USE_KERBEROS
from samba.auth import system_session
from samba import param
from samba.provision import find_provision_key_parameters, secretsdb_self_join
from samba.upgradehelpers import get_ldbs, get_paths


__docformat__ = "restructuredText"

parser = optparse.OptionParser("renamedc [options]")
sambaopts = options.SambaOptions(parser)
parser.add_option_group(sambaopts)
parser.add_option_group(options.VersionOptions(parser))
credopts = options.CredentialsOptions(parser)
parser.add_option_group(credopts)
parser.add_option("--oldname",
                  help="Old DC name")
parser.add_option("--newname",
                  help="New DC name")

opts = parser.parse_args()[0]

# NOTE(review): opts.interactive is set but never read in this script.
if len(sys.argv) == 1:
    opts.interactive = True
lp = sambaopts.get_loadparm()
smbconf = lp.configfile

creds = credopts.get_credentials(lp)
creds.set_kerberos_state(DONT_USE_KERBEROS)


if __name__ == '__main__':
    defSDmodified = False
    # 1) First get files paths
    paths = get_paths(param, smbconf=smbconf)
    # Get ldbs with the system session, it is needed for searching
    # provision parameters
    session = system_session()

    ldbs = get_ldbs(paths, creds, session, lp)
    # sam.ldb and secrets.ldb are updated together; keep both in a
    # transaction so a failure leaves them consistent.
    ldbs.sam.transaction_start()
    ldbs.secrets.transaction_start()

    if opts.oldname is None or opts.newname is None:
        raise Exception("Option oldname or newname is missing")
    res = ldbs.sam.search(expression="(&(name=%s)(serverReferenceBL=*))" % opts.oldname)
    if len(res) != 1:
        raise Exception("Wrong number of result returned (%d), are you sure of the old name %s" %
                        (len(res), opts.oldname))

    # Ok got it then check that the new name is not used as well
    res2 = ldbs.sam.search(expression="(&(name=%s)(objectclass=computer))" % opts.newname)
    if len(res2) != 0:
        raise Exception("Seems that %s is a name that already exists, pick another one" %
                        opts.newname)

    names = find_provision_key_parameters(ldbs.sam, ldbs.secrets, ldbs.idmap,
                                          paths, smbconf, lp)

    # First rename the entry
    # provision put the name in upper case so let's do it too !
    newdn = ldb.Dn(ldbs.sam, str(res[0].dn))
    newdn.set_component(0, "cn", opts.newname.upper())
    ldbs.sam.rename(res[0].dn, newdn)

    # Then change password and samaccountname and dnshostname
    msg = ldb.Message(newdn)
    machinepass = samba.generate_random_machine_password(120, 120)
    # clearTextPassword must be UTF-16-LE encoded
    mputf16 = machinepass.encode('utf-16-le')

    account = "%s$" % opts.newname.upper()
    msg["clearTextPassword"] = ldb.MessageElement(mputf16,
                                                  ldb.FLAG_MOD_REPLACE,
                                                  "clearTextPassword")

    msg["sAMAccountName"] = ldb.MessageElement(account,
                                               ldb.FLAG_MOD_REPLACE,
                                               "sAMAccountName")

    msg["dNSHostName"] = ldb.MessageElement("%s.%s" % (opts.newname,
                                                       names.dnsdomain),
                                            ldb.FLAG_MOD_REPLACE,
                                            "dNSHostName")
    ldbs.sam.modify(msg)

    # Do a self join one more time to resync the secrets file
    res = ldbs.sam.search(base=newdn, scope=ldb.SCOPE_BASE,
                          attrs=["msDs-keyVersionNumber", "serverReferenceBL"])
    assert(len(res) == 1)
    kvno = int(str(res[0]["msDs-keyVersionNumber"]))
    serverbldn = ldb.Dn(ldbs.sam, str(res[0]["serverReferenceBL"]))

    secrets_msg = ldbs.secrets.search(expression="sAMAccountName=%s$" %
                                      opts.oldname.upper(),
                                      attrs=["secureChannelType"])

    secChanType = int(secrets_msg[0]["secureChannelType"][0])

    secretsdb_self_join(ldbs.secrets, domain=names.domain,
                        realm=names.realm,
                        domainsid=names.domainsid,
                        dnsdomain=names.dnsdomain,
                        netbiosname=opts.newname.upper(),
                        machinepass=machinepass,
                        key_version_number=kvno,
                        secure_channel_type=secChanType)

    # Update RID set reference so we don't have to runtime fixup until the next dbcheck as there is no back link.

    res = ldbs.sam.search(expression="(objectClass=rIDSet)", base=newdn, scope=ldb.SCOPE_ONELEVEL, attrs=[])
    assert(len(res) == 1)
    newridset = str(res[0].dn)
    msg = ldb.Message(newdn)

    msg["rIDSetReferences"] = ldb.MessageElement(newridset,
                                                 ldb.FLAG_MOD_REPLACE,
                                                 "rIDSetReferences")
    ldbs.sam.modify(msg)

    # Update the server's sites configuration
    newserverrefdn = ldb.Dn(ldbs.sam, str(serverbldn))
    newserverrefdn.set_component(0, "cn", opts.newname.upper())

    ldbs.sam.rename(serverbldn, newserverrefdn)

    msg = ldb.Message(newserverrefdn)
    msg["dNSHostName"] = ldb.MessageElement("%s.%s" % (opts.newname,
                                                       names.dnsdomain),
                                            ldb.FLAG_MOD_REPLACE,
                                            "dNSHostName")
    ldbs.sam.modify(msg)

    # NOTE(review): ldb transactions are aborted with transaction_cancel(),
    # not rollback() — confirm these error paths against the ldb Python API.
    try:
        ldbs.sam.transaction_prepare_commit()
        ldbs.secrets.transaction_prepare_commit()
    except Exception:
        ldbs.sam.rollback()
        ldbs.secrets.rollback()
        raise

    try:
        ldbs.sam.transaction_commit()
        ldbs.secrets.transaction_commit()
    except Exception:
        ldbs.sam.rollback()
        ldbs.secrets.rollback()
        raise

    # All good so far
    #print lp.get("private dir")
    # Rewrite smb.conf with the new netbios name, then atomically replace it.
    # NOTE(review): l.find("netbios name") > 0 misses a line where the
    # parameter starts at column 0 (find returns 0) — likely should be >= 0.
    cf = open(lp.configfile)
    ncfname = "%s.new" % lp.configfile
    newconf = open(ncfname, 'w')
    for l in cf.readlines():
        if l.find("netbios name") > 0:
            newconf.write("\tnetbios name = %s\n" % opts.newname.upper())
        else:
            newconf.write(l)
    newconf.close()
    cf.close()
    os.rename(ncfname, lp.configfile)

diff --git a/source4/scripting/bin/samba-gpupdate b/source4/scripting/bin/samba-gpupdate
new file mode 100755
index 0000000..4b3f057
--- /dev/null
+++ b/source4/scripting/bin/samba-gpupdate
@@ -0,0 +1,138 @@
#!/usr/bin/env python3
# Copyright Luke Morrison <luc785@.hotmail.com> July 2013
# Co-Edited by Matthieu Pattou July 2013 from original August 2013
# Edited by Garming Sam Feb.
# 2014
# Edited by Luke Morrison April 2014
# Edited by David Mulder May 2017

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

'''This script reads a log file of previous GPO, gets all GPO from sysvol
and sorts them by container. Then, it applies the ones that haven't been
applied, have changed, or is in the right container'''

import os
import sys

sys.path.insert(0, "bin/python")

import optparse
from samba import getopt as options
from samba.gp.gpclass import apply_gp, unapply_gp, GPOStorage, rsop
from samba.gp.gp_sec_ext import gp_krb_ext, gp_access_ext
from samba.gp.gp_ext_loader import get_gp_client_side_extensions
from samba.gp.gp_scripts_ext import gp_scripts_ext, gp_user_scripts_ext
from samba.gp.gp_sudoers_ext import gp_sudoers_ext
from samba.gp.vgp_sudoers_ext import vgp_sudoers_ext
from samba.gp.gp_smb_conf_ext import gp_smb_conf_ext
from samba.gp.gp_msgs_ext import gp_msgs_ext
from samba.gp.vgp_symlink_ext import vgp_symlink_ext
from samba.gp.vgp_files_ext import vgp_files_ext
from samba.gp.vgp_openssh_ext import vgp_openssh_ext
from samba.gp.vgp_motd_ext import vgp_motd_ext
from samba.gp.vgp_issue_ext import vgp_issue_ext
from samba.gp.vgp_startup_scripts_ext import vgp_startup_scripts_ext
from samba.gp.vgp_access_ext import vgp_access_ext
from samba.gp.gp_gnome_settings_ext import gp_gnome_settings_ext
from samba.gp.gp_cert_auto_enroll_ext import gp_cert_auto_enroll_ext
from samba.gp.gp_firefox_ext import gp_firefox_ext
from samba.gp.gp_chromium_ext import gp_chromium_ext, gp_chrome_ext
from samba.gp.gp_firewalld_ext import gp_firewalld_ext
from samba.gp.gp_centrify_sudoers_ext import gp_centrify_sudoers_ext
from samba.gp.gp_centrify_crontab_ext import gp_centrify_crontab_ext, \
    gp_user_centrify_crontab_ext
from samba.credentials import Credentials
from samba.gp.util.logging import logger_init

if __name__ == "__main__":
    parser = optparse.OptionParser('samba-gpupdate [options]')
    sambaopts = options.Samba3Options(parser)

    # Get the command line options
    parser.add_option_group(sambaopts)
    parser.add_option_group(options.VersionOptions(parser))
    credopts = options.CredentialsOptions(parser)
    parser.add_option('-X', '--unapply', help='Unapply Group Policy',
                      action='store_true')
    parser.add_option('--target', default='Computer', help='{Computer | User}',
                      choices=['Computer', 'User'])
    parser.add_option('--force', help='Reapplies all policy settings',
                      action='store_true')
    parser.add_option('--rsop', help='Print the Resultant Set of Policy',
                      action='store_true')
    parser.add_option_group(credopts)

    # Set the options and the arguments
    (opts, args) = parser.parse_args()

    # Set the loadparm context
    lp = sambaopts.get_loadparm()

    creds = credopts.get_credentials(lp, fallback_machine=True)
    # Apply policy to the command line specified user
    if opts.target == 'Computer':
        username = creds.get_username()
    elif opts.target == 'User':
        username = '%s\\%s' % (creds.get_domain(), creds.get_username())
        # Always supply the machine creds for fetching the gpo list
        creds = Credentials()
        creds.guess(lp)
        creds.set_machine_account(lp)

    # Set up logging
    logger_init('samba-gpupdate', lp.log_level())

    # Cached GPO state lives in gpo.tdb under the cache directory
    cache_dir = lp.get('cache directory')
    store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))

    # Dynamically loaded client-side extensions, split by applicability
    machine_exts, user_exts = get_gp_client_side_extensions(lp.configfile)
    gp_extensions = []
    if opts.target == 'Computer':
        gp_extensions.append(gp_access_ext)
        gp_extensions.append(gp_krb_ext)
        gp_extensions.append(gp_scripts_ext)
        gp_extensions.append(gp_sudoers_ext)
        gp_extensions.append(vgp_sudoers_ext)
        gp_extensions.append(gp_centrify_sudoers_ext)
        gp_extensions.append(gp_centrify_crontab_ext)
        gp_extensions.append(gp_smb_conf_ext)
        gp_extensions.append(gp_msgs_ext)
        gp_extensions.append(vgp_symlink_ext)
        gp_extensions.append(vgp_files_ext)
        gp_extensions.append(vgp_openssh_ext)
        gp_extensions.append(vgp_motd_ext)
        gp_extensions.append(vgp_issue_ext)
        gp_extensions.append(vgp_startup_scripts_ext)
        gp_extensions.append(vgp_access_ext)
        gp_extensions.append(gp_gnome_settings_ext)
        gp_extensions.append(gp_cert_auto_enroll_ext)
        gp_extensions.append(gp_firefox_ext)
        gp_extensions.append(gp_chromium_ext)
        gp_extensions.append(gp_chrome_ext)
        gp_extensions.append(gp_firewalld_ext)
        gp_extensions.extend(machine_exts)
    elif opts.target == 'User':
        gp_extensions.append(gp_user_scripts_ext)
        gp_extensions.append(gp_user_centrify_crontab_ext)
        gp_extensions.extend(user_exts)

    # Three modes: report (rsop), apply (default), or unapply (-X)
    if opts.rsop:
        rsop(lp, creds, store, gp_extensions, username, opts.target)
    elif not opts.unapply:
        apply_gp(lp, creds, store, gp_extensions, username,
                 opts.target, opts.force)
    else:
        unapply_gp(lp, creds, store, gp_extensions, username,
                   opts.target)

diff --git a/source4/scripting/bin/samba-tool b/source4/scripting/bin/samba-tool
new file mode 100755
index 0000000..f8a70a6
--- /dev/null
+++ b/source4/scripting/bin/samba-tool
@@ -0,0 +1,50 @@
#!/usr/bin/env python3

# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2008-2012 +# Copyright (C) Amitay Isaacs <amitay@gmail.com> 2011 +# Copyright (C) Giampaolo Lauria <lauria2@yahoo.com> 2011 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +import sys + +# Find right direction when running from source tree +sys.path.insert(0, "bin/python") + +# make sure the script dies immediately when hitting control-C, +# rather than raising KeyboardInterrupt. As we do all database +# operations using transactions, this is safe. 
+import signal +signal.signal(signal.SIGINT, signal.SIG_DFL) + +from samba.netcmd.main import cmd_sambatool +cmd = cmd_sambatool() +subcommand = None +args = () + +if len(sys.argv) > 1: + subcommand = sys.argv[1] + if len(sys.argv) > 2: + args = sys.argv[2:] + +try: + retval = cmd._run("samba-tool", subcommand, *args) +except SystemExit as e: + retval = e.code +except Exception as e: + cmd.show_command_error(e) + retval = 1 +sys.exit(retval) diff --git a/source4/scripting/bin/samba3dump b/source4/scripting/bin/samba3dump new file mode 100755 index 0000000..1a5d74f --- /dev/null +++ b/source4/scripting/bin/samba3dump @@ -0,0 +1,180 @@ +#!/usr/bin/env python3 +# +# Dump Samba3 data +# Copyright Jelmer Vernooij 2005-2007 +# Released under the GNU GPL v3 or later +# + +import optparse +import os, sys + +# Find right directory when running from source tree +sys.path.insert(0, "bin/python") + +import samba +import samba.samba3 +from samba.samba3 import param as s3param +from samba.dcerpc import lsa + +parser = optparse.OptionParser("samba3dump <libdir> [<smb.conf>]") +parser.add_option("--format", type="choice", metavar="FORMAT", + choices=["full", "summary"]) + +opts, args = parser.parse_args() + +if opts.format is None: + opts.format = "summary" + +def print_header(txt): + print("\n%s" % txt) + print("=" * len(txt)) + +def print_samba3_policy(pol): + print_header("Account Policies") + print("Min password length: %d" % pol['min password length']) + print("Password history length: %d" % pol['password history']) + if pol['user must logon to change password']: + print("User must logon to change password: %d" % pol['user must logon to change password']) + if pol['maximum password age']: + print("Maximum password age: %d" % pol['maximum password age']) + if pol['minimum password age']: + print("Minimum password age: %d" % pol['minimum password age']) + if pol['lockout duration']: + print("Lockout duration: %d" % pol['lockout duration']) + if pol['reset count minutes']: + 
print("Reset Count Minutes: %d" % pol['reset count minutes']) + if pol['bad lockout attempt']: + print("Bad Lockout Minutes: %d" % pol['bad lockout attempt']) + if pol['disconnect time']: + print("Disconnect Time: %d" % pol['disconnect time']) + if pol['refuse machine password change']: + print("Refuse Machine Password Change: %d" % pol['refuse machine password change']) + +def print_samba3_sam(samdb): + print_header("SAM Database") + for user in samdb.search_users(0): + print("%s (%d): %s" % (user['account_name'], user['rid'], user['fullname'])) + +def print_samba3_shares(lp): + print_header("Configured shares") + for s in lp.services(): + print("--- %s ---" % s) + for p in ['path']: + print("\t%s = %s" % (p, lp.get(p, s))) + print("") + +def print_samba3_secrets(secrets): + print_header("Secrets") + + if secrets.get_auth_user(): + print("IPC Credentials:") + if secrets.get_auth_user(): + print(" User: %s\n" % secrets.get_auth_user()) + if secrets.get_auth_password(): + print(" Password: %s\n" % secrets.get_auth_password()) + if secrets.get_auth_domain(): + print(" Domain: %s\n" % secrets.get_auth_domain()) + + if len(list(secrets.ldap_dns())) > 0: + print("LDAP passwords:") + for dn in secrets.ldap_dns(): + print("\t%s -> %s" % (dn, secrets.get_ldap_bind_pw(dn))) + print("") + + print("Domains:") + for domain in secrets.domains(): + print("\t--- %s ---" % domain) + print("\tSID: %s" % secrets.get_sid(domain)) + print("\tGUID: %s" % secrets.get_domain_guid(domain)) + print("\tPlaintext pwd: %s" % secrets.get_machine_password(domain)) + if secrets.get_machine_last_change_time(domain): + print("\tLast Changed: %lu" % secrets.get_machine_last_change_time(domain)) + if secrets.get_machine_sec_channel_type(domain): + print("\tSecure Channel Type: %d\n" % secrets.get_machine_sec_channel_type(domain)) + + print("Trusted domains:") + for td in secrets.trusted_domains(): + print(td) + +def print_samba3_regdb(regdb): + print_header("Registry") + from samba.registry import 
str_regtype + + for k in regdb.keys(): + print("[%s]" % k) + for (value_name, (type, value)) in regdb.values(k).items(): + print("\"%s\"=%s:%s" % (value_name, str_regtype(type), value)) + +def print_samba3_winsdb(winsdb): + print_header("WINS Database") + + for name in winsdb: + (ttl, ips, nb_flags) = winsdb[name] + print("%s, nb_flags: %s, ttl: %lu, %d ips, fst: %s" % (name, nb_flags, ttl, len(ips), ips[0])) + +def print_samba3_groupmappings(groupdb): + print_header("Group Mappings") + + for g in groupdb.enum_group_mapping(samba.samba3.passdb.get_global_sam_sid(), + lsa.SID_NAME_DOM_GRP): + print("\t--- Group: %s ---" % g.sid) + +def print_samba3_aliases(groupdb): + for g in groupdb.enum_group_mapping(samba.samba3.passdb.get_global_sam_sid(), + lsa.SID_NAME_ALIAS): + print("\t--- Alias: %s ---" % g.sid) + +def print_samba3_idmapdb(idmapdb): + print_header("Winbindd SID<->GID/UID mappings") + + print("User High Water Mark: %d" % idmapdb.get_user_hwm()) + print("Group High Water Mark: %d\n" % idmapdb.get_group_hwm()) + + for uid in idmapdb.uids(): + print("%s -> UID %d" % (idmapdb.get_user_sid(uid), uid)) + + for gid in idmapdb.gids(): + print("%s -> GID %d" % (idmapdb.get_group_sid(gid), gid)) + +def print_samba3(samba3): + passdb = samba3.get_sam_db() + print_samba3_policy(passdb.get_account_policy()) + print_samba3_winsdb(samba3.get_wins_db()) + print_samba3_regdb(samba3.get_registry()) + print_samba3_secrets(samba3.get_secrets_db()) + print_samba3_idmapdb(samba3.get_idmap_db()) + print_samba3_sam(passdb) + print_samba3_groupmappings(passdb) + print_samba3_aliases(passdb) + print_samba3_shares(samba3.lp) + +def print_samba3_summary(samba3): + print("WINS db entries: %d" % len(samba3.get_wins_db())) + print("Registry key count: %d" % len(samba3.get_registry())) + passdb = samba3.get_sam_db() + print("Groupmap count: %d" % len(passdb.enum_group_mapping())) + print("Alias count: %d" % len(passdb.search_aliases())) + idmapdb = samba3.get_idmap_db() + print("Idmap 
count: %d" % (len(list(idmapdb.uids())) + len(list(idmapdb.gids())))) + +if len(args) < 1: + parser.print_help() + sys.exit(1) + +libdir = args[0] +if len(args) < 1: + smbconf = args[1] +else: + smbconf = os.path.join(libdir, "smb.conf") + +s3_lp = s3param.get_context() +s3_lp.set("private dir", libdir) +s3_lp.set("state directory", libdir) +s3_lp.set("lock directory", libdir) +s3_lp.load(smbconf) +samba3 = samba.samba3.Samba3(smbconf, s3_lp) + +if opts.format == "summary": + print_samba3_summary(samba3) +elif opts.format == "full": + print_samba3(samba3) diff --git a/source4/scripting/bin/samba_dnsupdate b/source4/scripting/bin/samba_dnsupdate new file mode 100755 index 0000000..1ce53f5 --- /dev/null +++ b/source4/scripting/bin/samba_dnsupdate @@ -0,0 +1,960 @@ +#!/usr/bin/env python3 +# vim: expandtab +# +# update our DNS names using TSIG-GSS +# +# Copyright (C) Andrew Tridgell 2010 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + + +import os +import fcntl +import sys +import tempfile +import subprocess + +# ensure we get messages out immediately, so they get in the samba logs, +# and don't get swallowed by a timeout +os.environ['PYTHONUNBUFFERED'] = '1' + +# forcing GMT avoids a problem in some timezones with kerberos. Both MIT +# heimdal can get mutual authentication errors due to the 24 second difference +# between UTC and GMT when using some zone files (eg. 
the PDT zone from +# the US) +os.environ["TZ"] = "GMT" + +# Find right directory when running from source tree +sys.path.insert(0, "bin/python") + +import samba +import optparse +from samba import getopt as options +from ldb import SCOPE_BASE +from samba import dsdb +from samba.auth import system_session +from samba.samdb import SamDB +from samba.dcerpc import netlogon, winbind +from samba.netcmd.dns import cmd_dns +from samba import gensec +from samba.kcc import kcc_utils +from samba.common import get_string +import ldb + +from samba.dnsresolver import DNSResolver +import dns.resolver +import dns.exception + +default_ttl = 900 +am_rodc = False +error_count = 0 + +parser = optparse.OptionParser("samba_dnsupdate [options]") +sambaopts = options.SambaOptions(parser) +parser.add_option_group(sambaopts) +parser.add_option_group(options.VersionOptions(parser)) +parser.add_option("--verbose", action="store_true") +parser.add_option("--use-samba-tool", action="store_true", help="Use samba-tool to make updates over RPC, rather than over DNS") +parser.add_option("--use-nsupdate", action="store_true", help="Use nsupdate command to make updates over DNS (default, if kinit successful)") +parser.add_option("--all-names", action="store_true") +parser.add_option("--all-interfaces", action="store_true") +parser.add_option("--current-ip", action="append", help="IP address to update DNS to match (helpful if behind NAT, valid multiple times, defaults to values from interfaces=)") +parser.add_option("--rpc-server-ip", type="string", help="IP address of server to use with samba-tool (defaults to first --current-ip)") +parser.add_option("--use-file", type="string", help="Use a file, rather than real DNS calls") +parser.add_option("--update-list", type="string", help="Add DNS names from the given file") +parser.add_option("--update-cache", type="string", help="Cache database of already registered records") +parser.add_option("--fail-immediately", action='store_true', help="Exit on first 
failure") +parser.add_option("--no-credentials", dest='nocreds', action='store_true', help="don't try and get credentials") +parser.add_option("--no-substitutions", dest='nosubs', action='store_true', help="don't try and expands variables in file specified by --update-list") + +creds = None +ccachename = None + +opts, args = parser.parse_args() + +if len(args) != 0: + parser.print_usage() + sys.exit(1) + +lp = sambaopts.get_loadparm() + +domain = lp.get("realm") +host = lp.get("netbios name") +all_interfaces = opts.all_interfaces + +IPs = opts.current_ip or samba.interface_ips(lp, bool(all_interfaces)) or [] + +nsupdate_cmd = lp.get('nsupdate command') +dns_zone_scavenging = lp.get("dns zone scavenging") + +if len(IPs) == 0: + print("No IP interfaces - skipping DNS updates\n") + parser.print_usage() + sys.exit(0) + +rpc_server_ip = opts.rpc_server_ip or IPs[0] + +IP6s = [ip for ip in IPs if ':' in ip] +IP4s = [ip for ip in IPs if ':' not in ip] + +smb_conf = sambaopts.get_loadparm_path() + +if opts.verbose: + print("IPs: %s" % IPs) + +def get_possible_rw_dns_server(creds, domain): + """Get a list of possible read-write DNS servers, starting with + the SOA. The SOA is the correct answer, but old Samba domains + (4.6 and prior) do not maintain this value, so add NS servers + as well""" + + ans_soa = check_one_dns_name(domain, 'SOA') + # Actually there is only one + hosts_soa = [str(a.mname).rstrip('.') for a in ans_soa] + + # This is not strictly legit, but old Samba domains may have an + # unmaintained SOA record, so go for any NS that we can get a + # ticket to. + ans_ns = check_one_dns_name(domain, 'NS') + # Actually there is only one + hosts_ns = [str(a.target).rstrip('.') for a in ans_ns] + + return hosts_soa + hosts_ns + +def get_krb5_rw_dns_server(creds, domain): + """Get a list of read-write DNS servers that we can obtain a ticket + for, starting with the SOA. 
The SOA is the correct answer, but + old Samba domains (4.6 and prior) do not maintain this value, + so continue with the NS servers as well until we get one that + the KDC will issue a ticket to. + """ + + rw_dns_servers = get_possible_rw_dns_server(creds, domain) + # Actually there is only one + for i, target_hostname in enumerate(rw_dns_servers): + settings = {} + settings["lp_ctx"] = lp + settings["target_hostname"] = target_hostname + + gensec_client = gensec.Security.start_client(settings) + gensec_client.set_credentials(creds) + gensec_client.set_target_service("DNS") + gensec_client.set_target_hostname(target_hostname) + gensec_client.want_feature(gensec.FEATURE_SEAL) + gensec_client.start_mech_by_sasl_name("GSSAPI") + server_to_client = b"" + try: + (client_finished, client_to_server) = gensec_client.update(server_to_client) + if opts.verbose: + print("Successfully obtained Kerberos ticket to DNS/%s as %s" \ + % (target_hostname, creds.get_username())) + return target_hostname + except RuntimeError: + # Only raise an exception if they all failed + if i == len(rw_dns_servers) - 1: + raise + +def get_credentials(lp): + """# get credentials if we haven't got them already.""" + from samba import credentials + global ccachename + creds = credentials.Credentials() + creds.guess(lp) + creds.set_machine_account(lp) + creds.set_krb_forwardable(credentials.NO_KRB_FORWARDABLE) + (tmp_fd, ccachename) = tempfile.mkstemp() + try: + if opts.use_file is not None: + return + + creds.get_named_ccache(lp, ccachename) + + # Now confirm we can get a ticket to the DNS server + get_krb5_rw_dns_server(creds, sub_vars['DNSDOMAIN'] + '.') + return creds + + except RuntimeError as e: + os.unlink(ccachename) + raise e + + +class dnsobj(object): + """an object to hold a parsed DNS line""" + + def __init__(self, string_form): + list = string_form.split() + if len(list) < 3: + raise Exception("Invalid DNS entry %r" % string_form) + self.dest = None + self.port = None + self.ip = None + 
self.existing_port = None + self.existing_weight = None + self.existing_cname_target = None + self.rpc = False + self.zone = None + if list[0] == "RPC": + self.rpc = True + self.zone = list[1] + list = list[2:] + self.type = list[0] + self.name = list[1] + self.nameservers = [] + if self.type == 'SRV': + if len(list) < 4: + raise Exception("Invalid DNS entry %r" % string_form) + self.dest = list[2] + self.port = list[3] + elif self.type in ['A', 'AAAA']: + self.ip = list[2] # usually $IP, which gets replaced + elif self.type == 'CNAME': + self.dest = list[2] + elif self.type == 'NS': + self.dest = list[2] + else: + raise Exception("Received unexpected DNS reply of type %s: %s" % (self.type, string_form)) + + def __str__(self): + if self.type == "A": + return "%s %s %s" % (self.type, self.name, self.ip) + if self.type == "AAAA": + return "%s %s %s" % (self.type, self.name, self.ip) + if self.type == "SRV": + return "%s %s %s %s" % (self.type, self.name, self.dest, self.port) + if self.type == "CNAME": + return "%s %s %s" % (self.type, self.name, self.dest) + if self.type == "NS": + return "%s %s %s" % (self.type, self.name, self.dest) + + +def parse_dns_line(line, sub_vars): + """parse a DNS line from.""" + if line.startswith("SRV _ldap._tcp.pdc._msdcs.") and not samdb.am_pdc(): + # We keep this as compat to the dns_update_list of 4.0/4.1 + if opts.verbose: + print("Skipping PDC entry (%s) as we are not a PDC" % line) + return None + subline = samba.substitute_var(line, sub_vars) + if subline == '' or subline[0] == "#": + return None + return dnsobj(subline) + + +def hostname_match(h1, h2): + """see if two hostnames match.""" + h1 = str(h1) + h2 = str(h2) + return h1.lower().rstrip('.') == h2.lower().rstrip('.') + +def get_resolver(d=None): + resolv_conf = os.getenv('RESOLV_CONF', default='/etc/resolv.conf') + resolver = DNSResolver(filename=resolv_conf, configure=True) + + if d is not None and d.nameservers != []: + resolver.nameservers = d.nameservers + + return 
resolver + +def check_one_dns_name(name, name_type, d=None): + resolver = get_resolver(d) + if d and not d.nameservers: + d.nameservers = resolver.nameservers + # dns.resolver.Answer + return resolver.resolve(name, name_type) + +def check_dns_name(d): + """check that a DNS entry exists.""" + normalised_name = d.name.rstrip('.') + '.' + if opts.verbose: + print("Looking for DNS entry %s as %s" % (d, normalised_name)) + + if opts.use_file is not None: + try: + dns_file = open(opts.use_file, "r") + except IOError: + return False + + for line in dns_file: + line = line.strip() + if line == '' or line[0] == "#": + continue + if line.lower() == str(d).lower(): + return True + return False + + try: + ans = check_one_dns_name(normalised_name, d.type, d) + except dns.exception.Timeout: + raise Exception("Timeout while waiting to contact a working DNS server while looking for %s as %s" % (d, normalised_name)) + except dns.resolver.NoNameservers: + raise Exception("Unable to contact a working DNS server while looking for %s as %s" % (d, normalised_name)) + except dns.resolver.NXDOMAIN: + if opts.verbose: + print("The DNS entry %s, queried as %s does not exist" % (d, normalised_name)) + return False + except dns.resolver.NoAnswer: + if opts.verbose: + print("The DNS entry %s, queried as %s does not hold this record type" % (d, normalised_name)) + return False + except dns.exception.DNSException: + raise Exception("Failure while trying to resolve %s as %s" % (d, normalised_name)) + if d.type in ['A', 'AAAA']: + # we need to be sure that our IP is there + for rdata in ans: + if str(rdata) == str(d.ip): + return True + elif d.type == 'CNAME': + for i in range(len(ans)): + if hostname_match(ans[i].target, d.dest): + return True + else: + d.existing_cname_target = str(ans[i].target) + elif d.type == 'NS': + for i in range(len(ans)): + if hostname_match(ans[i].target, d.dest): + return True + elif d.type == 'SRV': + for rdata in ans: + if opts.verbose: + print("Checking %s against 
%s" % (rdata, d)) + if hostname_match(rdata.target, d.dest): + if str(rdata.port) == str(d.port): + return True + else: + d.existing_port = str(rdata.port) + d.existing_weight = str(rdata.weight) + + if opts.verbose: + print("Lookup of %s succeeded, but we failed to find a matching DNS entry for %s" % (normalised_name, d)) + + return False + + +def get_subst_vars(samdb): + """get the list of substitution vars.""" + global lp, am_rodc + vars = {} + + vars['DNSDOMAIN'] = samdb.domain_dns_name() + vars['DNSFOREST'] = samdb.forest_dns_name() + vars['HOSTNAME'] = samdb.host_dns_name() + vars['NTDSGUID'] = samdb.get_ntds_GUID() + vars['SITE'] = samdb.server_site_name() + res = samdb.search(base=samdb.get_default_basedn(), scope=SCOPE_BASE, attrs=["objectGUID"]) + guid = samdb.schema_format_value("objectGUID", res[0]['objectGUID'][0]) + vars['DOMAINGUID'] = get_string(guid) + + vars['IF_DC'] = "" + vars['IF_RWDC'] = "# " + vars['IF_RODC'] = "# " + vars['IF_PDC'] = "# " + vars['IF_GC'] = "# " + vars['IF_RWGC'] = "# " + vars['IF_ROGC'] = "# " + vars['IF_DNS_DOMAIN'] = "# " + vars['IF_RWDNS_DOMAIN'] = "# " + vars['IF_RODNS_DOMAIN'] = "# " + vars['IF_DNS_FOREST'] = "# " + vars['IF_RWDNS_FOREST'] = "# " + vars['IF_R0DNS_FOREST'] = "# " + + am_rodc = samdb.am_rodc() + if am_rodc: + vars['IF_RODC'] = "" + else: + vars['IF_RWDC'] = "" + + if samdb.am_pdc(): + vars['IF_PDC'] = "" + + # check if we "are DNS server" + res = samdb.search(base=samdb.get_config_basedn(), + expression='(objectguid=%s)' % vars['NTDSGUID'], + attrs=["options", "msDS-hasMasterNCs"]) + + if len(res) == 1: + if "options" in res[0]: + options = int(res[0]["options"][0]) + if (options & dsdb.DS_NTDSDSA_OPT_IS_GC) != 0: + vars['IF_GC'] = "" + if am_rodc: + vars['IF_ROGC'] = "" + else: + vars['IF_RWGC'] = "" + + basedn = str(samdb.get_default_basedn()) + forestdn = str(samdb.get_root_basedn()) + + if "msDS-hasMasterNCs" in res[0]: + for e in res[0]["msDS-hasMasterNCs"]: + if str(e) == "DC=DomainDnsZones,%s" % 
basedn: + vars['IF_DNS_DOMAIN'] = "" + if am_rodc: + vars['IF_RODNS_DOMAIN'] = "" + else: + vars['IF_RWDNS_DOMAIN'] = "" + if str(e) == "DC=ForestDnsZones,%s" % forestdn: + vars['IF_DNS_FOREST'] = "" + if am_rodc: + vars['IF_RODNS_FOREST'] = "" + else: + vars['IF_RWDNS_FOREST'] = "" + + return vars + + +def call_nsupdate(d, op="add"): + """call nsupdate for an entry.""" + global ccachename, nsupdate_cmd, krb5conf + + assert(op in ["add", "delete"]) + + if opts.use_file is not None: + if opts.verbose: + print("Use File instead of nsupdate for %s (%s)" % (d, op)) + + try: + rfile = open(opts.use_file, 'r+') + except IOError: + # Perhaps create it + open(opts.use_file, 'w+') + # Open it for reading again, in case someone else got to it first + rfile = open(opts.use_file, 'r+') + fcntl.lockf(rfile, fcntl.LOCK_EX) + (file_dir, file_name) = os.path.split(opts.use_file) + (tmp_fd, tmpfile) = tempfile.mkstemp(dir=file_dir, prefix=file_name, suffix="XXXXXX") + wfile = os.fdopen(tmp_fd, 'a') + rfile.seek(0) + for line in rfile: + if op == "delete": + l = parse_dns_line(line, {}) + if str(l).lower() == str(d).lower(): + continue + wfile.write(line) + if op == "add": + wfile.write(str(d)+"\n") + os.rename(tmpfile, opts.use_file) + fcntl.lockf(rfile, fcntl.LOCK_UN) + return + + if opts.verbose: + print("Calling nsupdate for %s (%s)" % (d, op)) + + normalised_name = d.name.rstrip('.') + '.' + + (tmp_fd, tmpfile) = tempfile.mkstemp() + f = os.fdopen(tmp_fd, 'w') + + resolver = get_resolver(d) + + # Local the zone for this name + zone = dns.resolver.zone_for_name(normalised_name, + resolver=resolver) + + # Now find the SOA, or if we can't get a ticket to the SOA, + # any server with an NS record we can get a ticket for. 
+ # + # Thanks to the Kerberos Credentials cache this is not + # expensive inside the loop + server = get_krb5_rw_dns_server(creds, zone) + f.write('server %s\n' % server) + + if d.type == "A": + f.write("update %s %s %u A %s\n" % (op, normalised_name, default_ttl, d.ip)) + if d.type == "AAAA": + f.write("update %s %s %u AAAA %s\n" % (op, normalised_name, default_ttl, d.ip)) + if d.type == "SRV": + if op == "add" and d.existing_port is not None: + f.write("update delete %s SRV 0 %s %s %s\n" % (normalised_name, d.existing_weight, + d.existing_port, d.dest)) + f.write("update %s %s %u SRV 0 100 %s %s\n" % (op, normalised_name, default_ttl, d.port, d.dest)) + if d.type == "CNAME": + f.write("update %s %s %u CNAME %s\n" % (op, normalised_name, default_ttl, d.dest)) + if d.type == "NS": + f.write("update %s %s %u NS %s\n" % (op, normalised_name, default_ttl, d.dest)) + if opts.verbose: + f.write("show\n") + f.write("send\n") + f.close() + + # Set a bigger MTU size to work around a bug in nsupdate's doio_send() + os.environ["SOCKET_WRAPPER_MTU"] = "2000" + + global error_count + if ccachename: + os.environ["KRB5CCNAME"] = ccachename + try: + cmd = nsupdate_cmd[:] + cmd.append(tmpfile) + env = os.environ + if krb5conf: + env["KRB5_CONFIG"] = krb5conf + if ccachename: + env["KRB5CCNAME"] = ccachename + ret = subprocess.call(cmd, shell=False, env=env) + if ret != 0: + if opts.fail_immediately: + if opts.verbose: + print("Failed update with %s" % tmpfile) + sys.exit(1) + error_count = error_count + 1 + if opts.verbose: + print("Failed nsupdate: %d" % ret) + except Exception as estr: + if opts.fail_immediately: + sys.exit(1) + error_count = error_count + 1 + if opts.verbose: + print("Failed nsupdate: %s : %s" % (str(d), estr)) + os.unlink(tmpfile) + + # Let socket_wrapper set the default MTU size + os.environ["SOCKET_WRAPPER_MTU"] = "0" + + +def call_samba_tool(d, op="add", zone=None): + """call samba-tool dns to update an entry.""" + + assert(op in ["add", "delete"]) + + if 
(sub_vars['DNSFOREST'] != sub_vars['DNSDOMAIN']) and \ + sub_vars['DNSFOREST'].endswith('.' + sub_vars['DNSDOMAIN']): + print("Refusing to use samba-tool when forest %s is under domain %s" \ + % (sub_vars['DNSFOREST'], sub_vars['DNSDOMAIN'])) + + if opts.verbose: + print("Calling samba-tool dns for %s (%s)" % (d, op)) + + normalised_name = d.name.rstrip('.') + '.' + if zone is None: + if normalised_name == (sub_vars['DNSDOMAIN'] + '.'): + short_name = '@' + zone = sub_vars['DNSDOMAIN'] + elif normalised_name == (sub_vars['DNSFOREST'] + '.'): + short_name = '@' + zone = sub_vars['DNSFOREST'] + elif normalised_name == ('_msdcs.' + sub_vars['DNSFOREST'] + '.'): + short_name = '@' + zone = '_msdcs.' + sub_vars['DNSFOREST'] + else: + if not normalised_name.endswith('.' + sub_vars['DNSDOMAIN'] + '.'): + print("Not Calling samba-tool dns for %s (%s), %s not in %s" % (d, op, normalised_name, sub_vars['DNSDOMAIN'] + '.')) + return False + elif normalised_name.endswith('._msdcs.' + sub_vars['DNSFOREST'] + '.'): + zone = '_msdcs.' 
+ sub_vars['DNSFOREST'] + else: + zone = sub_vars['DNSDOMAIN'] + len_zone = len(zone)+2 + short_name = normalised_name[:-len_zone] + else: + len_zone = len(zone)+2 + short_name = normalised_name[:-len_zone] + + if d.type == "A": + args = [rpc_server_ip, zone, short_name, "A", d.ip] + if d.type == "AAAA": + args = [rpc_server_ip, zone, short_name, "AAAA", d.ip] + if d.type == "SRV": + if op == "add" and d.existing_port is not None: + print("Not handling modify of existing SRV %s using samba-tool" % d) + return False + args = [rpc_server_ip, zone, short_name, "SRV", + "%s %s %s %s" % (d.dest, d.port, "0", "100")] + if d.type == "CNAME": + if d.existing_cname_target is None: + args = [rpc_server_ip, zone, short_name, "CNAME", d.dest] + else: + op = "update" + args = [rpc_server_ip, zone, short_name, "CNAME", + d.existing_cname_target.rstrip('.'), d.dest] + + if d.type == "NS": + args = [rpc_server_ip, zone, short_name, "NS", d.dest] + + if smb_conf and args: + args += ["--configfile=" + smb_conf] + + global error_count + try: + cmd = cmd_dns() + if opts.verbose: + print(f'Calling samba-tool dns {op} --use-kerberos off -P {args}') + ret = cmd._run("dns", op, "--use-kerberos", "off", "-P", *args) + if ret == -1: + if opts.fail_immediately: + sys.exit(1) + error_count = error_count + 1 + if opts.verbose: + print("Failed 'samba-tool dns' based update of %s" % (str(d))) + except Exception as estr: + if opts.fail_immediately: + sys.exit(1) + error_count = error_count + 1 + if opts.verbose: + print("Failed 'samba-tool dns' based update: %s : %s" % (str(d), estr)) + raise + +irpc_wb = None +def cached_irpc_wb(lp): + global irpc_wb + if irpc_wb is not None: + return irpc_wb + irpc_wb = winbind.winbind("irpc:winbind_server", lp) + return irpc_wb + +def rodc_dns_update(d, t, op): + '''a single DNS update via the RODC netlogon call''' + global sub_vars + + assert(op in ["add", "delete"]) + + if opts.verbose: + print("Calling netlogon RODC update for %s" % d) + + typemap = { + 
netlogon.NlDnsLdapAtSite : netlogon.NlDnsInfoTypeNone, + netlogon.NlDnsGcAtSite : netlogon.NlDnsDomainNameAlias, + netlogon.NlDnsDsaCname : netlogon.NlDnsDomainNameAlias, + netlogon.NlDnsKdcAtSite : netlogon.NlDnsInfoTypeNone, + netlogon.NlDnsDcAtSite : netlogon.NlDnsInfoTypeNone, + netlogon.NlDnsRfc1510KdcAtSite : netlogon.NlDnsInfoTypeNone, + netlogon.NlDnsGenericGcAtSite : netlogon.NlDnsDomainNameAlias + } + + w = cached_irpc_wb(lp) + dns_names = netlogon.NL_DNS_NAME_INFO_ARRAY() + dns_names.count = 1 + name = netlogon.NL_DNS_NAME_INFO() + name.type = t + name.dns_domain_info_type = typemap[t] + name.priority = 0 + name.weight = 0 + if d.port is not None: + name.port = int(d.port) + if op == "add": + name.dns_register = True + else: + name.dns_register = False + dns_names.names = [ name ] + site_name = sub_vars['SITE'] + + global error_count + + try: + ret_names = w.DsrUpdateReadOnlyServerDnsRecords(site_name, default_ttl, dns_names) + if ret_names.names[0].status != 0: + print("Failed to set DNS entry: %s (status %u)" % (d, ret_names.names[0].status)) + error_count = error_count + 1 + except RuntimeError as reason: + print("Error setting DNS entry of type %u: %s: %s" % (t, d, reason)) + error_count = error_count + 1 + + if opts.verbose: + print("Called netlogon RODC update for %s" % d) + + if error_count != 0 and opts.fail_immediately: + sys.exit(1) + + +def call_rodc_update(d, op="add"): + '''RODCs need to use the netlogon API for nsupdate''' + global lp, sub_vars + + assert(op in ["add", "delete"]) + + # we expect failure for 3268 if we aren't a GC + if d.port is not None and int(d.port) == 3268: + return + + # map the DNS request to a netlogon update type + map = { + netlogon.NlDnsLdapAtSite : '_ldap._tcp.${SITE}._sites.${DNSDOMAIN}', + netlogon.NlDnsGcAtSite : '_ldap._tcp.${SITE}._sites.gc._msdcs.${DNSDOMAIN}', + netlogon.NlDnsDsaCname : '${NTDSGUID}._msdcs.${DNSFOREST}', + netlogon.NlDnsKdcAtSite : '_kerberos._tcp.${SITE}._sites.dc._msdcs.${DNSDOMAIN}', + 
netlogon.NlDnsDcAtSite : '_ldap._tcp.${SITE}._sites.dc._msdcs.${DNSDOMAIN}', + netlogon.NlDnsRfc1510KdcAtSite : '_kerberos._tcp.${SITE}._sites.${DNSDOMAIN}', + netlogon.NlDnsGenericGcAtSite : '_gc._tcp.${SITE}._sites.${DNSFOREST}' + } + + for t in map: + subname = samba.substitute_var(map[t], sub_vars) + if subname.lower() == d.name.lower(): + # found a match - do the update + rodc_dns_update(d, t, op) + return + if opts.verbose: + print("Unable to map to netlogon DNS update: %s" % d) + + +# get the list of DNS entries we should have +dns_update_list = opts.update_list or lp.private_path('dns_update_list') + +dns_update_cache = opts.update_cache or lp.private_path('dns_update_cache') + +krb5conf = None +# only change the krb5.conf if we are not in selftest +if 'SOCKET_WRAPPER_DIR' not in os.environ: + # use our private krb5.conf to avoid problems with the wrong domain + # bind9 nsupdate wants the default domain set + krb5conf = lp.private_path('krb5.conf') + os.environ['KRB5_CONFIG'] = krb5conf + +try: + file = open(dns_update_list, "r") +except OSError as e: + if opts.update_cache: + print("The specified update list does not exist") + else: + print("The server update list was not found, " + "and --update-list was not provided.") + print(e) + print() + parser.print_usage() + sys.exit(1) + +if opts.nosubs: + sub_vars = {} +else: + samdb = SamDB(url=lp.samdb_url(), session_info=system_session(), lp=lp) + + # get the substitution dictionary + sub_vars = get_subst_vars(samdb) + +# build up a list of update commands to pass to nsupdate +update_list = [] +dns_list = [] +cache_list = [] +delete_list = [] + +dup_set = set() +cache_set = set() + +rebuild_cache = False +try: + cfile = open(dns_update_cache, 'r+') +except IOError: + # Perhaps create it + open(dns_update_cache, 'w+') + # Open it for reading again, in case someone else got to it first + cfile = open(dns_update_cache, 'r+') +fcntl.lockf(cfile, fcntl.LOCK_EX) +for line in cfile: + line = line.strip() + if line == 
'' or line[0] == "#": + continue + c = parse_dns_line(line, {}) + if c is None: + continue + if str(c) not in cache_set: + cache_list.append(c) + cache_set.add(str(c)) + +site_specific_rec = [] + +# read each line, and check that the DNS name exists +for line in file: + line = line.strip() + + if '${SITE}' in line: + site_specific_rec.append(line) + + if line == '' or line[0] == "#": + continue + d = parse_dns_line(line, sub_vars) + if d is None: + continue + if d.type == 'A' and len(IP4s) == 0: + continue + if d.type == 'AAAA' and len(IP6s) == 0: + continue + if str(d) not in dup_set: + dns_list.append(d) + dup_set.add(str(d)) + +# Perform automatic site coverage by default +auto_coverage = True + +if not am_rodc and auto_coverage: + site_names = kcc_utils.uncovered_sites_to_cover(samdb, + samdb.server_site_name()) + + # Duplicate all site specific records for the uncovered site + for site in site_names: + to_add = [samba.substitute_var(line, {'SITE': site}) + for line in site_specific_rec] + + for site_line in to_add: + d = parse_dns_line(site_line, + sub_vars=sub_vars) + if d is not None and str(d) not in dup_set: + dns_list.append(d) + dup_set.add(str(d)) + +# now expand the entries, if any are A record with ip set to $IP +# then replace with multiple entries, one for each interface IP +for d in dns_list: + if d.ip != "$IP": + continue + if d.type == 'A': + d.ip = IP4s[0] + for i in range(len(IP4s)-1): + d2 = dnsobj(str(d)) + d2.ip = IP4s[i+1] + dns_list.append(d2) + if d.type == 'AAAA': + d.ip = IP6s[0] + for i in range(len(IP6s)-1): + d2 = dnsobj(str(d)) + d2.ip = IP6s[i+1] + dns_list.append(d2) + +# now check if the entries already exist on the DNS server +for d in dns_list: + found = False + for c in cache_list: + if str(c).lower() == str(d).lower(): + found = True + break + if not found: + rebuild_cache = True + if opts.verbose: + print("need cache add: %s" % d) + if dns_zone_scavenging: + update_list.append(d) + if opts.verbose: + print("scavenging 
requires update: %s" % d) + elif opts.all_names: + update_list.append(d) + if opts.verbose: + print("force update: %s" % d) + elif not check_dns_name(d): + update_list.append(d) + if opts.verbose: + print("need update: %s" % d) + +for c in cache_list: + found = False + for d in dns_list: + if str(c).lower() == str(d).lower(): + found = True + break + if found: + continue + rebuild_cache = True + if opts.verbose: + print("need cache remove: %s" % c) + if not opts.all_names and not check_dns_name(c): + continue + delete_list.append(c) + if opts.verbose: + print("need delete: %s" % c) + +if len(delete_list) == 0 and len(update_list) == 0 and not rebuild_cache: + if opts.verbose: + print("No DNS updates needed") + sys.exit(0) +else: + if opts.verbose: + print("%d DNS updates and %d DNS deletes needed" % (len(update_list), len(delete_list))) + +use_samba_tool = opts.use_samba_tool +use_nsupdate = opts.use_nsupdate +# get our krb5 creds +if (delete_list or update_list) and not opts.nocreds: + try: + creds = get_credentials(lp) + except RuntimeError as e: + ccachename = None + + if sub_vars['IF_RWDNS_DOMAIN'] == "# ": + raise + + if use_nsupdate: + raise + + print("Failed to get Kerberos credentials, falling back to samba-tool: %s" % e) + use_samba_tool = True + + +# ask nsupdate to delete entries as needed +for d in delete_list: + if d.rpc or (not use_nsupdate and use_samba_tool): + if opts.verbose: + print("delete (samba-tool): %s" % d) + call_samba_tool(d, op="delete", zone=d.zone) + + elif am_rodc: + if d.name.lower() == domain.lower(): + if opts.verbose: + print("skip delete (rodc): %s" % d) + continue + if not d.type in [ 'A', 'AAAA' ]: + if opts.verbose: + print("delete (rodc): %s" % d) + call_rodc_update(d, op="delete") + else: + if opts.verbose: + print("delete (nsupdate): %s" % d) + call_nsupdate(d, op="delete") + else: + if opts.verbose: + print("delete (nsupdate): %s" % d) + call_nsupdate(d, op="delete") + +# ask nsupdate to add entries as needed +for d in 
update_list: + if d.rpc or (not use_nsupdate and use_samba_tool): + if opts.verbose: + print("update (samba-tool): %s" % d) + call_samba_tool(d, zone=d.zone) + + elif am_rodc: + if d.name.lower() == domain.lower(): + if opts.verbose: + print("skip (rodc): %s" % d) + continue + if not d.type in [ 'A', 'AAAA' ]: + if opts.verbose: + print("update (rodc): %s" % d) + call_rodc_update(d) + else: + if opts.verbose: + print("update (nsupdate): %s" % d) + call_nsupdate(d) + else: + if opts.verbose: + print("update(nsupdate): %s" % d) + call_nsupdate(d) + +if rebuild_cache: + print("Rebuilding cache at %s" % dns_update_cache) + (file_dir, file_name) = os.path.split(dns_update_cache) + (tmp_fd, tmpfile) = tempfile.mkstemp(dir=file_dir, prefix=file_name, suffix="XXXXXX") + wfile = os.fdopen(tmp_fd, 'a') + for d in dns_list: + if opts.verbose: + print("Adding %s to %s" % (str(d), file_name)) + wfile.write(str(d)+"\n") + wfile.flush() + os.rename(tmpfile, dns_update_cache) +fcntl.lockf(cfile, fcntl.LOCK_UN) + +# delete the ccache if we created it +if ccachename is not None: + os.unlink(ccachename) + +if error_count != 0: + print("Failed update of %u entries" % error_count) +sys.exit(error_count) diff --git a/source4/scripting/bin/samba_downgrade_db b/source4/scripting/bin/samba_downgrade_db new file mode 100755 index 0000000..b9a0909 --- /dev/null +++ b/source4/scripting/bin/samba_downgrade_db @@ -0,0 +1,135 @@ +#!/usr/bin/python3 +# +# Unix SMB/CIFS implementation. +# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2019 +# +# Downgrade a database from 4.11 format to 4.7 format. 4.7 Format will +# run on any version of Samba AD, and Samba will repack/reconfigure the +# database if necessary. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +import optparse +import sys + +# Find right directory when running from source tree +sys.path.insert(0, "bin/python") + + +import samba +import ldb +import urllib +import os +from samba import getopt as options +from samba.samdb import SamDB +from samba.dbchecker import dbcheck +from samba.credentials import Credentials +parser = optparse.OptionParser("samba_downgrade_db") +sambaopts = options.SambaOptions(parser) +parser.add_option_group(options.VersionOptions(parser)) +parser.add_option("-H", "--URL", help="LDB URL for database", + type=str, metavar="URL", dest="H") +opts, args = parser.parse_args() + +if len(args) != 0: + parser.print_usage() + sys.exit(1) + +lp_ctx = sambaopts.get_loadparm() + +if opts.H is None: + url = lp_ctx.private_path("sam.ldb") +else: + url = opts.H + +samdb = ldb.Ldb(url=url, + flags=ldb.FLG_DONT_CREATE_DB, + options=["modules:"]) + +partitions = samdb.search(base="@PARTITION", + scope=ldb.SCOPE_BASE, + attrs=["backendStore", "partition"]) + +backend = str(partitions[0].get('backendStore', 'tdb')) + +if backend == "mdb": + samdb = None + options = ["pack_format_override=%d" % ldb.PACKING_FORMAT] + # We can't remove GUID indexes from LMDB in case there are very + # long DNs, so we just move down the pack format, which also removes + # references to ORDERED_INTEGER in @ATTRIBUTES. + + # Reopen the DB with pack_format_override set + samdb = SamDB(url=url, + flags=ldb.FLG_DONT_CREATE_DB, + lp=lp_ctx, + options=options) + samdb.transaction_start() + samdb.transaction_commit() + print("Your database has been downgraded to LDB pack format version %0x (v1)." 
% ldb.PACKING_FORMAT) + + print("NOTE: Any use of a Samba 4.11 tool that modifies the DB will " + "auto-upgrade back to pack format version %0x (v2)" % + ldb.PACKING_FORMAT_V2) + exit(0); + +# This is needed to force the @ATTRIBUTES and @INDEXLIST to be correct +lp_ctx.set("dsdb:guid index", "false") + +modmsg = ldb.Message() +modmsg.dn = ldb.Dn(samdb, '@INDEXLIST') +modmsg.add(ldb.MessageElement( + elements=[], + flags=ldb.FLAG_MOD_REPLACE, + name='@IDXGUID')) +modmsg.add(ldb.MessageElement( + elements=[], + flags=ldb.FLAG_MOD_REPLACE, + name='@IDX_DN_GUID')) + +samdb.transaction_start() +samdb.modify(modmsg) + +privatedir = os.path.dirname(url) + +dbs = [] +for part in partitions[0]['partition']: + dbname = str(part).split(":")[1] + dbpath = os.path.join(privatedir, dbname) + if os.path.isfile(dbpath): + dbpath = "ldb://" + dbpath + db = ldb.Ldb(url=dbpath, + options=["modules:"], + flags=ldb.FLG_DONT_CREATE_DB) + db.transaction_start() + db.modify(modmsg) + dbs.append(db) + +for db in dbs: + db.transaction_commit() + +samdb.transaction_commit() + +print("Re-opening with the full DB stack") +samdb = SamDB(url=url, + flags=ldb.FLG_DONT_CREATE_DB, + lp=lp_ctx) +print("Re-triggering another re-index") +chk = dbcheck(samdb) + +chk.reindex_database() + +print("Your database has been downgraded to DN-based index values.") + +print("NOTE: Any use of a Samba 4.8 or later tool including ldbsearch will " + "auto-upgrade back to GUID index mode") diff --git a/source4/scripting/bin/samba_kcc b/source4/scripting/bin/samba_kcc new file mode 100755 index 0000000..67d801e --- /dev/null +++ b/source4/scripting/bin/samba_kcc @@ -0,0 +1,345 @@ +#!/usr/bin/env python3 +# +# Compute our KCC topology +# +# Copyright (C) Dave Craft 2011 +# Copyright (C) Andrew Bartlett 2015 +# +# Andrew Bartlett's alleged work performed by his underlings Douglas +# Bagnall and Garming Sam. 
+# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import os +import sys +import random + +# ensure we get messages out immediately, so they get in the samba logs, +# and don't get swallowed by a timeout +os.environ['PYTHONUNBUFFERED'] = '1' + +# forcing GMT avoids a problem in some timezones with kerberos. Both MIT +# heimdal can get mutual authentication errors due to the 24 second difference +# between UTC and GMT when using some zone files (eg. the PDT zone from +# the US) +os.environ["TZ"] = "GMT" + +# Find right directory when running from source tree +sys.path.insert(0, "bin/python") + +import optparse +import time + +from samba import getopt as options + +from samba.kcc.graph_utils import verify_and_dot, list_verify_tests +from samba.kcc.graph_utils import GraphError + +import logging +from samba.kcc.debug import logger, DEBUG, DEBUG_FN +from samba.kcc import KCC + +# If DEFAULT_RNG_SEED is None, /dev/urandom or system time is used. +DEFAULT_RNG_SEED = None + + +def test_all_reps_from(kcc, dburl, lp, creds, unix_now, rng_seed=None, + ldif_file=None): + """Run the KCC from all DSAs in read-only mode + + The behaviour depends on the global opts variable which contains + command line variables. Usually you will want to run it with + opt.dot_file_dir set (via --dot-file-dir) to see the graphs that + would be created from each DC. + + :param lp: a loadparm object. 
+ :param creds: a Credentials object. + :param unix_now: the unix epoch time as an integer + :param rng_seed: a seed for the random number generator + :return None: + """ + # This implies readonly and attempt_live_connections + dsas = kcc.list_dsas() + samdb = kcc.samdb + needed_parts = {} + current_parts = {} + + guid_to_dnstr = {} + for site in kcc.site_table.values(): + guid_to_dnstr.update((str(dsa.dsa_guid), dnstr) + for dnstr, dsa in site.dsa_table.items()) + + dot_edges = [] + dot_vertices = [] + colours = [] + vertex_colours = [] + + for dsa_dn in dsas: + if rng_seed is not None: + random.seed(rng_seed) + kcc = KCC(unix_now, readonly=True, + verify=opts.verify, debug=opts.debug, + dot_file_dir=opts.dot_file_dir) + if ldif_file is not None: + try: + # The dburl in this case is a temporary database. + # Its non-existence is ensured at the script startup. + # If it exists, it is from a previous iteration of + # this loop -- unless we're in an unfortunate race. + # Because this database is temporary, it lacks some + # detail and needs to be re-created anew to set the + # local dsa. 
+ os.unlink(dburl) + except OSError: + pass + + kcc.import_ldif(dburl, lp, ldif_file, dsa_dn) + else: + kcc.samdb = samdb + kcc.run(dburl, lp, creds, forced_local_dsa=dsa_dn, + forget_local_links=opts.forget_local_links, + forget_intersite_links=opts.forget_intersite_links, + attempt_live_connections=opts.attempt_live_connections) + + current, needed = kcc.my_dsa.get_rep_tables() + + for dsa in kcc.my_site.dsa_table.values(): + if dsa is kcc.my_dsa: + continue + kcc.translate_ntdsconn(dsa) + c, n = dsa.get_rep_tables() + current.update(c) + needed.update(n) + + for name, rep_table, rep_parts in ( + ('needed', needed, needed_parts), + ('current', current, current_parts)): + for part, nc_rep in rep_table.items(): + edges = rep_parts.setdefault(part, []) + for reps_from in nc_rep.rep_repsFrom: + source = guid_to_dnstr[str(reps_from.source_dsa_obj_guid)] + dest = guid_to_dnstr[str(nc_rep.rep_dsa_guid)] + edges.append((source, dest)) + + for site in kcc.site_table.values(): + for dsa in site.dsa_table.values(): + if dsa.is_ro(): + vertex_colours.append('#cc0000') + else: + vertex_colours.append('#0000cc') + dot_vertices.append(dsa.dsa_dnstr) + if dsa.connect_table: + DEBUG_FN("DSA %s %s connections:\n%s" % + (dsa.dsa_dnstr, len(dsa.connect_table), + [x.from_dnstr for x in + dsa.connect_table.values()])) + for con in dsa.connect_table.values(): + if con.is_rodc_topology(): + colours.append('red') + else: + colours.append('blue') + dot_edges.append((con.from_dnstr, dsa.dsa_dnstr)) + + verify_and_dot('all-dsa-connections', dot_edges, vertices=dot_vertices, + label="all dsa NTDSConnections", properties=(), + debug=DEBUG, verify=opts.verify, + dot_file_dir=opts.dot_file_dir, + directed=True, edge_colors=colours, + vertex_colors=vertex_colours) + + for name, rep_parts in (('needed', needed_parts), + ('current', current_parts)): + for part, edges in rep_parts.items(): + verify_and_dot('all-repsFrom_%s__%s' % (name, part), edges, + directed=True, label=part, + properties=(), 
debug=DEBUG, verify=opts.verify, + dot_file_dir=opts.dot_file_dir) + +################################################## +# samba_kcc entry point +################################################## + + +parser = optparse.OptionParser("samba_kcc [options]") +sambaopts = options.SambaOptions(parser) +credopts = options.CredentialsOptions(parser) + +parser.add_option_group(sambaopts) +parser.add_option_group(credopts) +parser.add_option_group(options.VersionOptions(parser)) + +parser.add_option("--readonly", default=False, + help="compute topology but do not update database", + action="store_true") + +parser.add_option("--debug", + help="debug output", + action="store_true") + +parser.add_option("--verify", + help="verify that assorted invariants are kept", + action="store_true") + +parser.add_option("--list-verify-tests", + help=("list what verification actions are available " + "and do nothing else"), + action="store_true") + +parser.add_option("--dot-file-dir", default=None, + help="Write Graphviz .dot files to this directory") + +parser.add_option("--seed", + help="random number seed", + type=int, default=DEFAULT_RNG_SEED) + +parser.add_option("--importldif", + help="import topology ldif file", + type=str, metavar="<file>") + +parser.add_option("--exportldif", + help="export topology ldif file", + type=str, metavar="<file>") + +parser.add_option("-H", "--URL", + help="LDB URL for database or target server", + type=str, metavar="<URL>", dest="dburl") + +parser.add_option("--tmpdb", + help="schemaless database file to create for ldif import", + type=str, metavar="<file>") + +parser.add_option("--now", + help=("assume current time is this ('YYYYmmddHHMMSS[tz]'," + " default: system time)"), + type=str, metavar="<date>") + +parser.add_option("--forced-local-dsa", + help="run calculations assuming the DSA is this DN", + type=str, metavar="<DSA>") + +parser.add_option("--attempt-live-connections", default=False, + help="Attempt to connect to other DSAs to test links", + 
action="store_true") + +parser.add_option("--list-valid-dsas", default=False, + help=("Print a list of DSA dnstrs that could be" + " used in --forced-local-dsa"), + action="store_true") + +parser.add_option("--test-all-reps-from", default=False, + help="Create and verify a graph of reps-from for every DSA", + action="store_true") + +parser.add_option("--forget-local-links", default=False, + help="pretend not to know the existing local topology", + action="store_true") + +parser.add_option("--forget-intersite-links", default=False, + help="pretend not to know the existing intersite topology", + action="store_true") + +opts, args = parser.parse_args() + + +if opts.list_verify_tests: + list_verify_tests() + sys.exit(0) + +if opts.test_all_reps_from: + opts.readonly = True + +if opts.debug: + logger.setLevel(logging.DEBUG) +elif opts.readonly: + logger.setLevel(logging.INFO) +else: + logger.setLevel(logging.WARNING) + +random.seed(opts.seed) + +if opts.now: + for timeformat in ("%Y%m%d%H%M%S%Z", "%Y%m%d%H%M%S"): + try: + now_tuple = time.strptime(opts.now, timeformat) + break + except ValueError: + pass + else: + # else happens if break doesn't --> no match + print("could not parse time '%s'" % (opts.now), file = sys.stderr) + sys.exit(1) + unix_now = int(time.mktime(now_tuple)) +else: + unix_now = int(time.time()) + +lp = sambaopts.get_loadparm() +# only log warnings/errors by default, unless the user has specified otherwise +if opts.debug is None: + lp.set('log level', '1') + +creds = credopts.get_credentials(lp, fallback_machine=True) + +if opts.dburl is None: + if opts.importldif: + opts.dburl = opts.tmpdb + else: + opts.dburl = lp.samdb_url() +elif opts.importldif: + logger.error("Don't use -H/--URL with --importldif, use --tmpdb instead") + sys.exit(1) + +# Instantiate Knowledge Consistency Checker and perform run +kcc = KCC(unix_now, readonly=opts.readonly, verify=opts.verify, + debug=opts.debug, dot_file_dir=opts.dot_file_dir) + +if opts.exportldif: + rc = 
kcc.export_ldif(opts.dburl, lp, creds, opts.exportldif) + sys.exit(rc) + +if opts.importldif: + if opts.tmpdb is None or opts.tmpdb.startswith('ldap'): + logger.error("Specify a target temp database file with --tmpdb option") + sys.exit(1) + if os.path.exists(opts.tmpdb): + logger.error("The temp database file (%s) specified with --tmpdb " + "already exists. We refuse to clobber it." % opts.tmpdb) + sys.exit(1) + + rc = kcc.import_ldif(opts.tmpdb, lp, opts.importldif, + forced_local_dsa=opts.forced_local_dsa) + if rc != 0: + sys.exit(rc) + + +kcc.load_samdb(opts.dburl, lp, creds, force=False) + +if opts.test_all_reps_from: + test_all_reps_from(kcc, opts.dburl, lp, creds, unix_now, + rng_seed=opts.seed, + ldif_file=opts.importldif) + sys.exit() + +if opts.list_valid_dsas: + print('\n'.join(kcc.list_dsas())) + sys.exit() + +try: + rc = kcc.run(opts.dburl, lp, creds, opts.forced_local_dsa, + opts.forget_local_links, opts.forget_intersite_links, + attempt_live_connections=opts.attempt_live_connections) + sys.exit(rc) + +except GraphError as e: + print( e) + sys.exit(1) diff --git a/source4/scripting/bin/samba_spnupdate b/source4/scripting/bin/samba_spnupdate new file mode 100755 index 0000000..84ff771 --- /dev/null +++ b/source4/scripting/bin/samba_spnupdate @@ -0,0 +1,254 @@ +#!/usr/bin/env python3 +# +# update our servicePrincipalName names from spn_update_list +# +# Copyright (C) Andrew Tridgell 2010 +# Copyright (C) Matthieu Patou <mat@matws.net> 2012 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + + +import os, sys, re + +# ensure we get messages out immediately, so they get in the samba logs, +# and don't get swallowed by a timeout +os.environ['PYTHONUNBUFFERED'] = '1' + +# forcing GMT avoids a problem in some timezones with kerberos. Both MIT +# heimdal can get mutual authentication errors due to the 24 second difference +# between UTC and GMT when using some zone files (eg. the PDT zone from +# the US) +os.environ["TZ"] = "GMT" + +# Find right directory when running from source tree +sys.path.insert(0, "bin/python") + +import samba, ldb +import optparse +from samba import Ldb +from samba import getopt as options +from samba.auth import system_session +from samba.samdb import SamDB +from samba.credentials import Credentials, DONT_USE_KERBEROS +from samba.common import get_string + +parser = optparse.OptionParser("samba_spnupdate") +sambaopts = options.SambaOptions(parser) +parser.add_option_group(sambaopts) +parser.add_option_group(options.VersionOptions(parser)) +parser.add_option("--verbose", action="store_true") + +credopts = options.CredentialsOptions(parser) +parser.add_option_group(credopts) + +ccachename = None + +opts, args = parser.parse_args() + +if len(args) != 0: + parser.print_usage() + sys.exit(1) + +lp = sambaopts.get_loadparm() +creds = credopts.get_credentials(lp) + +domain = lp.get("realm") +host = lp.get("netbios name") + + +# get the list of substitution vars +def get_subst_vars(samdb): + global lp + vars = {} + + vars['DNSDOMAIN'] = samdb.domain_dns_name() + vars['DNSFOREST'] = samdb.forest_dns_name() + vars['HOSTNAME'] = samdb.host_dns_name() + vars['NETBIOSNAME'] = lp.get('netbios name').upper() + vars['WORKGROUP'] = lp.get('workgroup') + vars['NTDSGUID'] = samdb.get_ntds_GUID() + res = samdb.search(base=samdb.get_default_basedn(), scope=ldb.SCOPE_BASE, attrs=["objectGUID"]) + guid = 
samdb.schema_format_value("objectGUID", res[0]['objectGUID'][0]) + vars['DOMAINGUID'] = get_string(guid) + return vars + +try: + private_dir = lp.get("private dir") + secrets_path = os.path.join(private_dir, "secrets.ldb") + + secrets_db = Ldb(url=secrets_path, session_info=system_session(), + credentials=creds, lp=lp) + res = secrets_db.search(base=None, + expression="(&(objectclass=ldapSecret)(cn=SAMDB Credentials))", + attrs=["samAccountName", "secret"]) + + if len(res) == 1: + credentials = Credentials() + credentials.set_kerberos_state(DONT_USE_KERBEROS) + + if "samAccountName" in res[0]: + credentials.set_username(res[0]["samAccountName"][0]) + + if "secret" in res[0]: + credentials.set_password(res[0]["secret"][0]) + + else: + credentials = None + + samdb = SamDB(url=lp.samdb_url(), session_info=system_session(), credentials=credentials, lp=lp) +except ldb.LdbError as e: + (num, msg) = e.args + print("Unable to open sam database %s : %s" % (lp.samdb_url(), msg)) + sys.exit(1) + + +# get the substitution dictionary +sub_vars = get_subst_vars(samdb) + +# get the list of SPN entries we should have +spn_update_list = lp.private_path('spn_update_list') + +file = open(spn_update_list, "r") + +spn_list = [] + +has_forest_dns = False +has_domain_dns = False +# check if we "are DNS server" +res = samdb.search(base=samdb.get_config_basedn(), + expression='(objectguid=%s)' % sub_vars['NTDSGUID'], + attrs=["msDS-hasMasterNCs"]) + +basedn = str(samdb.get_default_basedn()) +if len(res) == 1: + if "msDS-hasMasterNCs" in res[0]: + for e in res[0]["msDS-hasMasterNCs"]: + if str(e) == "DC=DomainDnsZones,%s" % basedn: + has_domain_dns = True + if str(e) == "DC=ForestDnsZones,%s" % basedn: + has_forest_dns = True + + +# build the spn list +for line in file: + line = line.strip() + if line == '' or line[0] == "#": + continue + if re.match(r".*/DomainDnsZones\..*", line) and not has_domain_dns: + continue + if re.match(r".*/ForestDnsZones\..*", line) and not has_forest_dns: + 
continue + line = samba.substitute_var(line, sub_vars) + spn_list.append(line) + +# get the current list of SPNs in our sam +res = samdb.search(base=samdb.get_default_basedn(), + expression='(&(objectClass=computer)(samaccountname=%s$))' % sub_vars['NETBIOSNAME'], + attrs=["servicePrincipalName"]) +if not res or len(res) != 1: + print("Failed to find computer object for %s$" % sub_vars['NETBIOSNAME']) + sys.exit(1) + +machine_dn = res[0]["dn"] + +old_spns = [] +if "servicePrincipalName" in res[0]: + for s in res[0]["servicePrincipalName"]: + old_spns.append(str(s)) + +if opts.verbose: + print("Existing SPNs: %s" % old_spns) + +add_list = [] + +# work out what needs to be added +for s in spn_list: + in_list = False + for s2 in old_spns: + if s2.upper() == s.upper(): + in_list = True + break + if not in_list: + add_list.append(s) + +if opts.verbose: + print("New SPNs: %s" % add_list) + +if add_list == []: + if opts.verbose: + print("Nothing to add") + sys.exit(0) + +def local_update(add_list): + '''store locally''' + global res + msg = ldb.Message() + msg.dn = res[0]['dn'] + msg[""] = ldb.MessageElement(add_list, + ldb.FLAG_MOD_ADD, "servicePrincipalName") + res = samdb.modify(msg) + +def call_rodc_update(d): + '''RODCs need to use the writeSPN DRS call''' + global lp, sub_vars + from samba import drs_utils + from samba.dcerpc import drsuapi, nbt + from samba.net import Net + + if opts.verbose: + print("Using RODC SPN update") + + creds = credopts.get_credentials(lp) + creds.set_machine_account(lp) + + net = Net(creds=creds, lp=lp) + try: + cldap_ret = net.finddc(domain=domain, flags=nbt.NBT_SERVER_DS | nbt.NBT_SERVER_WRITABLE) + except Exception as reason: + print("Unable to find writeable DC for domain '%s' to send DRS writeSPN to : %s" % (domain, reason)) + sys.exit(1) + server = cldap_ret.pdc_dns_name + try: + binding_options = "seal" + if lp.log_level() >= 5: + binding_options += ",print" + drs = drsuapi.drsuapi('ncacn_ip_tcp:%s[%s]' % (server, binding_options), 
lp, creds) + (drs_handle, supported_extensions) = drs_utils.drs_DsBind(drs) + except Exception as reason: + print("Unable to connect to DC '%s' for domain '%s' : %s" % (server, domain, reason)) + sys.exit(1) + req1 = drsuapi.DsWriteAccountSpnRequest1() + req1.operation = drsuapi.DRSUAPI_DS_SPN_OPERATION_ADD + req1.object_dn = str(machine_dn) + req1.count = 0 + spn_names = [] + for n in add_list: + if n.find('E3514235-4B06-11D1-AB04-00C04FC2DCD2') != -1: + # this one isn't allowed for RODCs, but we don't know why yet + continue + ns = drsuapi.DsNameString() + ns.str = n + spn_names.append(ns) + req1.count = req1.count + 1 + if spn_names == []: + return + req1.spn_names = spn_names + (level, res) = drs.DsWriteAccountSpn(drs_handle, 1, req1) + if (res.status != (0, 'WERR_OK')): + print("WriteAccountSpn has failed with error %s" % str(res.status)) + +if samdb.am_rodc(): + call_rodc_update(add_list) +else: + local_update(add_list) diff --git a/source4/scripting/bin/samba_upgradedns b/source4/scripting/bin/samba_upgradedns new file mode 100755 index 0000000..afc5807 --- /dev/null +++ b/source4/scripting/bin/samba_upgradedns @@ -0,0 +1,589 @@ +#!/usr/bin/env python3 +# +# Unix SMB/CIFS implementation. +# Copyright (C) Amitay Isaacs <amitay@gmail.com> 2012 +# +# Upgrade DNS provision from BIND9_FLATFILE to BIND9_DLZ or SAMBA_INTERNAL +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ +import sys +import os +import errno +import optparse +import logging +import grp +from base64 import b64encode +import shlex + +sys.path.insert(0, "bin/python") + +import ldb +import samba +from samba import param +from samba.auth import system_session +from samba.ndr import ( + ndr_pack, + ndr_unpack ) +import samba.getopt as options +from samba.upgradehelpers import ( + get_paths, + get_ldbs ) +from samba.dsdb import DS_DOMAIN_FUNCTION_2003 +from samba.provision import ( + find_provision_key_parameters, + interface_ips_v4, + interface_ips_v6 ) +from samba.provision.common import ( + setup_path, + setup_add_ldif, + FILL_FULL) +from samba.provision.sambadns import ( + ARecord, + AAAARecord, + CNAMERecord, + NSRecord, + SOARecord, + SRVRecord, + TXTRecord, + get_dnsadmins_sid, + add_dns_accounts, + create_dns_partitions, + fill_dns_data_partitions, + create_dns_dir, + secretsdb_setup_dns, + create_dns_dir_keytab_link, + create_samdb_copy, + create_named_conf, + create_named_txt ) +from samba.dcerpc import security + +import dns.zone, dns.rdatatype + +__docformat__ = 'restructuredText' + + +def find_bind_gid(): + """Find system group id for bind9 + """ + for name in ["bind", "named"]: + try: + return grp.getgrnam(name)[2] + except KeyError: + pass + return None + + +def convert_dns_rdata(rdata, serial=1): + """Convert resource records in dnsRecord format + """ + if rdata.rdtype == dns.rdatatype.A: + rec = ARecord(rdata.address, serial=serial) + elif rdata.rdtype == dns.rdatatype.AAAA: + rec = AAAARecord(rdata.address, serial=serial) + elif rdata.rdtype == dns.rdatatype.CNAME: + rec = CNAMERecord(rdata.target.to_text(), serial=serial) + elif rdata.rdtype == dns.rdatatype.NS: + rec = NSRecord(rdata.target.to_text(), serial=serial) + elif rdata.rdtype == dns.rdatatype.SRV: + rec = SRVRecord(rdata.target.to_text(), int(rdata.port), + priority=int(rdata.priority), weight=int(rdata.weight), + serial=serial) + elif rdata.rdtype == dns.rdatatype.TXT: + slist = 
shlex.split(rdata.to_text()) + rec = TXTRecord(slist, serial=serial) + elif rdata.rdtype == dns.rdatatype.SOA: + rec = SOARecord(rdata.mname.to_text(), rdata.rname.to_text(), + serial=int(rdata.serial), + refresh=int(rdata.refresh), retry=int(rdata.retry), + expire=int(rdata.expire), minimum=int(rdata.minimum)) + else: + rec = None + return rec + + +def import_zone_data(samdb, logger, zone, serial, domaindn, forestdn, + dnsdomain, dnsforest): + """Insert zone data in DNS partitions + """ + labels = dnsdomain.split('.') + labels.append('') + domain_root = dns.name.Name(labels) + domain_prefix = "DC=%s,CN=MicrosoftDNS,DC=DomainDnsZones,%s" % (dnsdomain, + domaindn) + + tmp = "_msdcs.%s" % dnsforest + labels = tmp.split('.') + labels.append('') + forest_root = dns.name.Name(labels) + dnsmsdcs = "_msdcs.%s" % dnsforest + forest_prefix = "DC=%s,CN=MicrosoftDNS,DC=ForestDnsZones,%s" % (dnsmsdcs, + forestdn) + + # Extract @ record + at_record = zone.get_node(domain_root) + zone.delete_node(domain_root) + + # SOA record + rdset = at_record.get_rdataset(dns.rdataclass.IN, dns.rdatatype.SOA) + soa_rec = ndr_pack(convert_dns_rdata(rdset[0])) + at_record.delete_rdataset(dns.rdataclass.IN, dns.rdatatype.SOA) + + # NS record + rdset = at_record.get_rdataset(dns.rdataclass.IN, dns.rdatatype.NS) + ns_rec = ndr_pack(convert_dns_rdata(rdset[0])) + at_record.delete_rdataset(dns.rdataclass.IN, dns.rdatatype.NS) + + # A/AAAA records + ip_recs = [] + for rdset in at_record: + for r in rdset: + rec = convert_dns_rdata(r) + ip_recs.append(ndr_pack(rec)) + + # Add @ record for domain + dns_rec = [soa_rec, ns_rec] + ip_recs + msg = ldb.Message(ldb.Dn(samdb, 'DC=@,%s' % domain_prefix)) + msg["objectClass"] = ["top", "dnsNode"] + msg["dnsRecord"] = ldb.MessageElement(dns_rec, ldb.FLAG_MOD_ADD, + "dnsRecord") + try: + samdb.add(msg) + except Exception: + logger.error("Failed to add @ record for domain") + raise + logger.debug("Added @ record for domain") + + # Add @ record for forest + dns_rec 
= [soa_rec, ns_rec] + msg = ldb.Message(ldb.Dn(samdb, 'DC=@,%s' % forest_prefix)) + msg["objectClass"] = ["top", "dnsNode"] + msg["dnsRecord"] = ldb.MessageElement(dns_rec, ldb.FLAG_MOD_ADD, + "dnsRecord") + try: + samdb.add(msg) + except Exception: + logger.error("Failed to add @ record for forest") + raise + logger.debug("Added @ record for forest") + + # Add remaining records in domain and forest + for node in zone.nodes: + name = node.relativize(forest_root).to_text() + if name == node.to_text(): + name = node.relativize(domain_root).to_text() + dn = "DC=%s,%s" % (name, domain_prefix) + fqdn = "%s.%s" % (name, dnsdomain) + else: + dn = "DC=%s,%s" % (name, forest_prefix) + fqdn = "%s.%s" % (name, dnsmsdcs) + + dns_rec = [] + for rdataset in zone.nodes[node]: + for rdata in rdataset: + rec = convert_dns_rdata(rdata, serial) + if not rec: + logger.warn("Unsupported record type (%s) for %s, ignoring" % + dns.rdatatype.to_text(rdata.rdatatype), name) + else: + dns_rec.append(ndr_pack(rec)) + + msg = ldb.Message(ldb.Dn(samdb, dn)) + msg["objectClass"] = ["top", "dnsNode"] + msg["dnsRecord"] = ldb.MessageElement(dns_rec, ldb.FLAG_MOD_ADD, + "dnsRecord") + try: + samdb.add(msg) + except Exception: + logger.error("Failed to add DNS record %s" % (fqdn)) + raise + logger.debug("Added DNS record %s" % (fqdn)) + +def cleanup_remove_file(file_path): + try: + os.remove(file_path) + except OSError as e: + if e.errno not in [errno.EEXIST, errno.ENOENT]: + pass + else: + logger.debug("Could not remove %s: %s" % (file_path, e.strerror)) + +def cleanup_remove_dir(dir_path): + try: + for root, dirs, files in os.walk(dir_path, topdown=False): + for name in files: + os.remove(os.path.join(root, name)) + for name in dirs: + os.rmdir(os.path.join(root, name)) + os.rmdir(dir_path) + except OSError as e: + if e.errno not in [errno.EEXIST, errno.ENOENT]: + pass + else: + logger.debug("Could not delete dir %s: %s" % (dir_path, e.strerror)) + +def cleanup_obsolete_dns_files(paths): + 
cleanup_remove_file(os.path.join(paths.private_dir, "named.conf")) + cleanup_remove_file(os.path.join(paths.private_dir, "named.conf.update")) + cleanup_remove_file(os.path.join(paths.private_dir, "named.txt")) + + cleanup_remove_dir(os.path.join(paths.private_dir, "dns")) + + +# dnsprovision creates application partitions for AD based DNS mainly if the existing +# provision was created using earlier snapshots of samba4 which did not have support +# for DNS partitions + +if __name__ == '__main__': + + # Setup command line parser + parser = optparse.OptionParser("samba_upgradedns [options]") + sambaopts = options.SambaOptions(parser) + credopts = options.CredentialsOptions(parser) + + parser.add_option_group(options.VersionOptions(parser)) + parser.add_option_group(sambaopts) + parser.add_option_group(credopts) + + parser.add_option("--dns-backend", type="choice", metavar="<BIND9_DLZ|SAMBA_INTERNAL>", + choices=["SAMBA_INTERNAL", "BIND9_DLZ"], default="SAMBA_INTERNAL", + help="The DNS server backend, default SAMBA_INTERNAL") + parser.add_option("--migrate", type="choice", metavar="<yes|no>", + choices=["yes","no"], default="yes", + help="Migrate existing zone data, default yes") + parser.add_option("--verbose", help="Be verbose", action="store_true") + + opts = parser.parse_args()[0] + + if opts.dns_backend is None: + opts.dns_backend = 'SAMBA_INTERNAL' + + if opts.migrate: + autofill = False + else: + autofill = True + + # Set up logger + logger = logging.getLogger("upgradedns") + logger.addHandler(logging.StreamHandler(sys.stdout)) + logger.setLevel(logging.INFO) + if opts.verbose: + logger.setLevel(logging.DEBUG) + + lp = sambaopts.get_loadparm() + lp.load(lp.configfile) + creds = credopts.get_credentials(lp) + + logger.info("Reading domain information") + paths = get_paths(param, smbconf=lp.configfile) + paths.bind_gid = find_bind_gid() + ldbs = get_ldbs(paths, creds, system_session(), lp) + names = find_provision_key_parameters(ldbs.sam, ldbs.secrets, 
ldbs.idmap, + paths, lp.configfile, lp) + + if names.domainlevel < DS_DOMAIN_FUNCTION_2003: + logger.error("Cannot create AD based DNS for OS level < 2003") + sys.exit(1) + + domaindn = names.domaindn + forestdn = names.rootdn + + dnsdomain = names.dnsdomain.lower() + dnsforest = dnsdomain + + site = names.sitename + hostname = names.hostname + dnsname = '%s.%s' % (hostname, dnsdomain) + + domainsid = names.domainsid + domainguid = names.domainguid + ntdsguid = names.ntdsguid + + # Check for DNS accounts and create them if required + try: + msg = ldbs.sam.search(base=domaindn, scope=ldb.SCOPE_DEFAULT, + expression='(sAMAccountName=DnsAdmins)', + attrs=['objectSid']) + dnsadmins_sid = ndr_unpack(security.dom_sid, msg[0]['objectSid'][0]) + except IndexError: + logger.info("Adding DNS accounts") + add_dns_accounts(ldbs.sam, domaindn) + dnsadmins_sid = get_dnsadmins_sid(ldbs.sam, domaindn) + else: + logger.info("DNS accounts already exist") + + # Import dns records from zone file + if os.path.exists(paths.dns): + logger.info("Reading records from zone file %s" % paths.dns) + try: + zone = dns.zone.from_file(paths.dns, relativize=False) + rrset = zone.get_rdataset("%s." 
% dnsdomain, dns.rdatatype.SOA) + serial = int(rrset[0].serial) + except Exception as e: + logger.warn("Error parsing DNS data from '%s' (%s)" % (paths.dns, str(e))) + autofill = True + else: + logger.info("No zone file %s (normal)" % paths.dns) + autofill = True + + # Create DNS partitions if missing and fill DNS information + try: + expression = '(|(dnsRoot=DomainDnsZones.%s)(dnsRoot=ForestDnsZones.%s))' % \ + (dnsdomain, dnsforest) + msg = ldbs.sam.search(base=names.configdn, scope=ldb.SCOPE_DEFAULT, + expression=expression, attrs=['nCName']) + ncname = msg[0]['nCName'][0] + except IndexError: + logger.info("Creating DNS partitions") + + logger.info("Looking up IPv4 addresses") + hostip = interface_ips_v4(lp) + try: + hostip.remove('127.0.0.1') + except ValueError: + pass + if not hostip: + logger.error("No IPv4 addresses found") + sys.exit(1) + else: + hostip = hostip[0] + logger.debug("IPv4 addresses: %s" % hostip) + + logger.info("Looking up IPv6 addresses") + hostip6 = interface_ips_v6(lp) + if not hostip6: + hostip6 = None + else: + hostip6 = hostip6[0] + logger.debug("IPv6 addresses: %s" % hostip6) + + create_dns_partitions(ldbs.sam, domainsid, names, domaindn, forestdn, + dnsadmins_sid, FILL_FULL) + + logger.info("Populating DNS partitions") + if autofill: + logger.warn("DNS records will be automatically created") + + fill_dns_data_partitions(ldbs.sam, domainsid, site, domaindn, forestdn, + dnsdomain, dnsforest, hostname, hostip, hostip6, + domainguid, ntdsguid, dnsadmins_sid, + autofill=autofill) + + if not autofill: + logger.info("Importing records from zone file") + import_zone_data(ldbs.sam, logger, zone, serial, domaindn, forestdn, + dnsdomain, dnsforest) + else: + logger.info("DNS partitions already exist") + + # Mark that we are hosting DNS partitions + try: + dns_nclist = [ 'DC=DomainDnsZones,%s' % domaindn, + 'DC=ForestDnsZones,%s' % forestdn ] + + msgs = ldbs.sam.search(base=names.serverdn, scope=ldb.SCOPE_DEFAULT, + 
expression='(objectclass=nTDSDSa)', + attrs=['hasPartialReplicaNCs', + 'msDS-hasMasterNCs']) + msg = msgs[0] + + master_nclist = [] + ncs = msg.get("msDS-hasMasterNCs") + if ncs: + for nc in ncs: + master_nclist.append(str(nc)) + + partial_nclist = [] + ncs = msg.get("hasPartialReplicaNCs") + if ncs: + for nc in ncs: + partial_nclist.append(str(nc)) + + modified_master = False + modified_partial = False + + for nc in dns_nclist: + if nc not in master_nclist: + master_nclist.append(nc) + modified_master = True + if nc in partial_nclist: + partial_nclist.remove(nc) + modified_partial = True + + if modified_master or modified_partial: + logger.debug("Updating msDS-hasMasterNCs and hasPartialReplicaNCs attributes") + m = ldb.Message() + m.dn = msg.dn + if modified_master: + m["msDS-hasMasterNCs"] = ldb.MessageElement(master_nclist, + ldb.FLAG_MOD_REPLACE, + "msDS-hasMasterNCs") + if modified_partial: + if partial_nclist: + m["hasPartialReplicaNCs"] = ldb.MessageElement(partial_nclist, + ldb.FLAG_MOD_REPLACE, + "hasPartialReplicaNCs") + else: + m["hasPartialReplicaNCs"] = ldb.MessageElement(ncs, + ldb.FLAG_MOD_DELETE, + "hasPartialReplicaNCs") + ldbs.sam.modify(m) + except Exception: + raise + + # Special stuff for DLZ backend + if opts.dns_backend == "BIND9_DLZ": + config_migration = False + + if (paths.private_dir != paths.binddns_dir and + os.path.isfile(os.path.join(paths.private_dir, "named.conf"))): + config_migration = True + + # Check if dns-HOSTNAME account exists and create it if required + secrets_msgs = ldbs.secrets.search(expression='(samAccountName=dns-%s)' % hostname, attrs=['secret']) + msg = ldbs.sam.search(base=domaindn, scope=ldb.SCOPE_DEFAULT, + expression='(sAMAccountName=dns-%s)' % (hostname), + attrs=[]) + + if len(secrets_msgs) == 0 or len(msg) == 0: + logger.info("Adding dns-%s account" % hostname) + + if len(secrets_msgs) == 1: + dn = secrets_msgs[0].dn + ldbs.secrets.delete(dn) + + if len(msg) == 1: + dn = msg[0].dn + ldbs.sam.delete(dn) + + 
dnspass = samba.generate_random_password(128, 255) + setup_add_ldif(ldbs.sam, setup_path("provision_dns_add_samba.ldif"), { + "DNSDOMAIN": dnsdomain, + "DOMAINDN": domaindn, + "DNSPASS_B64": b64encode(dnspass.encode('utf-16-le')).decode('utf8'), + "HOSTNAME" : hostname, + "DNSNAME" : dnsname } + ) + + res = ldbs.sam.search(base=domaindn, scope=ldb.SCOPE_DEFAULT, + expression='(sAMAccountName=dns-%s)' % (hostname), + attrs=["msDS-KeyVersionNumber"]) + if "msDS-KeyVersionNumber" in res[0]: + dns_key_version_number = int(res[0]["msDS-KeyVersionNumber"][0]) + else: + dns_key_version_number = None + + secretsdb_setup_dns(ldbs.secrets, names, + paths.private_dir, paths.binddns_dir, realm=names.realm, + dnsdomain=names.dnsdomain, + dns_keytab_path=paths.dns_keytab, dnspass=dnspass, + key_version_number=dns_key_version_number) + + else: + logger.info("dns-%s account already exists" % hostname) + + if not os.path.exists(paths.binddns_dir): + # This directory won't exist if we're restoring from an offline backup. + os.mkdir(paths.binddns_dir, 0o770) + + create_dns_dir_keytab_link(logger, paths) + + # This forces a re-creation of dns directory and all the files within + # It's an overkill, but it's easier to re-create a samdb copy, rather + # than trying to fix a broken copy. 
+ create_dns_dir(logger, paths) + + # Setup a copy of SAM for BIND9 + create_samdb_copy(ldbs.sam, logger, paths, names, domainsid, + domainguid) + + create_named_conf(paths, names.realm, dnsdomain, opts.dns_backend, logger) + + create_named_txt(paths.namedtxt, names.realm, dnsdomain, dnsname, + paths.binddns_dir, paths.dns_keytab) + + cleanup_obsolete_dns_files(paths) + + if config_migration: + logger.info("ATTENTION: The BIND configuration and keytab has been moved to: %s", + paths.binddns_dir) + logger.info(" Please update your BIND configuration accordingly.") + else: + logger.info("See %s for an example configuration include file for BIND", paths.namedconf) + logger.info("and %s for further documentation required for secure DNS " + "updates", paths.namedtxt) + + elif opts.dns_backend == "SAMBA_INTERNAL": + # Make sure to remove everything from the bind-dns directory to avoid + # possible security issues with the named group having write access + # to all AD partitions + cleanup_remove_file(os.path.join(paths.binddns_dir, "dns.keytab")) + cleanup_remove_file(os.path.join(paths.binddns_dir, "named.conf")) + cleanup_remove_file(os.path.join(paths.binddns_dir, "named.conf.update")) + cleanup_remove_file(os.path.join(paths.binddns_dir, "named.txt")) + + cleanup_remove_dir(os.path.dirname(paths.dns)) + + try: + os.chmod(paths.private_dir, 0o700) + os.chown(paths.private_dir, -1, 0) + except: + logger.warn("Failed to restore owner and permissions for %s", + (paths.private_dir)) + + # Check if dns-HOSTNAME account exists and delete it if required + try: + dn_str = 'samAccountName=dns-%s,CN=Principals' % hostname + msg = ldbs.secrets.search(expression='(dn=%s)' % dn_str, attrs=[]) + dn = msg[0].dn + except IndexError: + dn = None + + if dn is not None: + try: + ldbs.secrets.delete(dn) + except Exception: + logger.info("Failed to delete %s from secrets.ldb" % dn) + + try: + msg = ldbs.sam.search(base=domaindn, scope=ldb.SCOPE_DEFAULT, + 
expression='(sAMAccountName=dns-%s)' % (hostname), + attrs=[]) + dn = msg[0].dn + except IndexError: + dn = None + + if dn is not None: + try: + ldbs.sam.delete(dn) + except Exception: + logger.info("Failed to delete %s from sam.ldb" % dn) + + logger.info("Finished upgrading DNS") + + services = lp.get("server services") + for service in services: + if service == "dns": + if opts.dns_backend.startswith("BIND"): + logger.info("You have switched to using %s as your dns backend," + " but still have the internal dns starting. Please" + " make sure you add '-dns' to your server services" + " line in your smb.conf." % opts.dns_backend) + break + else: + if opts.dns_backend == "SAMBA_INTERNAL": + logger.info("You have switched to using %s as your dns backend," + " but you still have samba starting looking for a" + " BIND backend. Please remove the -dns from your" + " server services line." % opts.dns_backend) diff --git a/source4/scripting/bin/samba_upgradeprovision b/source4/scripting/bin/samba_upgradeprovision new file mode 100755 index 0000000..3d072bc --- /dev/null +++ b/source4/scripting/bin/samba_upgradeprovision @@ -0,0 +1,1848 @@ +#!/usr/bin/env python3 +# vim: expandtab +# +# Copyright (C) Matthieu Patou <mat@matws.net> 2009 - 2010 +# +# Based on provision a Samba4 server by +# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2008 +# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2008 +# +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + + +import logging +import optparse +import os +import shutil +import sys +import tempfile +import re +import traceback +# Allow to run from s4 source directory (without installing samba) +sys.path.insert(0, "bin/python") + +import ldb +import samba +import samba.getopt as options +from samba.samdb import get_default_backend_store + +from base64 import b64encode +from samba.credentials import DONT_USE_KERBEROS +from samba.auth import system_session, admin_session +from samba import tdb_util +from samba import mdb_util +from ldb import (SCOPE_SUBTREE, SCOPE_BASE, + FLAG_MOD_REPLACE, FLAG_MOD_ADD, FLAG_MOD_DELETE, + MessageElement, Message, Dn, LdbError) +from samba import param, dsdb, Ldb +from samba.common import confirm +from samba.descriptor import get_wellknown_sds, get_empty_descriptor, get_diff_sds +from samba.provision import (find_provision_key_parameters, + ProvisioningError, get_last_provision_usn, + get_max_usn, update_provision_usn, setup_path) +from samba.schema import get_linked_attributes, Schema, get_schema_descriptor +from samba.dcerpc import security, drsblobs +from samba.dcerpc.security import ( + SECINFO_OWNER, SECINFO_GROUP, SECINFO_DACL, SECINFO_SACL) +from samba.ndr import ndr_unpack +from samba.upgradehelpers import (dn_sort, get_paths, newprovision, + get_ldbs, findprovisionrange, + usn_in_range, identic_rename, + update_secrets, CHANGE, ERROR, SIMPLE, + CHANGEALL, GUESS, CHANGESD, PROVISION, + updateOEMInfo, getOEMInfo, update_gpo, + delta_update_basesamdb, update_policyids, + update_machine_account_password, + search_constructed_attrs_stored, + int64range2str, update_dns_account_password, + increment_calculated_keyversion_number, + print_provision_ranges) +from samba.xattr import copytree_with_xattrs +from functools import cmp_to_key + +# make sure the script dies immediately when hitting 
control-C, +# rather than raising KeyboardInterrupt. As we do all database +# operations using transactions, this is safe. +import signal +signal.signal(signal.SIGINT, signal.SIG_DFL) + +replace=2**FLAG_MOD_REPLACE +add=2**FLAG_MOD_ADD +delete=2**FLAG_MOD_DELETE +never=0 + + +# Will be modified during provision to tell if default sd has been modified +# somehow ... + +#Errors are always logged + +__docformat__ = "restructuredText" + +# Attributes that are never copied from the reference provision (even if they +# do not exist in the destination object). +# This is most probably because they are populated automatcally when object is +# created +# This also apply to imported object from reference provision +replAttrNotCopied = [ "dn", "whenCreated", "whenChanged", "objectGUID", + "parentGUID", "distinguishedName", + "instanceType", "cn", + "lmPwdHistory", "pwdLastSet", "ntPwdHistory", + "unicodePwd", "dBCSPwd", "supplementalCredentials", + "gPCUserExtensionNames", "gPCMachineExtensionNames", + "maxPwdAge", "secret", "possibleInferiors", "privilege", + "sAMAccountType", "oEMInformation", "creationTime" ] + +nonreplAttrNotCopied = ["uSNCreated", "replPropertyMetaData", "uSNChanged", + "nextRid" ,"rIDNextRID", "rIDPreviousAllocationPool"] + +nonDSDBAttrNotCopied = ["msDS-KeyVersionNumber", "priorSecret", "priorWhenChanged"] + + +attrNotCopied = replAttrNotCopied +attrNotCopied.extend(nonreplAttrNotCopied) +attrNotCopied.extend(nonDSDBAttrNotCopied) +# Usually for an object that already exists we do not overwrite attributes as +# they might have been changed for good reasons. Anyway for a few of them it's +# mandatory to replace them otherwise the provision will be broken somehow. 
+# But for attribute that are just missing we do not have to specify them as the default +# behavior is to add missing attribute +hashOverwrittenAtt = { "prefixMap": replace, "systemMayContain": replace, + "systemOnly":replace, "searchFlags":replace, + "mayContain":replace, "systemFlags":replace+add, + "description":replace, "operatingSystemVersion":replace, + "adminPropertyPages":replace, "groupType":replace, + "wellKnownObjects":replace, "privilege":never, + "rIDAvailablePool": never, + "rIDNextRID": add, "rIDUsedPool": never, + "defaultSecurityDescriptor": replace + add, + "isMemberOfPartialAttributeSet": delete, + "attributeDisplayNames": replace + add, + "versionNumber": add} + +dnNotToRecalculateFound = False +dnToRecalculate = [] +backlinked = [] +forwardlinked = set() +dn_syntax_att = [] +not_replicated = [] +def define_what_to_log(opts): + what = 0 + if opts.debugchange: + what = what | CHANGE + if opts.debugchangesd: + what = what | CHANGESD + if opts.debugguess: + what = what | GUESS + if opts.debugprovision: + what = what | PROVISION + if opts.debugall: + what = what | CHANGEALL + return what + + +parser = optparse.OptionParser("samba_upgradeprovision [options]") +sambaopts = options.SambaOptions(parser) +parser.add_option_group(sambaopts) +parser.add_option_group(options.VersionOptions(parser)) +credopts = options.CredentialsOptions(parser) +parser.add_option_group(credopts) +parser.add_option("--setupdir", type="string", metavar="DIR", + help="directory with setup files") +parser.add_option("--debugprovision", help="Debug provision", action="store_true") +parser.add_option("--debugguess", action="store_true", + help="Print information on which values are guessed") +parser.add_option("--debugchange", action="store_true", + help="Print information on what is different but won't be changed") +parser.add_option("--debugchangesd", action="store_true", + help="Print security descriptor differences") +parser.add_option("--debugall", action="store_true", + 
help="Print all available information (very verbose)") +parser.add_option("--db_backup_only", action="store_true", + help="Do the backup of the database in the provision, skip the sysvol / netlogon shares") +parser.add_option("--full", action="store_true", + help="Perform full upgrade of the samdb (schema, configuration, new objects, ...") +parser.add_option("--very-old-pre-alpha9", action="store_true", + help="Perform additional forced SD resets required for a database from before Samba 4.0.0alpha9.") + +opts = parser.parse_args()[0] + +handler = logging.StreamHandler(sys.stdout) +upgrade_logger = logging.getLogger("upgradeprovision") +upgrade_logger.setLevel(logging.INFO) + +upgrade_logger.addHandler(handler) + +provision_logger = logging.getLogger("provision") +provision_logger.addHandler(handler) + +whatToLog = define_what_to_log(opts) + +def message(what, text): + """Print a message if this message type has been selected to be printed + + :param what: Category of the message + :param text: Message to print """ + if (whatToLog & what) or what <= 0: + upgrade_logger.info("%s", text) + +if len(sys.argv) == 1: + opts.interactive = True +lp = sambaopts.get_loadparm() +smbconf = lp.configfile + +creds = credopts.get_credentials(lp) +creds.set_kerberos_state(DONT_USE_KERBEROS) + + + +def check_for_DNS(refprivate, private, refbinddns_dir, binddns_dir, dns_backend): + """Check if the provision has already the requirement for dynamic dns + + :param refprivate: The path to the private directory of the reference + provision + :param private: The path to the private directory of the upgraded + provision""" + + spnfile = "%s/spn_update_list" % private + dnsfile = "%s/dns_update_list" % private + + if not os.path.exists(spnfile): + shutil.copy("%s/spn_update_list" % refprivate, "%s" % spnfile) + + if not os.path.exists(dnsfile): + shutil.copy("%s/dns_update_list" % refprivate, "%s" % dnsfile) + + if not os.path.exists(binddns_dir): + os.mkdir(binddns_dir) + + if dns_backend 
not in ['BIND9_DLZ', 'BIND9_FLATFILE']: + return + + namedfile = lp.get("dnsupdate:path") + if not namedfile: + namedfile = "%s/named.conf.update" % binddns_dir + if not os.path.exists(namedfile): + destdir = "%s/new_dns" % binddns_dir + dnsdir = "%s/dns" % binddns_dir + + if not os.path.exists(destdir): + os.mkdir(destdir) + if not os.path.exists(dnsdir): + os.mkdir(dnsdir) + shutil.copy("%s/named.conf" % refbinddns_dir, "%s/named.conf" % destdir) + shutil.copy("%s/named.txt" % refbinddns_dir, "%s/named.txt" % destdir) + message(SIMPLE, "It seems that your provision did not integrate " + "new rules for dynamic dns update of domain related entries") + message(SIMPLE, "A copy of the new bind configuration files and " + "template has been put in %s, you should read them and " + "configure dynamic dns updates" % destdir) + + +def populate_links(samdb, schemadn): + """Populate an array with all the back linked attributes + + This attributes that are modified automatically when + front attibutes are changed + + :param samdb: A LDB object for sam.ldb file + :param schemadn: DN of the schema for the partition""" + linkedAttHash = get_linked_attributes(Dn(samdb, str(schemadn)), samdb) + backlinked.extend(linkedAttHash.values()) + for t in linkedAttHash.keys(): + forwardlinked.add(t) + +def isReplicated(att): + """ Indicate if the attribute is replicated or not + + :param att: Name of the attribute to be tested + :return: True is the attribute is replicated, False otherwise + """ + + return (att not in not_replicated) + +def populateNotReplicated(samdb, schemadn): + """Populate an array with all the attributes that are not replicated + + :param samdb: A LDB object for sam.ldb file + :param schemadn: DN of the schema for the partition""" + res = samdb.search(expression="(&(objectclass=attributeSchema)(systemflags:1.2.840.113556.1.4.803:=1))", base=Dn(samdb, + str(schemadn)), scope=SCOPE_SUBTREE, + attrs=["lDAPDisplayName"]) + for elem in res: + 
not_replicated.append(str(elem["lDAPDisplayName"])) + + +def populate_dnsyntax(samdb, schemadn): + """Populate an array with all the attributes that have DN synthax + (oid 2.5.5.1) + + :param samdb: A LDB object for sam.ldb file + :param schemadn: DN of the schema for the partition""" + res = samdb.search(expression="(attributeSyntax=2.5.5.1)", base=Dn(samdb, + str(schemadn)), scope=SCOPE_SUBTREE, + attrs=["lDAPDisplayName"]) + for elem in res: + dn_syntax_att.append(elem["lDAPDisplayName"]) + + +def sanitychecks(samdb, names): + """Make some checks before trying to update + + :param samdb: An LDB object opened on sam.ldb + :param names: list of key provision parameters + :return: Status of check (1 for Ok, 0 for not Ok) """ + res = samdb.search(expression="objectClass=ntdsdsa", base=str(names.configdn), + scope=SCOPE_SUBTREE, attrs=["dn"], + controls=["search_options:1:2"]) + if len(res) == 0: + print("No DC found. Your provision is most probably broken!") + return False + elif len(res) != 1: + print("Found %d domain controllers. For the moment " \ + "upgradeprovision is not able to handle an upgrade on a " \ + "domain with more than one DC. 
Please demote the other " \ + "DC(s) before upgrading") % len(res) + return False + else: + return True + + +def print_provision_key_parameters(names): + """Do a a pretty print of provision parameters + + :param names: list of key provision parameters """ + message(GUESS, "rootdn :" + str(names.rootdn)) + message(GUESS, "configdn :" + str(names.configdn)) + message(GUESS, "schemadn :" + str(names.schemadn)) + message(GUESS, "serverdn :" + str(names.serverdn)) + message(GUESS, "netbiosname :" + names.netbiosname) + message(GUESS, "defaultsite :" + names.sitename) + message(GUESS, "dnsdomain :" + names.dnsdomain) + message(GUESS, "hostname :" + names.hostname) + message(GUESS, "domain :" + names.domain) + message(GUESS, "realm :" + names.realm) + message(GUESS, "invocationid:" + names.invocation) + message(GUESS, "policyguid :" + names.policyid) + message(GUESS, "policyguiddc:" + str(names.policyid_dc)) + message(GUESS, "domainsid :" + str(names.domainsid)) + message(GUESS, "domainguid :" + names.domainguid) + message(GUESS, "ntdsguid :" + names.ntdsguid) + message(GUESS, "domainlevel :" + str(names.domainlevel)) + + +def handle_special_case(att, delta, new, old, useReplMetadata, basedn, aldb): + """Define more complicate update rules for some attributes + + :param att: The attribute to be updated + :param delta: A messageElement object that correspond to the difference + between the updated object and the reference one + :param new: The reference object + :param old: The Updated object + :param useReplMetadata: A boolean that indicate if the update process + use replPropertyMetaData to decide what has to be updated. 
+ :param basedn: The base DN of the provision + :param aldb: An ldb object used to build DN + :return: True to indicate that the attribute should be kept, False for + discarding it""" + + # We do most of the special case handle if we do not have the + # highest usn as otherwise the replPropertyMetaData will guide us more + # correctly + if not useReplMetadata: + flag = delta.get(att).flags() + if (att == "sPNMappings" and flag == FLAG_MOD_REPLACE and + ldb.Dn(aldb, "CN=Directory Service,CN=Windows NT," + "CN=Services,CN=Configuration,%s" % basedn) + == old[0].dn): + return True + if (att == "userAccountControl" and flag == FLAG_MOD_REPLACE and + ldb.Dn(aldb, "CN=Administrator,CN=Users,%s" % basedn) + == old[0].dn): + message(SIMPLE, "We suggest that you change the userAccountControl" + " for user Administrator from value %d to %d" % + (int(str(old[0][att])), int(str(new[0][att])))) + return False + if (att == "minPwdAge" and flag == FLAG_MOD_REPLACE): + if (int(str(old[0][att])) == 0): + delta[att] = MessageElement(new[0][att], FLAG_MOD_REPLACE, att) + return True + + if (att == "member" and flag == FLAG_MOD_REPLACE): + hash = {} + newval = [] + changeDelta=0 + for elem in old[0][att]: + hash[str(elem).lower()]=1 + newval.append(str(elem)) + + for elem in new[0][att]: + if not str(elem).lower() in hash: + changeDelta=1 + newval.append(str(elem)) + if changeDelta == 1: + delta[att] = MessageElement(newval, FLAG_MOD_REPLACE, att) + else: + delta.remove(att) + return True + + if (att in ("gPLink", "gPCFileSysPath") and + flag == FLAG_MOD_REPLACE and + str(new[0].dn).lower() == str(old[0].dn).lower()): + delta.remove(att) + return True + + if att == "forceLogoff": + ref=0x8000000000000000 + oldval=int(old[0][att][0]) + newval=int(new[0][att][0]) + ref == old and ref == abs(new) + return True + + if att in ("adminDisplayName", "adminDescription"): + return True + + if (str(old[0].dn) == "CN=Samba4-Local-Domain, %s" % (names.schemadn) + and att == "defaultObjectCategory" 
and flag == FLAG_MOD_REPLACE): + return True + + if (str(old[0].dn) == "CN=Title, %s" % (str(names.schemadn)) and + att == "rangeUpper" and flag == FLAG_MOD_REPLACE): + return True + + if (str(old[0].dn) == "%s" % (str(names.rootdn)) + and att == "subRefs" and flag == FLAG_MOD_REPLACE): + return True + #Allow to change revision of ForestUpdates objects + if (att == "revision" or att == "objectVersion"): + if str(delta.dn).lower().find("domainupdates") and str(delta.dn).lower().find("forestupdates") > 0: + return True + if str(delta.dn).endswith("CN=DisplaySpecifiers, %s" % names.configdn): + return True + + # This is a bit of special animal as we might have added + # already SPN entries to the list that has to be modified + # So we go in detail to try to find out what has to be added ... + if (att == "servicePrincipalName" and delta.get(att).flags() == FLAG_MOD_REPLACE): + hash = {} + newval = [] + changeDelta = 0 + for elem in old[0][att]: + hash[str(elem)]=1 + newval.append(str(elem)) + + for elem in new[0][att]: + if not str(elem) in hash: + changeDelta = 1 + newval.append(str(elem)) + if changeDelta == 1: + delta[att] = MessageElement(newval, FLAG_MOD_REPLACE, att) + else: + delta.remove(att) + return True + + return False + +def dump_denied_change(dn, att, flagtxt, current, reference): + """Print detailed information about why a change is denied + + :param dn: DN of the object which attribute is denied + :param att: Attribute that was supposed to be upgraded + :param flagtxt: Type of the update that should be performed + (add, change, remove, ...) + :param current: Value(s) of the current attribute + :param reference: Value(s) of the reference attribute""" + + message(CHANGE, "dn= " + str(dn)+" " + att+" with flag " + flagtxt + + " must not be changed/removed. 
Discarding the change") + if att == "objectSid" : + message(CHANGE, "old : %s" % ndr_unpack(security.dom_sid, current[0])) + message(CHANGE, "new : %s" % ndr_unpack(security.dom_sid, reference[0])) + elif att == "rIDPreviousAllocationPool" or att == "rIDAllocationPool": + message(CHANGE, "old : %s" % int64range2str(current[0])) + message(CHANGE, "new : %s" % int64range2str(reference[0])) + else: + i = 0 + for e in range(0, len(current)): + message(CHANGE, "old %d : %s" % (i, str(current[e]))) + i+=1 + if reference is not None: + i = 0 + for e in range(0, len(reference)): + message(CHANGE, "new %d : %s" % (i, str(reference[e]))) + i+=1 + +def handle_special_add(samdb, dn, names): + """Handle special operation (like remove) on some object needed during + upgrade + + This is mostly due to wrong creation of the object in previous provision. + :param samdb: An Ldb object representing the SAM database + :param dn: DN of the object to inspect + :param names: list of key provision parameters + """ + + dntoremove = None + objDn = Dn(samdb, "CN=IIS_IUSRS, CN=Builtin, %s" % names.rootdn) + if dn == objDn : + #This entry was misplaced lets remove it if it exists + dntoremove = "CN=IIS_IUSRS, CN=Users, %s" % names.rootdn + + objDn = Dn(samdb, + "CN=Certificate Service DCOM Access, CN=Builtin, %s" % names.rootdn) + if dn == objDn: + #This entry was misplaced lets remove it if it exists + dntoremove = "CN=Certificate Service DCOM Access,"\ + "CN=Users, %s" % names.rootdn + + objDn = Dn(samdb, "CN=Cryptographic Operators, CN=Builtin, %s" % names.rootdn) + if dn == objDn: + #This entry was misplaced lets remove it if it exists + dntoremove = "CN=Cryptographic Operators, CN=Users, %s" % names.rootdn + + objDn = Dn(samdb, "CN=Event Log Readers, CN=Builtin, %s" % names.rootdn) + if dn == objDn: + #This entry was misplaced lets remove it if it exists + dntoremove = "CN=Event Log Readers, CN=Users, %s" % names.rootdn + + objDn = Dn(samdb,"CN=System,CN=WellKnown Security Principals," + 
"CN=Configuration,%s" % names.rootdn) + if dn == objDn: + oldDn = Dn(samdb,"CN=Well-Known-Security-Id-System," + "CN=WellKnown Security Principals," + "CN=Configuration,%s" % names.rootdn) + + res = samdb.search(expression="(distinguishedName=%s)" % oldDn, + base=str(names.rootdn), + scope=SCOPE_SUBTREE, attrs=["dn"], + controls=["search_options:1:2"]) + + res2 = samdb.search(expression="(distinguishedName=%s)" % dn, + base=str(names.rootdn), + scope=SCOPE_SUBTREE, attrs=["dn"], + controls=["search_options:1:2"]) + + if len(res) > 0 and len(res2) == 0: + message(CHANGE, "Existing object %s must be replaced by %s. " + "Renaming old object" % (str(oldDn), str(dn))) + samdb.rename(oldDn, objDn, ["relax:0", "provision:0"]) + + return 0 + + if dntoremove is not None: + res = samdb.search(expression="(cn=RID Set)", + base=str(names.rootdn), + scope=SCOPE_SUBTREE, attrs=["dn"], + controls=["search_options:1:2"]) + + if len(res) == 0: + return 2 + res = samdb.search(expression="(distinguishedName=%s)" % dntoremove, + base=str(names.rootdn), + scope=SCOPE_SUBTREE, attrs=["dn"], + controls=["search_options:1:2"]) + if len(res) > 0: + message(CHANGE, "Existing object %s must be replaced by %s. " + "Removing old object" % (dntoremove, str(dn))) + samdb.delete(res[0]["dn"]) + return 0 + + return 1 + + +def check_dn_nottobecreated(hash, index, listdn): + """Check if one of the DN present in the list has a creation order + greater than the current. + + Hash is indexed by dn to be created, with each key + is associated the creation order. + + First dn to be created has the creation order 0, second has 1, ... 
def check_dn_nottobecreated(hash, index, listdn):
    """Check if one of the DN present in the list has a creation order
    greater than the current.

    Hash is indexed by dn to be created, with each key
    is associated the creation order.

    First dn to be created has the creation order 0, second has 1, ...
    Index contain the current creation order

    :param hash: Hash holding the different DN of the object to be
                 created as key
    :param index: Current creation order
    :param listdn: List of DNs on which the current DN depends on
    :return: None if the current object do not depend on other
             object or if all object have been created before."""
    if listdn is None:
        return None
    for dn in listdn:
        key = str(dn).lower()
        # A greater creation order means the dependency is not created yet.
        if key in hash and hash[key] > index:
            return str(dn)
    return None



def add_missing_object(ref_samdb, samdb, dn, names, basedn, hash, index):
    """Add a new object if the dependencies are satisfied

    The function add the object if the object on which it depends are already
    created

    :param ref_samdb: Ldb object representing the SAM db of the reference
        provision
    :param samdb: Ldb object representing the SAM db of the upgraded
        provision
    :param dn: DN of the object to be added
    :param names: List of key provision parameters
    :param basedn: DN of the partition to be updated
    :param hash: Hash holding the different DN of the object to be
        created as key
    :param index: Current creation order
    :return: True if the object was created False otherwise"""

    # Legacy/misplaced objects get special treatment first.
    ret = handle_special_add(samdb, dn, names)

    if ret == 2:
        # Special handling says the object cannot be created now.
        return False

    if ret == 0:
        # Already fully handled (renamed/removed); nothing more to do.
        return True


    reference = ref_samdb.search(expression="(distinguishedName=%s)" % (str(dn)),
                                 base=basedn, scope=SCOPE_SUBTREE,
                                 controls=["search_options:1:2"])
    empty = Message()
    # Diff against an empty message yields a message that adds the object.
    delta = samdb.msg_diff(empty, reference[0])
    # NOTE(review): bare attribute access below looks like a vestigial no-op.
    delta.dn
    skip = False
    try:
        if str(reference[0].get("cn")) == "RID Set":
            for klass in reference[0].get("objectClass"):
                if str(klass).lower() == "ridset":
                    # RID Set objects are skipped (not copied over).
                    skip = True
    finally:
        if delta.get("objectSid"):
            sid = str(ndr_unpack(security.dom_sid, reference[0]["objectSid"][0]))
            m = re.match(r".*-(\d+)$", sid)
            # RIDs above 999 are dropped from the add so they are not forced.
            if m and int(m.group(1))>999:
                delta.remove("objectSid")
        for att in attrNotCopied:
            delta.remove(att)
        for att in backlinked:
            delta.remove(att)
        for att in dn_syntax_att:
            # Defer creation while a DN-valued attribute points at an object
            # that is scheduled to be created later.
            depend_on_yet_tobecreated = check_dn_nottobecreated(hash, index,
                                                                delta.get(str(att)))
            if depend_on_yet_tobecreated is not None:
                message(CHANGE, "Object %s depends on %s in attribute %s. "
                        "Delaying the creation" % (dn,
                                                   depend_on_yet_tobecreated, att))
                return False

        delta.dn = dn
        if not skip:
            message(CHANGE,"Object %s will be added" % dn)
            samdb.add(delta, ["relax:0", "provision:0"])
        else:
            message(CHANGE,"Object %s was skipped" % dn)

        return True
def gen_dn_index_hash(listMissing):
    """Map each DN to its creation order.

    Keys are the lower-cased string form of each DN; values are the position
    of that DN in *listMissing* (0 for the first DN to create, 1 for the
    second, ...).

    :param listMissing: List of DN
    :return: Hash with DN as keys and creation order as values"""
    return {str(dn).lower(): order for order, dn in enumerate(listMissing)}
def add_deletedobj_containers(ref_samdb, samdb, names):
    """Add the object container: CN=Deleted Objects

    This function create the container for each partition that need one and
    then reference the object into the root of the partition

    :param ref_samdb: Ldb object representing the SAM db of the reference
        provision
    :param samdb: Ldb object representing the SAM db of the upgraded provision
    :param names: List of key provision parameters"""


    # GUID prefix of the Deleted Objects wellKnownObjects entry.
    wkoPrefix = "B:32:18E2EA80684F11D2B9AA00C04F79F805"
    partitions = [str(names.rootdn), str(names.configdn)]
    for part in partitions:
        ref_delObjCnt = ref_samdb.search(expression="(cn=Deleted Objects)",
                                         base=part, scope=SCOPE_SUBTREE,
                                         attrs=["dn"],
                                         controls=["show_deleted:0",
                                                   "show_recycled:0"])
        delObjCnt = samdb.search(expression="(cn=Deleted Objects)",
                                 base=part, scope=SCOPE_SUBTREE,
                                 attrs=["dn"],
                                 controls=["show_deleted:0",
                                           "show_recycled:0"])
        if len(ref_delObjCnt) > len(delObjCnt):
            # Container exists in the reference but not here: copy it over.
            reference = ref_samdb.search(expression="cn=Deleted Objects",
                                         base=part, scope=SCOPE_SUBTREE,
                                         controls=["show_deleted:0",
                                                   "show_recycled:0"])
            empty = Message()
            delta = samdb.msg_diff(empty, reference[0])

            delta.dn = Dn(samdb, str(reference[0]["dn"]))
            for att in attrNotCopied:
                delta.remove(att)

            modcontrols = ["relax:0", "provision:0"]
            samdb.add(delta, modcontrols)

            listwko = []
            res = samdb.search(expression="(objectClass=*)", base=part,
                               scope=SCOPE_BASE,
                               attrs=["dn", "wellKnownObjects"])

            targetWKO = "%s:%s" % (wkoPrefix, str(reference[0]["dn"]))
            found = False

            if len(res[0]) > 0:
                wko = res[0]["wellKnownObjects"]

                # The wellKnownObject that we want to add.
                for o in wko:
                    if str(o) == targetWKO:
                        found = True
                    listwko.append(str(o))

                if not found:
                    listwko.append(targetWKO)

                delta = Message()
                delta.dn = Dn(samdb, str(res[0]["dn"]))
                delta["wellKnownObjects"] = MessageElement(listwko,
                                                           FLAG_MOD_REPLACE,
                                                           "wellKnownObjects" )
                samdb.modify(delta)

def add_missing_entries(ref_samdb, samdb, names, basedn, list):
    """Add the missing object whose DN is the list

    The function add the object if the objects on which it depends are
    already created.

    :param ref_samdb: Ldb object representing the SAM db of the reference
        provision
    :param samdb: Ldb object representing the SAM db of the upgraded
        provision
    :param names: List of key provision parameters
    :param basedn: DN of the partition to be updated
    :param list: List of DN to be added in the upgraded provision
    :raises ProvisioningError: when no progress can be made because the
        remaining objects depend on each other (circular references)"""

    listMissing = []
    listDefered = list

    # Keep retrying the deferred objects until all are created or a full
    # pass makes no progress.
    while(len(listDefered) != len(listMissing) and len(listDefered) > 0):
        index = 0
        listMissing = listDefered
        listDefered = []
        hashMissing = gen_dn_index_hash(listMissing)
        for dn in listMissing:
            ret = add_missing_object(ref_samdb, samdb, dn, names, basedn,
                                     hashMissing, index)
            index = index + 1
            if ret == 0:
                # False == 0: DN can't be created because it depends on some
                # other DN in the list
                listDefered.append(dn)

    if len(listDefered) != 0:
        raise ProvisioningError("Unable to insert missing elements: "
                                "circular references")

def handle_links(samdb, att, basedn, dn, value, ref_value, delta):
    """This function handle updates on links

    :param samdb: An LDB object pointing to the updated provision
    :param att: Attribute to update
    :param basedn: The root DN of the provision
    :param dn: The DN of the inspected object
    :param value: The value of the attribute
    :param ref_value: The value of this attribute in the reference provision
    :param delta: The MessageElement object that will be applied for
        transforming the current provision
    :return: The (possibly modified) delta"""

    # The "reveal:1" control also returns deactivated link values.
    res = samdb.search(base=dn, controls=["search_options:1:2", "reveal:1"],
                       attrs=[att])

    blacklist = {}
    hash = {}
    newlinklist = []
    changed = False

    for v in value:
        newlinklist.append(str(v))

    for e in value:
        hash[e] = 1
    # for w2k domain level the reveal won't reveal anything ...
    # it means that we can readd links that were removed on purpose ...
    # Also this function in fact just accepts additions, not removals

    for e in res[0][att]:
        if not e in hash:
            # We put in the blacklist all the elements that are in the
            # "revealed" result and not in the "standard" result.
            # These elements are links that were removed before and that
            # we don't want to readd
            blacklist[e] = 1

    for e in ref_value:
        if not e in blacklist and not e in hash:
            newlinklist.append(str(e))
            changed = True
    if changed:
        delta[att] = MessageElement(newlinklist, FLAG_MOD_REPLACE, att)
    else:
        delta.remove(att)

    return delta
def checkKeepAttributeWithMetadata(delta, att, message, reference, current,
                                   hash_attr_usn, basedn, usns, samdb):
    """ Check if we should keep the attribute modification or not

    :param delta: A message diff object
    :param att: An attribute
    :param message: A function to print messages
    :param reference: A message object for the current entry coming from
        the reference provision.
    :param current: A message object for the current entry coming from
        the current provision.
    :param hash_attr_usn: A dictionary with attribute name as keys,
        USN and invocation id as values.
    :param basedn: The DN of the partition
    :param usns: A dictionary with invocation ID as keys and USN ranges
        as values.
    :param samdb: A ldb object pointing to the sam DB

    :return: The modified message diff.
    """
    # NOTE(review): besides the declared global below, this function also
    # reads the module-level `names`, `dnToRecalculate` and
    # `dnNotToRecalculateFound`.
    global defSDmodified
    isFirst = True
    txt = ""
    dn = current[0].dn

    # NOTE: the loop variable deliberately rebinds the `att` parameter.
    for att in list(delta):
        if att in ["dn", "objectSid"]:
            delta.remove(att)
            continue

        # We have updated by provision usn information so let's exploit
        # replMetadataProperties
        if att in forwardlinked:
            curval = current[0].get(att, ())
            refval = reference[0].get(att, ())
            delta = handle_links(samdb, att, basedn, current[0]["dn"],
                                 curval, refval, delta)
            continue


        # Emit the DN header once, before the first reported attribute.
        if isFirst and len(list(delta)) > 1:
            isFirst = False
            txt = "%s\n" % (str(dn))

        if handle_special_case(att, delta, reference, current, True, None, None):
            # This attribute is "complicated" to handle and handling
            # was done in handle_special_case
            continue

        attrUSN = None
        if hash_attr_usn.get(att):
            [attrUSN, attInvId] = hash_attr_usn.get(att)

        if attrUSN is None:
            # If it's a replicated attribute and we don't have any USN
            # information about it. It means that we never saw it before
            # so let's add it !
            # If it is a replicated attribute but we are not master on it
            # (ie. not initially added in the provision we masterize),
            # attrUSN will be -1
            if isReplicated(att):
                continue
            else:
                message(CHANGE, "Non replicated attribute %s changed" % att)
                continue

        if att == "nTSecurityDescriptor":
            cursd = ndr_unpack(security.descriptor,
                               current[0]["nTSecurityDescriptor"][0])
            refsd = ndr_unpack(security.descriptor,
                               reference[0]["nTSecurityDescriptor"][0])

            diff = get_diff_sds(refsd, cursd, names.domainsid)
            if diff == "":
                # FIXME find a way to have it only with huge huge verbose mode
                # message(CHANGE, "%ssd are identical" % txt)
                # txt = ""
                delta.remove(att)
                continue
            else:
                # SDs are never applied through the delta; they are either
                # reported or queued for full recalculation.
                delta.remove(att)
                message(CHANGESD, "%ssd are not identical:\n%s" % (txt, diff))
                txt = ""
                if attrUSN == -1:
                    message(CHANGESD, "But the SD has been changed by someonelse "
                            "so it's impossible to know if the difference"
                            " cames from the modification or from a previous bug")
                    global dnNotToRecalculateFound
                    dnNotToRecalculateFound = True
                else:
                    dnToRecalculate.append(dn)
                continue

        if attrUSN == -1:
            # This attribute was last modified by another DC forget
            # about it
            message(CHANGE, "%sAttribute: %s has been "
                    "created/modified/deleted by another DC. "
                    "Doing nothing" % (txt, att))
            txt = ""
            delta.remove(att)
            continue
        elif not usn_in_range(int(attrUSN), usns.get(attInvId)):
            # Last change was outside any provision/upgradeprovision USN
            # range: it is an admin change, keep it.
            message(CHANGE, "%sAttribute: %s was not "
                            "created/modified/deleted during a "
                            "provision or upgradeprovision. Current "
                            "usn: %d. Doing nothing" % (txt, att,
                                                        attrUSN))
            txt = ""
            delta.remove(att)
            continue
        else:
            if att == "defaultSecurityDescriptor":
                defSDmodified = True
            if attrUSN:
                message(CHANGE, "%sAttribute: %s will be modified"
                                "/deleted it was last modified "
                                "during a provision. Current usn: "
                                "%d" % (txt, att, attrUSN))
                txt = ""
            else:
                message(CHANGE, "%sAttribute: %s will be added because "
                                "it did not exist before" % (txt, att))
                txt = ""
            continue

    return delta
def update_present(ref_samdb, samdb, basedn, listPresent, usns):
    """ This function updates the object that are already present in the
    provision

    :param ref_samdb: An LDB object pointing to the reference provision
    :param samdb: An LDB object pointing to the updated provision
    :param basedn: A string with the value of the base DN for the provision
        (ie. DC=foo, DC=bar)
    :param listPresent: A list of object that is present in the provision
    :param usns: A list of USN range modified by previous provision and
        upgradeprovision grouped by invocation ID
    :return: The number of objects actually modified
    """

    # This hash is meant to speedup lookup of attribute name from an oid,
    # it's for the replPropertyMetaData handling
    hash_oid_name = {}
    res = samdb.search(expression="objectClass=attributeSchema", base=basedn,
                       controls=["search_options:1:2"], attrs=["attributeID",
                       "lDAPDisplayName"])
    if len(res) > 0:
        for e in res:
            strDisplay = str(e.get("lDAPDisplayName"))
            hash_oid_name[str(e.get("attributeID"))] = strDisplay
    else:
        # NOTE(review): this message looks copy/pasted from
        # add_missing_entries -- the actual failure here is that no
        # attributeSchema objects were found. Confirm before relying on it.
        msg = "Unable to insert missing elements: circular references"
        raise ProvisioningError(msg)

    changed = 0
    sd_flags = SECINFO_OWNER | SECINFO_GROUP | SECINFO_DACL | SECINFO_SACL
    controls = ["search_options:1:2", "sd_flags:1:%d" % sd_flags]
    message(CHANGE, "Using replPropertyMetadata for change selection")
    for dn in listPresent:
        reference = ref_samdb.search(expression="(distinguishedName=%s)" % (str(dn)), base=basedn,
                                     scope=SCOPE_SUBTREE,
                                     controls=controls)
        current = samdb.search(expression="(distinguishedName=%s)" % (str(dn)), base=basedn,
                               scope=SCOPE_SUBTREE, controls=controls)

        if (
            (str(current[0].dn) != str(reference[0].dn)) and
            (str(current[0].dn).upper() == str(reference[0].dn).upper())
        ):
            # DNs differ only by case: realign the case on the reference.
            message(CHANGE, "Names are the same except for the case. "
                            "Renaming %s to %s" % (str(current[0].dn),
                                                   str(reference[0].dn)))
            identic_rename(samdb, reference[0].dn)
            current = samdb.search(expression="(distinguishedName=%s)" % (str(dn)), base=basedn,
                                   scope=SCOPE_SUBTREE,
                                   controls=controls)

        delta = samdb.msg_diff(current[0], reference[0])

        for att in backlinked:
            delta.remove(att)

        for att in attrNotCopied:
            delta.remove(att)

        delta.remove("name")

        nb_items = len(list(delta))

        # Only "dn" left: nothing really differs for this object.
        if nb_items == 1:
            continue

        if nb_items > 1:
            # Fetch the replPropertyMetaData
            res = samdb.search(expression="(distinguishedName=%s)" % (str(dn)), base=basedn,
                               scope=SCOPE_SUBTREE, controls=controls,
                               attrs=["replPropertyMetaData"])
            ctr = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
                             res[0]["replPropertyMetaData"][0]).ctr

            hash_attr_usn = {}
            for o in ctr.array:
                # We put in this hash only modification
                # made on the current host
                att = hash_oid_name[samdb.get_oid_from_attid(o.attid)]
                if str(o.originating_invocation_id) in usns.keys():
                    hash_attr_usn[att] = [o.originating_usn, str(o.originating_invocation_id)]
                else:
                    # Foreign invocation id: flagged with -1 so the change is
                    # treated as made by another DC.
                    hash_attr_usn[att] = [-1, None]

            delta = checkKeepAttributeWithMetadata(delta, att, message, reference,
                                                   current, hash_attr_usn,
                                                   basedn, usns, samdb)

        delta.dn = dn


        if len(delta) >1:
            # Skip dn as the value is not really changed ...
            attributes=", ".join(delta.keys()[1:])
            modcontrols = []
            relaxedatt = ['iscriticalsystemobject', 'grouptype']
            # Let's try to reduce as much as possible the use of relax control
            for attr in delta.keys():
                if attr.lower() in relaxedatt:
                    modcontrols = ["relax:0", "provision:0"]
            message(CHANGE, "%s is different from the reference one, changed"
                            " attributes: %s\n" % (dn, attributes))
            changed += 1
            samdb.modify(delta, modcontrols)
    return changed
def reload_full_schema(samdb, names):
    """Load the updated schema with all the new and existing classes
    and attributes.

    :param samdb: An LDB object connected to the sam.ldb of the update
        provision
    :param names: List of key provision parameters
    """

    schemadn = str(names.schemadn)
    current = samdb.search(expression="objectClass=*", base=schemadn,
                           scope=SCOPE_SUBTREE)

    # Serialize every schema entry to LDIF so the whole schema can be
    # reloaded from it in one pass.
    schema_ldif = "".join(samdb.write_ldif(ent, ldb.CHANGETYPE_NONE) for ent in current)

    prefixmap_data = b64encode(open(setup_path("prefixMap.txt"), 'rb').read()).decode('utf8')

    # We don't actually add this ldif, just parse it
    prefixmap_ldif = "dn: %s\nprefixMap:: %s\n\n" % (schemadn, prefixmap_data)

    dsdb._dsdb_set_schema_from_ldif(samdb, prefixmap_ldif, schema_ldif, schemadn)


def update_partition(ref_samdb, samdb, basedn, names, schema, provisionUSNs, prereloadfunc):
    """Check differences between the reference provision and the upgraded one.

    It looks for all objects which base DN is name.

    This function will also add the missing object and update existing object
    to add or remove attributes that were missing.

    :param ref_samdb: An LDB object connected to the sam.ldb of the
        reference provision
    :param samdb: An LDB object connected to the sam.ldb of the update
        provision
    :param basedn: String value of the DN of the partition
    :param names: List of key provision parameters
    :param schema: A Schema object
    :param provisionUSNs: A dictionary with range of USN modified during
        provision or upgradeprovision. Ranges are grouped by invocationID.
    :param prereloadfunc: A function that must be executed just before the
        reload of the schema
    :return: 1 on success, 0 when an exception interrupted the update
    """

    hash_new = {}
    hash = {}
    listMissing = []
    listPresent = []
    reference = []
    current = []

    # Connect to the reference provision and get all the attribute in the
    # partition referred by name
    reference = ref_samdb.search(expression="objectClass=*", base=basedn,
                                 scope=SCOPE_SUBTREE, attrs=["dn"],
                                 controls=["search_options:1:2"])

    current = samdb.search(expression="objectClass=*", base=basedn,
                           scope=SCOPE_SUBTREE, attrs=["dn"],
                           controls=["search_options:1:2"])
    # Create a hash for speeding the search of new object
    for i in range(0, len(reference)):
        hash_new[str(reference[i]["dn"]).lower()] = reference[i]["dn"]

    # Create a hash for speeding the search of existing object in the
    # current provision
    for i in range(0, len(current)):
        hash[str(current[i]["dn"]).lower()] = current[i]["dn"]


    for k in hash_new.keys():
        if not k in hash:
            # The Deleted Objects container is handled separately by
            # add_deletedobj_containers.
            if not str(hash_new[k]) == "CN=Deleted Objects, %s" % names.rootdn:
                listMissing.append(hash_new[k])
        else:
            listPresent.append(hash_new[k])

    # Sort the missing object in order to have object of the lowest level
    # first (which can be containers for higher level objects)
    listMissing.sort(key=cmp_to_key(dn_sort))
    listPresent.sort(key=cmp_to_key(dn_sort))

    # The following lines is to load the up to
    # date schema into our current LDB
    # a complete schema is needed as the insertion of attributes
    # and class is done against it
    # and the schema is self validated
    samdb.set_schema(schema)
    try:
        message(SIMPLE, "There are %d missing objects" % (len(listMissing)))
        add_deletedobj_containers(ref_samdb, samdb, names)

        add_missing_entries(ref_samdb, samdb, names, basedn, listMissing)

        prereloadfunc()
        message(SIMPLE, "Reloading a merged schema, which might trigger "
                "reindexing so please be patient")
        reload_full_schema(samdb, names)
        message(SIMPLE, "Schema reloaded!")

        changed = update_present(ref_samdb, samdb, basedn, listPresent,
                                 provisionUSNs)
        message(SIMPLE, "There are %d changed objects" % (changed))
        return 1

    except Exception as err:
        # Broad catch by design: report the traceback and signal failure to
        # the caller through the 0 return value.
        message(ERROR, "Exception during upgrade of samdb:")
        (typ, val, tb) = sys.exc_info()
        traceback.print_exception(typ, val, tb)
        return 0
def check_updated_sd(ref_sam, cur_sam, names):
    """Check if the security descriptor in the upgraded provision are the same
    as the reference

    :param ref_sam: A LDB object connected to the sam.ldb file used as
        the reference provision
    :param cur_sam: A LDB object connected to the sam.ldb file used as
        upgraded provision
    :param names: List of key provision parameters"""
    reference = ref_sam.search(expression="objectClass=*", base=str(names.rootdn),
                               scope=SCOPE_SUBTREE,
                               attrs=["dn", "nTSecurityDescriptor"],
                               controls=["search_options:1:2"])
    current = cur_sam.search(expression="objectClass=*", base=str(names.rootdn),
                             scope=SCOPE_SUBTREE,
                             attrs=["dn", "nTSecurityDescriptor"],
                             controls=["search_options:1:2"])
    # Index the reference SD blobs by lower-cased DN for O(1) lookup.
    hash = {}
    for i in range(0, len(reference)):
        refsd_blob = reference[i]["nTSecurityDescriptor"][0]
        hash[str(reference[i]["dn"]).lower()] = refsd_blob


    for i in range(0, len(current)):
        key = str(current[i]["dn"]).lower()
        if key in hash:
            cursd_blob = current[i]["nTSecurityDescriptor"][0]
            cursd = ndr_unpack(security.descriptor,
                               cursd_blob)
            if cursd_blob != hash[key]:
                # Blobs differ: compare the parsed descriptors to report a
                # human-readable SDDL difference.
                refsd = ndr_unpack(security.descriptor,
                                   hash[key])
                txt = get_diff_sds(refsd, cursd, names.domainsid, False)
                if txt != "":
                    message(CHANGESD, "On object %s ACL is different"
                            " \n%s" % (current[i]["dn"], txt))



def fix_wellknown_sd(samdb, names):
    """This function fix the SD for partition/wellknown containers
    (basedn, configdn, ...)

    This is needed because some provision use to have broken SD on containers

    :param samdb: An LDB object pointing to the sam of the current provision
    :param names: A list of key provision parameters
    :return: The list of wellknown DNs that were considered
    """

    list_wellknown_dns = []

    subcontainers = get_wellknown_sds(samdb)

    for [dn, descriptor_fn] in subcontainers:
        list_wellknown_dns.append(dn)
        # Only reset the SD when the comparison phase queued this DN.
        if dn in dnToRecalculate:
            delta = Message()
            delta.dn = dn
            descr = descriptor_fn(names.domainsid, name_map=names.name_map)
            delta["nTSecurityDescriptor"] = MessageElement(descr, FLAG_MOD_REPLACE,
                                                           "nTSecurityDescriptor" )
            samdb.modify(delta)
            message(CHANGESD, "nTSecurityDescriptor updated on wellknown DN: %s" % delta.dn)

    return list_wellknown_dns
def rebuild_sd(samdb, names):
    """Rebuild security descriptor of the current provision from scratch

    During the different pre release of samba4 security descriptors
    (SD) were notably broken (up to alpha11 included)

    This function allows one to get them back in order, this function works
    only after the database comparison that --full mode uses and which
    populates the dnToRecalculate and dnNotToRecalculate lists.

    The idea is that the SD can be safely recalculated from scratch to get it
    right.

    :param samdb: An LDB object pointing to the sam of the current provision
    :param names: List of key provision parameters"""

    # Wellknown containers are reset first; they must not be touched again
    # in the generic loop below.
    listWellknown = fix_wellknown_sd(samdb, names)

    if len(dnToRecalculate) != 0:
        message(CHANGESD, "%d DNs have been marked as needed to be recalculated"
                          % (len(dnToRecalculate)))

    for dn in dnToRecalculate:
        # well known SDs have already been reset
        if dn in listWellknown:
            continue
        delta = Message()
        delta.dn = dn
        sd_flags = SECINFO_OWNER | SECINFO_GROUP | SECINFO_DACL | SECINFO_SACL
        try:
            descr = get_empty_descriptor(names.domainsid)
            delta["nTSecurityDescriptor"] = MessageElement(descr, FLAG_MOD_REPLACE,
                                                           "nTSecurityDescriptor")
            samdb.modify(delta, ["sd_flags:1:%d" % sd_flags,"relax:0","local_oid:%s:0" % dsdb.DSDB_CONTROL_DBCHECK])
        except LdbError as e:
            # On failure, dump the offending SD in SDDL form and abort.
            samdb.transaction_cancel()
            res = samdb.search(expression="objectClass=*", base=str(delta.dn),
                               scope=SCOPE_BASE,
                               attrs=["nTSecurityDescriptor"],
                               controls=["sd_flags:1:%d" % sd_flags])
            badsd = ndr_unpack(security.descriptor,
                               res[0]["nTSecurityDescriptor"][0])
            message(ERROR, "On %s bad stuff %s" % (str(delta.dn),badsd.as_sddl(names.domainsid)))
            return

def hasATProvision(samdb):
    """Return True when the database contains an @PROVISION entry.

    :param samdb: An LDB object pointing to the provision sam.ldb"""
    entry = samdb.search(expression="(distinguishedName=@PROVISION)", base = "",
                         scope=SCOPE_BASE,
                         attrs=["dn"])

    if entry is not None and len(entry) == 1:
        return True
    else:
        return False

def removeProvisionUSN(samdb):
    """Remove the lastProvisionUSN data stored on the @PROVISION entry.

    :param samdb: An LDB object pointing to the provision sam.ldb"""
    attrs = [samba.provision.LAST_PROVISION_USN_ATTRIBUTE, "dn"]
    entry = samdb.search(expression="(distinguishedName=@PROVISION)", base = "",
                         scope=SCOPE_BASE,
                         attrs=attrs)
    empty = Message()
    empty.dn = entry[0].dn
    # Diffing the fetched entry against an empty message yields a delta
    # that deletes the fetched attributes.
    delta = samdb.msg_diff(entry[0], empty)
    delta.remove("dn")
    delta.dn = entry[0].dn
    samdb.modify(delta)

def remove_stored_generated_attrs(paths, creds, session, lp):
    """Remove previously stored constructed attributes

    :param paths: List of paths for different provision objects
        from the upgraded provision
    :param creds: A credential object
    :param session: A session object
    :param lp: A line parser object
    :return: An associative array whose key are the different constructed
        attributes and the value the dn where this attributes were found.
    """
    # NOTE(review): this function has no body -- it always returns None
    # despite the docstring describing a return value. Confirm whether the
    # implementation was intentionally removed.


def simple_update_basesamdb(newpaths, paths, names):
    """Update the provision container db: sam.ldb
    This function is aimed at very old provision (before alpha9)

    :param newpaths: List of paths for different provision objects
        from the reference provision
    :param paths: List of paths for different provision objects
        from the upgraded provision
    :param names: List of key provision parameters"""

    message(SIMPLE, "Copy samdb")
    tdb_util.tdb_copy(newpaths.samdb, paths.samdb)

    message(SIMPLE, "Update partitions filename if needed")
    schemaldb = os.path.join(paths.private_dir, "schema.ldb")
    configldb = os.path.join(paths.private_dir, "configuration.ldb")
    usersldb = os.path.join(paths.private_dir, "users.ldb")
    samldbdir = os.path.join(paths.private_dir, "sam.ldb.d")

    if not os.path.isdir(samldbdir):
        os.mkdir(samldbdir)
        os.chmod(samldbdir, 0o700)
    # Move legacy per-partition ldb files into sam.ldb.d/ under their
    # partition-DN-based filenames.
    if os.path.isfile(schemaldb):
        tdb_util.tdb_copy(schemaldb, os.path.join(samldbdir,
                                                  "%s.ldb"%str(names.schemadn).upper()))
        os.remove(schemaldb)
    if os.path.isfile(usersldb):
        tdb_util.tdb_copy(usersldb, os.path.join(samldbdir,
                                                 "%s.ldb"%str(names.rootdn).upper()))
        os.remove(usersldb)
    if os.path.isfile(configldb):
        tdb_util.tdb_copy(configldb, os.path.join(samldbdir,
                                                  "%s.ldb"%str(names.configdn).upper()))
        os.remove(configldb)


def update_samdb(ref_samdb, samdb, names, provisionUSNs, schema, prereloadfunc):
    """Upgrade the SAM DB contents for all the provision partitions

    :param ref_samdb: An LDB object connected to the sam.ldb of the reference
        provision
    :param samdb: An LDB object connected to the sam.ldb of the update
        provision
    :param names: List of key provision parameters
    :param provisionUSNs: A dictionary with range of USN modified during
        provision or upgradeprovision. Ranges are grouped by invocationID.
    :param schema: A Schema object that represent the schema of the provision
    :param prereloadfunc: A function that must be executed just before the
        reload of the schema
    :return: 1 on success, 0 on failure
    """

    message(SIMPLE, "Starting update of samdb")
    ret = update_partition(ref_samdb, samdb, str(names.rootdn), names,
                           schema, provisionUSNs, prereloadfunc)
    if ret:
        message(SIMPLE, "Update of samdb finished")
        return 1
    else:
        message(SIMPLE, "Update failed")
        return 0
def backup_provision(samdb, paths, dir, only_db):
    """This function backup the provision files so that a rollback
    is possible

    :param samdb: An LDB object opened on the existing sam.ldb, used to
        discover the backend store type (@PARTITION/backendStore)
    :param paths: Paths to different objects
    :param dir: Directory where to store the backup
    :param only_db: Skip sysvol for users with big sysvol
    """

    # Currently we default to tdb for the backend store type
    #
    backend_store = "tdb"
    res = samdb.search(base="@PARTITION",
                       scope=ldb.SCOPE_BASE,
                       attrs=["backendStore"])
    if "backendStore" in res[0]:
        backend_store = str(res[0]["backendStore"][0])


    if paths.sysvol and not only_db:
        copytree_with_xattrs(paths.sysvol, os.path.join(dir, "sysvol"))

    tdb_util.tdb_copy(paths.samdb, os.path.join(dir, os.path.basename(paths.samdb)))
    tdb_util.tdb_copy(paths.secrets, os.path.join(dir, os.path.basename(paths.secrets)))
    tdb_util.tdb_copy(paths.idmapdb, os.path.join(dir, os.path.basename(paths.idmapdb)))
    tdb_util.tdb_copy(paths.privilege, os.path.join(dir, os.path.basename(paths.privilege)))
    if os.path.isfile(os.path.join(paths.private_dir,"eadb.tdb")):
        tdb_util.tdb_copy(os.path.join(paths.private_dir,"eadb.tdb"), os.path.join(dir, "eadb.tdb"))
    shutil.copy2(paths.smbconf, dir)
    shutil.copy2(os.path.join(paths.private_dir,"secrets.keytab"), dir)

    samldbdir = os.path.join(paths.private_dir, "sam.ldb.d")
    if not os.path.isdir(samldbdir):
        # Very old layout: the partitions live as separate ldb files
        # directly in the private dir.
        samldbdir = paths.private_dir
        schemaldb = os.path.join(paths.private_dir, "schema.ldb")
        configldb = os.path.join(paths.private_dir, "configuration.ldb")
        usersldb = os.path.join(paths.private_dir, "users.ldb")
        # BUGFIX: users.ldb and configuration.ldb used to be copied to each
        # other's destination (swapped targets), which corrupted any restore
        # made from this backup. Each file now goes to its own name.
        tdb_util.tdb_copy(schemaldb, os.path.join(dir, "schema.ldb"))
        tdb_util.tdb_copy(usersldb, os.path.join(dir, "users.ldb"))
        tdb_util.tdb_copy(configldb, os.path.join(dir, "configuration.ldb"))
    else:
        os.mkdir(os.path.join(dir, "sam.ldb.d"), 0o700)

        for ldb_name in os.listdir(samldbdir):
            # Skip lock files; metadata.tdb is always tdb even on mdb stores.
            if not ldb_name.endswith("-lock"):
                if backend_store == "mdb" and ldb_name != "metadata.tdb":
                    mdb_util.mdb_copy(os.path.join(samldbdir, ldb_name),
                                      os.path.join(dir, "sam.ldb.d", ldb_name))
                else:
                    tdb_util.tdb_copy(os.path.join(samldbdir, ldb_name),
                                      os.path.join(dir, "sam.ldb.d", ldb_name))


def sync_calculated_attributes(samdb, names):
    """Synchronize attributes used for constructed ones, with the
    old constructed that were stored in the database.

    This apply for instance to msds-keyversionnumber that was
    stored and that is now constructed from replpropertymetadata.

    :param samdb: An LDB object attached to the currently upgraded samdb
    :param names: Various key parameter about current provision.
    """
    listAttrs = ["msDs-KeyVersionNumber"]
    hash = search_constructed_attrs_stored(samdb, names.rootdn, listAttrs)
    if "msDs-KeyVersionNumber" in hash:
        increment_calculated_keyversion_number(samdb, names.rootdn,
                                               hash["msDs-KeyVersionNumber"])
# 4) lookup of lastProvisionUSN in order to get the ranges of USNs modified
#    by either upgradeprovision or provision
# 5) creation of a new provision with the latest version of the provision
#    script (called reference)
# 6) get reference provision paths
# 7) open reference provision ldbs
# 8) setup helpers data that will help the update process
# 9) (SKIPPED) we no longer update the privilege ldb by copying the one of the
#    reference provision to the current provision, because a shutil.copy would
#    break the transaction locks both databases are under and this database has
#    not changed between 2009 and Samba 4.0.3 in Feb 2013 (at least)
# 10)get the oemInfo field, this field contains information about the different
#    provisions that have been done
# 11)Depending on whether the --very-old-pre-alpha9 flag is set the following
#    things are done
#    A) When alpha9 or alphaxx not specified (default)
#       The base sam.ldb file is updated by looking at the difference between
#       the reference one and the current one. Everything is copied with the
#       exception of lastProvisionUSN attributes.
#    B) Other case (it reflects that the provision was done before alpha9)
#       The base sam.ldb of the reference provision is copied over
#       the current one, if necessary the ldbs related to partitions are moved
#       and renamed
#       The highest used USN is fetched so that changes made by upgradeprovision
#       can be tracked
# 12)A Schema object is created, it will be used to provide a complete
#    schema to the current provision during update (as the schema of the
#    current provision might not be complete and so won't allow some
#    objects to be created)
# 13)Proceed to full update of the sam DB (see the separate paragraph about it)
# 14)The secrets db is updated by pulling all the differences from the reference
#    provision into the current provision
# 15)As the previous step has most probably modified the password stored
#    in secrets for the current DC, a new password is generated,
#    the kvno is bumped and the entry in samdb is also updated
# 16)For a current provision older than alpha9, we must fix the SDs a bit,
#    because SDs used to be generated with the system account before alpha9.
# 17)The highest usn modified so far is searched in the database; it will be
#    the upper limit for usns modified during provision.
#    This is done before potential SD recalculation because we do not want
#    SDs modified during recalculation to be marked as modified during provision
#    (and so possibly replaced at the next upgradeprovision)
# 18)Rebuild the SDs if the flag indicates to do so
# 19)Check differences between the SDs of the reference provision and those of
#    the current provision. The check is done by getting the sddl representation
#    of the SD. Each sddl is chunked into parts (user, group, dacl, sacl).
#    Each part is verified separately; for dacl and sacl the ACL is split into
#    ACEs and each ACE is verified separately (so that a permutation of ACEs
#    does not raise an error).
+# 20)The oemInfo field is updated to add information about the fact that the
+# provision has been updated by the upgradeprovision version xxx
+# (the version is the one obtained when starting samba with the --version
+# parameter)
+# 21)Check if the current provision has all the settings needed for dynamic
+# DNS update to work (that is to say the provision is newer than
+# January 2010). If not, DNS configuration files from the reference provision
+# are copied in a sub folder and the administrator is invited to
+# do what is needed.
+# 22)If the lastProvisionUSN attribute was present it is updated to add
+# the range of usns modified by the current upgradeprovision
+
+
+# About updating the sam DB
+# The update takes place in the update_partition function
+# This function reads both current and reference provision and lists all
+# the available DNs of objects
+# If the string representation of a DN in the reference provision is
+# equal to the string representation of a DN in the current provision
+# (without taking care of case) then the object is flagged as being
+# present. If the object is not present in the current provision the object
+# is flagged as missing in the current provision. Objects present in the current
+# provision but not in the reference provision are ignored.
+# Once the list of objects present and missing is done, the deleted object
+# containers are created in the different partitions (if missing)
+#
+# Then the function add_missing_entries is called
+# This function will go through the list of missing entries by calling
+# add_missing_object for the given object. If this function returns 0
+# it means that the object needs some other object in order to be created
+# The object is reappended at the end of the list to be created later
+# (and preferably after all the needed objects have been created)
+# The function keeps on looping on the list of objects to be created until
+# it's empty or the number of deferred creations is equal to the number
+# of objects that still need to be created.
+
+# The function add_missing_object will first check if the object can be created.
+# That is to say that it doesn't depend on other not yet created objects
+# If this prerequisite can't be fulfilled it exits with 0
+# Then it will try to create the missing entry by doing
+# an ldb_message_diff between the object in the reference provision and
+# an empty object.
+# The resulting object is filtered to remove all the backlink attributes
+# (ie. memberOf) as they will be created by the other linked object (ie.
+# the one with the member attribute)
+# All attributes specified in the attrNotCopied array are
+# also removed; most of the time these are generated attributes
+
+# After missing entries have been added the update_partition function will
+# take care of objects that exist but that need some update.
+# In order to do so the function update_present is called with the list
+# of objects that are present in both provisions and that might need an update.
+
+# This function first handles case mismatches so that the DNs in the current
+# provision have the same case as in the reference provision
+
+# It will then construct an associative array consisting of attributes as
+# key and invocationid as value (if the originating invocation id is
+# different from the invocation id of the current DC the value is -1 instead). 
+ +# If the range of provision modified attributes is present, the function will +# use the replMetadataProperty update method which is the following: +# Removing attributes that should not be updated: rIDAvailablePool, objectSid, +# creationTime, msDs-KeyVersionNumber, oEMInformation +# Check for each attribute if its usn is within one of the modified by +# provision range and if its originating id is the invocation id of the +# current DC, then validate the update from reference to current. +# If not or if there is no replMetatdataProperty for this attribute then we +# do not update it. +# Otherwise (case the range of provision modified attribute is not present) it +# use the following process: +# All attributes that need to be added are accepted at the exeption of those +# listed in hashOverwrittenAtt, in this case the attribute needs to have the +# correct flags specified. +# For attributes that need to be modified or removed, a check is performed +# in OverwrittenAtt, if the attribute is present and the modification flag +# (remove, delete) is one of those listed for this attribute then modification +# is accepted. For complicated handling of attribute update, the control is passed +# to handle_special_case + + + +if __name__ == '__main__': + defSDmodified = False + + # From here start the big steps of the program + # 1) First get files paths + paths = get_paths(param, smbconf=smbconf) + # Get ldbs with the system session, it is needed for searching + # provision parameters + session = system_session() + + # This variable will hold the last provision USN once if it exists. + minUSN = 0 + # 2) + ldbs = get_ldbs(paths, creds, session, lp) + backupdir = tempfile.mkdtemp(dir=paths.private_dir, + prefix="backupprovision") + backup_provision(ldbs.sam, paths, backupdir, opts.db_backup_only) + try: + ldbs.startTransactions() + + # 3) Guess all the needed names (variables in fact) from the current + # provision. 
+ names = find_provision_key_parameters(ldbs.sam, ldbs.secrets, ldbs.idmap, + paths, smbconf, lp) + # 4) + lastProvisionUSNs = get_last_provision_usn(ldbs.sam) + if lastProvisionUSNs is not None: + v = 0 + for k in lastProvisionUSNs.keys(): + for r in lastProvisionUSNs[k]: + v = v + 1 + + message(CHANGE, + "Find last provision USN, %d invocation(s) for a total of %d ranges" % + (len(lastProvisionUSNs.keys()), v /2 )) + + if lastProvisionUSNs.get("default") is not None: + message(CHANGE, "Old style for usn ranges used") + lastProvisionUSNs[str(names.invocation)] = lastProvisionUSNs["default"] + del lastProvisionUSNs["default"] + else: + message(SIMPLE, "Your provision lacks provision range information") + if confirm("Do you want to run findprovisionusnranges to try to find them ?", False): + ldbs.groupedRollback() + minobj = 5 + (hash_id, nb_obj) = findprovisionrange(ldbs.sam, ldb.Dn(ldbs.sam, str(names.rootdn))) + message(SIMPLE, "Here is a list of changes that modified more than %d objects in 1 minute." % minobj) + message(SIMPLE, "Usually changes made by provision and upgradeprovision are those who affect a couple" + " of hundred of objects or more") + message(SIMPLE, "Total number of objects: %d" % nb_obj) + message(SIMPLE, "") + + print_provision_ranges(hash_id, minobj, None, str(paths.samdb), str(names.invocation)) + + message(SIMPLE, "Once you applied/adapted the change(s) please restart the upgradeprovision script") + sys.exit(0) + + # Objects will be created with the admin session + # (not anymore system session) + adm_session = admin_session(lp, str(names.domainsid)) + # So we reget handle on objects + # ldbs = get_ldbs(paths, creds, adm_session, lp) + + if not sanitychecks(ldbs.sam, names): + message(SIMPLE, "Sanity checks for the upgrade have failed. 
" + "Check the messages and correct the errors " + "before rerunning upgradeprovision") + ldbs.groupedRollback() + sys.exit(1) + + # Let's see provision parameters + print_provision_key_parameters(names) + + # 5) With all this information let's create a fresh new provision used as + # reference + message(SIMPLE, "Creating a reference provision") + provisiondir = tempfile.mkdtemp(dir=paths.private_dir, + prefix="referenceprovision") + result = newprovision(names, session, smbconf, provisiondir, + provision_logger, base_schema="2008_R2") + result.report_logger(provision_logger) + + # TODO + # 6) and 7) + # We need to get a list of object which SD is directly computed from + # defaultSecurityDescriptor. + # This will allow us to know which object we can rebuild the SD in case + # of change of the parent's SD or of the defaultSD. + # Get file paths of this new provision + newpaths = get_paths(param, targetdir=provisiondir) + new_ldbs = get_ldbs(newpaths, creds, session, lp) + new_ldbs.startTransactions() + + populateNotReplicated(new_ldbs.sam, names.schemadn) + # 8) Populate some associative array to ease the update process + # List of attribute which are link and backlink + populate_links(new_ldbs.sam, names.schemadn) + # List of attribute with ASN DN synthax) + populate_dnsyntax(new_ldbs.sam, names.schemadn) + # 9) (now skipped, was copy of privileges.ldb) + # 10) + oem = getOEMInfo(ldbs.sam, str(names.rootdn)) + # Do some modification on sam.ldb + ldbs.groupedCommit() + new_ldbs.groupedCommit() + deltaattr = None + # 11) + message(GUESS, oem) + if oem is None or hasATProvision(ldbs.sam) or not opts.very_old_pre_alpha9: + # 11) A + # Starting from alpha9 we can consider that the structure is quite ok + # and that we should do only dela + deltaattr = delta_update_basesamdb(newpaths.samdb, + paths.samdb, + creds, + session, + lp, + message) + else: + # 11) B + simple_update_basesamdb(newpaths, paths, names) + ldbs = get_ldbs(paths, creds, session, lp) + 
removeProvisionUSN(ldbs.sam) + + ldbs.startTransactions() + minUSN = int(str(get_max_usn(ldbs.sam, str(names.rootdn)))) + 1 + new_ldbs.startTransactions() + + # 12) + schema = Schema(names.domainsid, schemadn=str(names.schemadn)) + # We create a closure that will be invoked just before schema reload + def schemareloadclosure(): + basesam = Ldb(paths.samdb, session_info=session, credentials=creds, lp=lp, + options=["modules:"]) + doit = False + if deltaattr is not None and len(deltaattr) > 1: + doit = True + if doit: + deltaattr.remove("dn") + for att in deltaattr: + if att.lower() == "dn": + continue + if (deltaattr.get(att) is not None + and deltaattr.get(att).flags() != FLAG_MOD_ADD): + doit = False + elif deltaattr.get(att) is None: + doit = False + if doit: + message(CHANGE, "Applying delta to @ATTRIBUTES") + deltaattr.dn = ldb.Dn(basesam, "@ATTRIBUTES") + basesam.modify(deltaattr) + else: + message(CHANGE, "Not applying delta to @ATTRIBUTES because " + "there is not only add") + # 13) + if opts.full: + if not update_samdb(new_ldbs.sam, ldbs.sam, names, lastProvisionUSNs, + schema, schemareloadclosure): + message(SIMPLE, "Rolling back all changes. 
Check the cause" + " of the problem") + message(SIMPLE, "Your system is as it was before the upgrade") + ldbs.groupedRollback() + new_ldbs.groupedRollback() + shutil.rmtree(provisiondir) + sys.exit(1) + else: + # Try to reapply the change also when we do not change the sam + # as the delta_upgrade + schemareloadclosure() + sync_calculated_attributes(ldbs.sam, names) + res = ldbs.sam.search(expression="(samaccountname=dns)", + scope=SCOPE_SUBTREE, attrs=["dn"], + controls=["search_options:1:2"]) + if len(res) > 0: + message(SIMPLE, "You still have the old DNS object for managing " + "dynamic DNS, but you didn't supply --full so " + "a correct update can't be done") + ldbs.groupedRollback() + new_ldbs.groupedRollback() + shutil.rmtree(provisiondir) + sys.exit(1) + # 14) + update_secrets(new_ldbs.secrets, ldbs.secrets, message) + # 14bis) + res = ldbs.sam.search(expression="(samaccountname=dns)", + scope=SCOPE_SUBTREE, attrs=["dn"], + controls=["search_options:1:2"]) + + if (len(res) == 1): + ldbs.sam.delete(res[0]["dn"]) + res2 = ldbs.secrets.search(expression="(samaccountname=dns)", + scope=SCOPE_SUBTREE, attrs=["dn"]) + update_dns_account_password(ldbs.sam, ldbs.secrets, names) + message(SIMPLE, "IMPORTANT!!! " + "If you were using Dynamic DNS before you need " + "to update your configuration, so that the " + "tkey-gssapi-credential has the following value: " + "DNS/%s.%s" % (names.netbiosname.lower(), + names.realm.lower())) + # 15) + message(SIMPLE, "Update machine account") + update_machine_account_password(ldbs.sam, ldbs.secrets, names) + + # 16) SD should be created with admin but as some previous acl were so wrong + # that admin can't modify them we have first to recreate them with the good + # form but with system account and then give the ownership to admin ... 
+ if opts.very_old_pre_alpha9: + message(SIMPLE, "Fixing very old provision SD") + rebuild_sd(ldbs.sam, names) + + # We calculate the max USN before recalculating the SD because we might + # touch object that have been modified after a provision and we do not + # want that the next upgradeprovision thinks that it has a green light + # to modify them + + # 17) + maxUSN = get_max_usn(ldbs.sam, str(names.rootdn)) + + # 18) We rebuild SD if a we have a list of DN to recalculate or if the + # defSDmodified is set. + if opts.full and (defSDmodified or len(dnToRecalculate) >0): + message(SIMPLE, "Some (default) security descriptors (SDs) have " + "changed, recalculating them") + ldbs.sam.set_session_info(adm_session) + rebuild_sd(ldbs.sam, names) + + # 19) + # Now we are quite confident in the recalculate process of the SD, we make + # it optional. And we don't do it if there is DN that we must touch + # as we are assured that on this DNs we will have differences ! + # Also the check must be done in a clever way as for the moment we just + # compare SDDL + if dnNotToRecalculateFound == False and (opts.debugchangesd or opts.debugall): + message(CHANGESD, "Checking recalculated SDs") + check_updated_sd(new_ldbs.sam, ldbs.sam, names) + + # 20) + updateOEMInfo(ldbs.sam, str(names.rootdn)) + # 21) + check_for_DNS(newpaths.private_dir, paths.private_dir, + newpaths.binddns_dir, paths.binddns_dir, + names.dns_backend) + # 22) + update_provision_usn(ldbs.sam, minUSN, maxUSN, names.invocation) + if opts.full and (names.policyid is None or names.policyid_dc is None): + update_policyids(names, ldbs.sam) + + if opts.full: + try: + update_gpo(paths, ldbs.sam, names, lp, message) + except ProvisioningError as e: + message(ERROR, "The policy for domain controller is missing. " + "You should restart upgradeprovision with --full") + + ldbs.groupedCommit() + new_ldbs.groupedCommit() + message(SIMPLE, "Upgrade finished!") + # remove reference provision now that everything is done ! 
+ # So we have reindexed first if need when the merged schema was reloaded + # (as new attributes could have quick in) + # But the second part of the update (when we update existing objects + # can also have an influence on indexing as some attribute might have their + # searchflag modificated + message(SIMPLE, "Reopening samdb to trigger reindexing if needed " + "after modification") + samdb = Ldb(paths.samdb, session_info=session, credentials=creds, lp=lp) + message(SIMPLE, "Reindexing finished") + + shutil.rmtree(provisiondir) + except Exception as err: + message(ERROR, "A problem occurred while trying to upgrade your " + "provision. A full backup is located at %s" % backupdir) + if opts.debugall or opts.debugchange: + (typ, val, tb) = sys.exc_info() + traceback.print_exception(typ, val, tb) + sys.exit(1) diff --git a/source4/scripting/bin/setup_dns.sh b/source4/scripting/bin/setup_dns.sh new file mode 100755 index 0000000..143f2c2 --- /dev/null +++ b/source4/scripting/bin/setup_dns.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# example script to setup DNS for a vampired domain + +[ $# = 3 ] || { + echo "Usage: setup_dns.sh HOSTNAME DOMAIN IP" + exit 1 +} + +HOSTNAME="$(echo $1 | tr '[a-z]' '[A-Z]')" +DOMAIN="$(echo $2 | tr '[a-z]' '[A-Z]')" +IP="$3" + +RSUFFIX=$(echo $DOMAIN | sed s/[\.]/,DC=/g) + +[ -z "$PRIVATEDIR" ] && { + PRIVATEDIR=$(bin/samba-tool testparm --section-name=global --parameter-name='private dir' --suppress-prompt 2>/dev/null) +} + +OBJECTGUID=$(bin/ldbsearch --scope=base -H "$PRIVATEDIR/sam.ldb" -b "CN=NTDS Settings,CN=$HOSTNAME,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=$RSUFFIX" objectguid | grep ^objectGUID | cut -d: -f2) + +samba4kinit=kinit +if test -x $BINDIR/samba4kinit; then + samba4kinit=bin/samba4kinit +fi + +echo "Found objectGUID $OBJECTGUID" + +echo "Running kinit for $HOSTNAME\$@$DOMAIN" +$samba4kinit -e arcfour-hmac-md5 -k -t "$PRIVATEDIR/secrets.keytab" $HOSTNAME\$@$DOMAIN || exit 1 +echo "Adding 
$HOSTNAME.$DOMAIN" +scripting/bin/nsupdate-gss --noverify $HOSTNAME $DOMAIN $IP 300 || { + echo "Failed to add A record" + exit 1 +} +echo "Adding $OBJECTGUID._msdcs.$DOMAIN => $HOSTNAME.$DOMAIN" +scripting/bin/nsupdate-gss --realm=$DOMAIN --noverify --ntype="CNAME" $OBJECTGUID _msdcs.$DOMAIN $HOSTNAME.$DOMAIN 300 || { + echo "Failed to add CNAME" + exit 1 +} +echo "Checking" +rndc flush +host $HOSTNAME.$DOMAIN +host $OBJECTGUID._msdcs.$DOMAIN diff --git a/source4/scripting/bin/subunitrun b/source4/scripting/bin/subunitrun new file mode 100755 index 0000000..7bfa851 --- /dev/null +++ b/source4/scripting/bin/subunitrun @@ -0,0 +1,87 @@ +#!/usr/bin/env python3 + +# Simple subunit testrunner for python + +# NOTE: This is deprecated - Using the standard subunit runner is +# preferred - e.g. "python -m samba.subunit.run YOURMODULE". +# +# This wrapper will be removed once all tests can be run +# without it. At the moment there are various tests which still +# get e.g. credentials passed via command-line options to this +# script. + +# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2014 +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +import sys + +# make sure the script dies immediately when hitting control-C, +# rather than raising KeyboardInterrupt. As we do all database +# operations using transactions, this is safe. 
+import signal +signal.signal(signal.SIGINT, signal.SIG_DFL) + +# Find right directory when running from source tree +sys.path.insert(0, "bin/python") + +import optparse +import samba +from samba.tests.subunitrun import TestProgram, SubunitOptions + +import samba.getopt as options +import samba.tests + + +usage = 'subunitrun [options] <tests>' +description = ''' +This runs a Samba python test suite. The tests are typically located in +python/samba/tests/*.py + +To run the tests from one of those modules, specify the test as +samba.tests.MODULE. For example, to run the tests in common.py: + + subunitrun samba.tests.common + +To list the tests in that module, use: + + subunitrun -l samba.tests.common + +NOTE: This script is deprecated in favor of "python -m subunit.run". Don't use +it unless it can be avoided. +''' + +def format_description(formatter): + '''hack to prevent textwrap of the description''' + return description + +parser = optparse.OptionParser(usage=usage, description=description) +parser.format_description = format_description +credopts = options.CredentialsOptions(parser) +sambaopts = options.SambaOptions(parser) +subunitopts = SubunitOptions(parser) +parser.add_option_group(credopts) +parser.add_option_group(sambaopts) +parser.add_option_group(subunitopts) + +opts, args = parser.parse_args() + +if not getattr(opts, "listtests", False): + lp = sambaopts.get_loadparm() + samba.tests.cmdline_credentials = credopts.get_credentials(lp) +if getattr(opts, 'load_list', None): + args.insert(0, "--load-list=%s" % opts.load_list) + +TestProgram(module=None, args=args, opts=subunitopts) diff --git a/source4/scripting/bin/wscript_build b/source4/scripting/bin/wscript_build new file mode 100644 index 0000000..d31afb2 --- /dev/null +++ b/source4/scripting/bin/wscript_build @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 + +if bld.CONFIG_SET('AD_DC_BUILD_IS_ENABLED'): + for script in ['samba_dnsupdate', + 'samba_spnupdate', + 'samba_kcc', + 'samba_upgradeprovision', + 
'samba_upgradedns', + 'gen_output.py', + 'samba_downgrade_db']: + bld.SAMBA_SCRIPT(script, pattern=script, installdir='.') +if bld.CONFIG_SET('WITH_ADS'): + bld.SAMBA_SCRIPT('samba-tool', pattern='samba-tool', installdir='.') +bld.SAMBA_SCRIPT('samba-gpupdate', pattern='samba-gpupdate', installdir='.') |