Diffstat
-rwxr-xr-x  source4/script/buildtree.pl | 40
-rwxr-xr-x  source4/script/depfilter.py | 53
-rwxr-xr-x  source4/script/extract_allparms.sh | 2
-rwxr-xr-x  source4/script/find_unused_options.sh | 37
-rwxr-xr-x  source4/script/minimal_includes.pl | 171
-rwxr-xr-x  source4/script/mkproto.pl | 252
-rwxr-xr-x  source4/script/update-proto.pl | 242
-rwxr-xr-x  source4/scripting/bin/enablerecyclebin | 53
-rwxr-xr-x  source4/scripting/bin/findprovisionusnranges | 78
-rw-r--r--  source4/scripting/bin/gen_error_common.py | 82
-rwxr-xr-x  source4/scripting/bin/gen_hresult.py | 228
-rwxr-xr-x  source4/scripting/bin/gen_ntstatus.py | 148
-rwxr-xr-x  source4/scripting/bin/gen_output.py | 53
-rwxr-xr-x  source4/scripting/bin/gen_werror.py | 149
-rwxr-xr-x  source4/scripting/bin/get-descriptors | 154
-rwxr-xr-x  source4/scripting/bin/ktpass.sh | 122
-rwxr-xr-x  source4/scripting/bin/machineaccountccache | 30
-rwxr-xr-x  source4/scripting/bin/machineaccountpw | 42
-rwxr-xr-x  source4/scripting/bin/nsupdate-gss | 352
-rwxr-xr-x  source4/scripting/bin/rebuildextendeddn | 135
-rwxr-xr-x  source4/scripting/bin/renamedc | 191
-rwxr-xr-x  source4/scripting/bin/samba-gpupdate | 138
-rwxr-xr-x  source4/scripting/bin/samba-tool | 50
-rwxr-xr-x  source4/scripting/bin/samba3dump | 180
-rwxr-xr-x  source4/scripting/bin/samba_dnsupdate | 960
-rwxr-xr-x  source4/scripting/bin/samba_downgrade_db | 135
-rwxr-xr-x  source4/scripting/bin/samba_kcc | 345
-rwxr-xr-x  source4/scripting/bin/samba_spnupdate | 254
-rwxr-xr-x  source4/scripting/bin/samba_upgradedns | 589
-rwxr-xr-x  source4/scripting/bin/samba_upgradeprovision | 1848
-rwxr-xr-x  source4/scripting/bin/setup_dns.sh | 43
-rwxr-xr-x  source4/scripting/bin/subunitrun | 87
-rw-r--r--  source4/scripting/bin/wscript_build | 14
-rw-r--r--  source4/scripting/devel/addlotscontacts | 95
-rw-r--r--  source4/scripting/devel/chgkrbtgtpass | 60
-rwxr-xr-x  source4/scripting/devel/chgtdcpass | 63
-rwxr-xr-x  source4/scripting/devel/config_base | 39
-rwxr-xr-x  source4/scripting/devel/crackname | 77
-rwxr-xr-x  source4/scripting/devel/demodirsync.py | 159
-rw-r--r--  source4/scripting/devel/drs/fsmo.ldif.template | 75
-rw-r--r--  source4/scripting/devel/drs/named.conf.ad.template | 6
-rwxr-xr-x  source4/scripting/devel/drs/revampire_ad.sh | 23
-rwxr-xr-x  source4/scripting/devel/drs/unvampire_ad.sh | 14
-rwxr-xr-x  source4/scripting/devel/drs/vampire_ad.sh | 28
-rw-r--r--  source4/scripting/devel/drs/vars | 12
-rwxr-xr-x  source4/scripting/devel/enumprivs | 58
-rwxr-xr-x  source4/scripting/devel/getncchanges | 143
-rwxr-xr-x  source4/scripting/devel/nmfind | 15
-rwxr-xr-x  source4/scripting/devel/pfm_verify.py | 192
-rwxr-xr-x  source4/scripting/devel/rebuild_zone.sh | 109
-rwxr-xr-x  source4/scripting/devel/repl_cleartext_pwd.py | 412
-rwxr-xr-x  source4/scripting/devel/rodcdns | 43
-rwxr-xr-x  source4/scripting/devel/speedtest.py | 235
-rwxr-xr-x  source4/scripting/devel/tmpfs.sh | 16
-rw-r--r--  source4/scripting/devel/watch_servers.sh | 14
-rw-r--r--  source4/scripting/man/samba-gpupdate.8.xml | 128
-rw-r--r--  source4/scripting/wscript_build | 24
57 files changed, 9297 insertions, 0 deletions
diff --git a/source4/script/buildtree.pl b/source4/script/buildtree.pl
new file mode 100755
index 0000000..a40036a
--- /dev/null
+++ b/source4/script/buildtree.pl
@@ -0,0 +1,40 @@
+#! /usr/bin/env perl -w
+ eval 'exec /usr/bin/env perl -S $0 ${1+"$@"}'
+ if 0; #$running_under_some_shell
+
+use strict;
+use File::Find ();
+use File::Path qw(mkpath);
+use Cwd 'abs_path';
+
+# Set the variable $File::Find::dont_use_nlink if you're using AFS,
+# since AFS cheats.
+
+# for the convenience of &wanted calls, including -eval statements:
+use vars qw/*name *dir *prune/;
+*name = *File::Find::name;
+*dir = *File::Find::dir;
+*prune = *File::Find::prune;
+my $builddir = abs_path($ENV{builddir});
+my $srcdir = abs_path($ENV{srcdir});
+sub wanted;
+
+
+
+# Traverse desired filesystems
+File::Find::find({wanted => \&wanted, no_chdir => 1}, $srcdir);
+exit;
+
+
+sub wanted {
+ my ($dev,$ino,$mode,$nlink,$uid,$gid,$newdir);
+
+ if ((($dev,$ino,$mode,$nlink,$uid,$gid) = lstat($_)) &&
+ (-d _) && (($newdir = abs_path($_)) !~ /$builddir/))
+ {
+ $newdir =~ s!$srcdir!$builddir!;
+ mkpath($newdir);
+ print("Creating $newdir\n");
+ }
+}
+
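buildtree.pl mirrors the directory layout of the source tree under the build directory, taking both locations from the srcdir and builddir environment variables. A minimal sketch of an invocation, with the bin/ build directory chosen purely as an example:

    srcdir=. builddir=bin perl source4/script/buildtree.pl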
diff --git a/source4/script/depfilter.py b/source4/script/depfilter.py
new file mode 100755
index 0000000..ee2ce9d
--- /dev/null
+++ b/source4/script/depfilter.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python3
+#
+# Filter out arcs in a dotty graph that are at or below a certain
+# node. This is useful for visualising parts of the dependency graph.
+#
+
+# Command line stuff
+
+import sys
+import re
+
+if len(sys.argv) != 2:
+ print('Usage: depfilter.py NODE')
+ sys.exit(1)
+
+top = sys.argv[1]
+
+# Read in dot file
+
+lines = sys.stdin.readlines()
+
+graph = {}
+
+for arc in lines[1:-1]:
+ match = re.search('"(.*)" -> "(.*)"', arc)
+ n1, n2 = match.group(1), match.group(2)
+ if n1 not in graph:
+ graph[n1] = []
+ graph[n1].append(n2)
+
+# Create subset of 'graph' rooted at 'top'
+
+subgraph = {}
+
+
+def add_deps(node):
+ if node in graph and node not in subgraph:
+ subgraph[node] = graph[node]
+ for n in graph[node]:
+ add_deps(n)
+
+
+add_deps(top)
+
+# Generate output
+
+print(lines[0], end=' ')
+
+for key, value in subgraph.items():
+ for n in value:
+ print('\t"%s" -> "%s"' % (key, n))
+
+print(lines[-1], end=' ')
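depfilter.py reads a dot graph on stdin and emits only the arcs reachable from the node named on the command line. A hypothetical pipeline, assuming a deps.dot produced elsewhere and a LIBNDR node of interest:

    ./source4/script/depfilter.py LIBNDR < deps.dot > libndr.dot
    dot -Tpng libndr.dot -o libndr.png   # render the filtered subgraph with graphviz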
diff --git a/source4/script/extract_allparms.sh b/source4/script/extract_allparms.sh
new file mode 100755
index 0000000..f16068b
--- /dev/null
+++ b/source4/script/extract_allparms.sh
@@ -0,0 +1,2 @@
+#!/bin/sh
+grep '{".*P_[GL]' param/loadparm.c | sed -e 's/&.*$//g' -e 's/",.*P_LOCAL.*$/ S/' -e 's/",.*P_GLOBAL.*$/ G/' -e 's/^ .*{"//g' | sort -f
diff --git a/source4/script/find_unused_options.sh b/source4/script/find_unused_options.sh
new file mode 100755
index 0000000..36e9771
--- /dev/null
+++ b/source4/script/find_unused_options.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+#
+# this script finds unused lp_*() functions
+#
+# use it like this:
+#
+# user@host:~/samba/source>./script/find_unused_options.sh
+#
+
+LIST_GLOBAL=$(grep '^FN_GLOBAL' param/loadparm.c | sed -e's/^FN_GLOBAL.*(\(.*\).*,.*\(&Globals\..*\)).*/\1:\2/')
+
+LIST_LOCAL=$(grep '^FN_LOCAL' param/loadparm.c | sed -e's/^FN_LOCAL.*(\(.*\).*,[ ]*\(.*\)).*/\1:\2/')
+
+CFILES=$(find . -name "*.c")
+
+for i in $LIST_GLOBAL; do
+ key=$(echo $i | cut -d ':' -f1)
+ val=$(echo $i | cut -d ':' -f2)
+
+ found=$(grep "$key[ ]*()" $CFILES)
+ if test -z "$found"; then
+ echo "Not Used Global: $key() -> $val"
+ fi
+done
+
+for i in $LIST_LOCAL; do
+ key=$(echo $i | cut -d ':' -f1)
+ val=$(echo $i | cut -d ':' -f2)
+
+ found=$(grep "$key[ ]*(" $CFILES)
+
+ if test -z "$found"; then
+ echo "Not Used LOCAL: $key() -> $val"
+ fi
+done
+
+echo "# do a 'make clean;make everything' before removing anything!"
diff --git a/source4/script/minimal_includes.pl b/source4/script/minimal_includes.pl
new file mode 100755
index 0000000..4203d00
--- /dev/null
+++ b/source4/script/minimal_includes.pl
@@ -0,0 +1,171 @@
+#!/usr/bin/perl -w
+# find a list of #include lines in C code that might not be needed
+# usually called with something like this:
+# minimal_includes.pl `find . -name "*.c"`
+# Andrew Tridgell <tridge@samba.org>
+
+use strict;
+use Data::Dumper;
+use Getopt::Long;
+
+my $opt_help = 0;
+my $opt_remove = 0;
+my $opt_skip_system = 0;
+my $opt_waf = 0;
+
+#####################################################################
+# write a string into a file
+sub FileSave($$)
+{
+ my($filename) = shift;
+ my($v) = shift;
+ local(*FILE);
+ open(FILE, ">$filename") || die "can't open $filename";
+ print FILE $v;
+ close(FILE);
+}
+
+sub load_lines($)
+{
+ my $fname = shift;
+ my @lines = split(/^/m, `cat $fname`);
+ return @lines;
+}
+
+sub save_lines($$)
+{
+ my $fname = shift;
+ my $lines = shift;
+ my $data = join('', @{$lines});
+ FileSave($fname, $data);
+}
+
+sub test_compile($)
+{
+ my $fname = shift;
+ my $obj;
+ if ($opt_waf) {
+ my $ret = `../buildtools/bin/waf $fname 2>&1`;
+ return $ret
+ }
+ if ($fname =~ s/(.*)\..*$/$1.o/) {
+ $obj = "$1.o";
+ } else {
+ return "NOT A C FILE";
+ }
+ unlink($obj);
+ my $ret = `make $obj 2>&1`;
+ if (!unlink("$obj")) {
+ return "COMPILE FAILED";
+ }
+ return $ret;
+}
+
+sub test_include($$$$)
+{
+ my $fname = shift;
+ my $lines = shift;
+ my $i = shift;
+ my $original = shift;
+ my $line = $lines->[$i];
+ my $testfname;
+
+ $lines->[$i] = "";
+
+ my $mname = $fname . ".misaved";
+
+ unlink($mname);
+ rename($fname, $mname) || die "failed to rename $fname";
+ save_lines($fname, $lines);
+
+ my $out = test_compile($fname);
+
+ if ($out eq $original) {
+ if ($opt_remove) {
+ if ($opt_skip_system &&
+ $line =~ /system\//) {
+ print "$fname: not removing system include $line\n";
+ } else {
+ print "$fname: removing $line\n";
+ unlink($mname);
+ return;
+ }
+ } else {
+ print "$fname: might be able to remove $line\n";
+ }
+ }
+
+ $lines->[$i] = $line;
+ rename($mname, $fname) || die "failed to restore $fname";
+}
+
+sub process_file($)
+{
+ my $fname = shift;
+ my @lines = load_lines($fname);
+ my $num_lines = $#lines;
+
+ my $original = test_compile($fname);
+
+ if ($original eq "COMPILE FAILED") {
+ print "Failed to compile $fname\n";
+ return;
+ }
+
+ print "Processing $fname (with $num_lines lines)\n";
+
+ my $if_level = 0;
+
+ for (my $i=0;$i<=$num_lines;$i++) {
+ my $line = $lines[$i];
+ if ($line =~ /^\#\s*if/) {
+ $if_level++;
+ }
+ if ($line =~ /^\#\s*endif/) {
+ $if_level--;
+ }
+ if ($if_level == 0 &&
+ $line =~ /^\#\s*include/ &&
+ !($line =~ /needed/)) {
+ test_include($fname, \@lines, $i, $original);
+ }
+ }
+}
+
+
+#########################################
+# display help text
+sub ShowHelp()
+{
+ print "
+ minimise includes
+ Copyright (C) tridge\@samba.org
+
+ Usage: minimal_includes.pl [options] <C files....>
+
+ Options:
+ --help show help
+ --remove remove includes, don't just list them
+ --skip-system don't remove system/ includes
+ --waf use waf target conventions
+";
+}
+
+
+# main program
+GetOptions (
+ 'h|help|?' => \$opt_help,
+ 'remove' => \$opt_remove,
+ 'skip-system' => \$opt_skip_system,
+ 'waf' => \$opt_waf,
+ );
+
+if ($opt_help) {
+ ShowHelp();
+ exit(0);
+}
+
+for (my $i=0;$i<=$#ARGV;$i++) {
+ my $fname = $ARGV[$i];
+ process_file($fname);
+}
diff --git a/source4/script/mkproto.pl b/source4/script/mkproto.pl
new file mode 100755
index 0000000..2c3ebac
--- /dev/null
+++ b/source4/script/mkproto.pl
@@ -0,0 +1,252 @@
+#!/usr/bin/perl
+# Simple script for generating prototypes for C functions
+# Written by Jelmer Vernooij
+# based on the original mkproto.sh by Andrew Tridgell
+
+use strict;
+
+# don't use warnings module as it is not portable enough
+# use warnings;
+
+use Getopt::Long;
+use File::Basename;
+use File::Path;
+
+#####################################################################
+# read a file into a string
+
+my $public_file = undef;
+my $private_file = undef;
+my $all_file = undef;
+my $public_define = undef;
+my $private_define = undef;
+my $_public = "";
+my $_private = "";
+my $public_data = \$_public;
+my $private_data = \$_private;
+my $builddir = ".";
+my $srcdir = ".";
+
+sub public($)
+{
+ my ($d) = @_;
+ $$public_data .= $d;
+}
+
+sub private($)
+{
+ my ($d) = @_;
+ $$private_data .= $d;
+}
+
+sub usage()
+{
+ print "Usage: mkproto.pl [options] [c files]\n";
+ print "OPTIONS:\n";
+ print " --public=FILE Write prototypes for public functions to FILE\n";
+ print " --private=FILE Write prototypes for private functions to FILE\n";
+ print " --define=DEF Use DEF to check whether header was already included\n";
+ print " --public-define=DEF Same as --define, but just for public header\n";
+ print " --private-define=DEF Same as --define, but just for private header\n";
+ print " --srcdir=path Read files relative to this directory\n";
+ print " --builddir=path Write file relative to this directory\n";
+ print " --help Print this help message\n\n";
+ exit 0;
+}
+
+GetOptions(
+ 'public=s' => sub { my ($f,$v) = @_; $public_file = $v; },
+ 'all=s' => sub { my ($f,$v) = @_; $public_file = $v; $private_file = $v; },
+ 'private=s' => sub { my ($f,$v) = @_; $private_file = $v; },
+ 'define=s' => sub {
+ my ($f,$v) = @_;
+ $public_define = $v;
+ $private_define = "$v\_PRIVATE";
+ },
+ 'public-define=s' => \$public_define,
+ 'private-define=s' => \$private_define,
+ 'srcdir=s' => sub { my ($f,$v) = @_; $srcdir = $v; },
+ 'builddir=s' => sub { my ($f,$v) = @_; $builddir = $v; },
+ 'help' => \&usage
+) or exit(1);
+
+sub normalize_define($$)
+{
+ my ($define, $file) = @_;
+
+ if (not defined($define) and defined($file)) {
+ $define = "__" . uc($file) . "__";
+ $define =~ tr{./}{__};
+ $define =~ tr{\-}{_};
+ } elsif (not defined($define)) {
+ $define = '_PROTO_H_';
+ }
+
+ return $define;
+}
+
+$public_define = normalize_define($public_define, $public_file);
+$private_define = normalize_define($private_define, $private_file);
+
+if ((defined($private_file) and defined($public_file) and ($private_file eq $public_file)) or
+ (not defined($private_file) and not defined($public_file))) {
+ $private_data = $public_data;
+}
+
+sub file_load($)
+{
+ my($filename) = @_;
+ local(*INPUTFILE);
+ open(INPUTFILE, $filename) or return undef;
+ my($saved_delim) = $/;
+ undef $/;
+ my($data) = <INPUTFILE>;
+ close(INPUTFILE);
+ $/ = $saved_delim;
+ return $data;
+}
+
+sub print_header($$)
+{
+ my ($file, $header_name) = @_;
+ $file->("#ifndef $header_name\n");
+ $file->("#define $header_name\n\n");
+ $file->("#undef _PRINTF_ATTRIBUTE\n");
+ $file->("#define _PRINTF_ATTRIBUTE(a1, a2) PRINTF_ATTRIBUTE(a1, a2)\n");
+ $file->("/* This file was automatically generated by mkproto.pl. DO NOT EDIT */\n\n");
+}
+
+sub print_footer($$)
+{
+ my ($file, $header_name) = @_;
+ $file->("#undef _PRINTF_ATTRIBUTE\n");
+ $file->("#define _PRINTF_ATTRIBUTE(a1, a2)\n");
+ $file->("\n#endif /* $header_name */\n\n");
+}
+
+sub process_file($$$)
+{
+ my ($public_file, $private_file, $filename) = @_;
+
+ $filename =~ s/\.o$/\.c/g;
+
+ if ($filename =~ /^\//) {
+ open(FH, "<$filename") or die("Failed to open $filename");
+ } elsif (!open(FH, "< $builddir/$filename")) {
+ open(FH, "< $srcdir/$filename") || die "Failed to open $filename";
+ }
+
+ $private_file->("\n/* The following definitions come from $filename */\n\n");
+
+ my $comment = undef;
+ my $incomment = 0;
+ while (my $line = <FH>) {
+ my $target = \&private;
+ my $is_public = 0;
+
+ if ($line =~ /^\/\*\*/) {
+ $comment = "";
+ $incomment = 1;
+ }
+
+ if ($incomment) {
+ $comment .= $line;
+ if ($line =~ /\*\//) {
+ $incomment = 0;
+ }
+ }
+
+ # these are ordered for maximum speed
+ next if ($line =~ /^\s/);
+
+ next unless ($line =~ /\(/);
+
+ next if ($line =~ /^\/|[;]/);
+
+ if ($line =~ /^FN_/) {
+ next;
+ }
+
+ if ($line =~ /^_PUBLIC_[\t ]/) {
+ $target = \&public;
+ $is_public = 1;
+ }
+
+ next unless ( $is_public || $line =~ /
+ ^(_DEPRECATED_ |_NORETURN_ |_WARN_UNUSED_RESULT_ |_PURE_ )*(
+ void|bool|int|struct|char|const|\w+_[tT]\s|uint|unsigned|long|NTSTATUS|
+ ADS_STATUS|enum\s.*\(|DATA_BLOB|WERROR|XFILE|FILE|DIR|
+ double|TDB_CONTEXT|TDB_DATA|TALLOC_CTX|NTTIME|FN_|init_module|
+ GtkWidget|GType|smb_ucs2_t|krb5_error_code|NET_API_STATUS)
+ /xo);
+
+ next if ($line =~ /^int\s*main/);
+
+ $target->("\n$comment") if (defined($comment)); $comment = undef;
+
+ if ( $line =~ /\(.*\)\s*$/o ) {
+ chomp $line;
+ $target->("$line;\n");
+ next;
+ }
+
+ $target->($line);
+
+ while ($line = <FH>) {
+ if ($line =~ /\)\s*$/o) {
+ chomp $line;
+ $target->("$line;\n");
+ last;
+ }
+ $target->($line);
+ }
+ }
+
+ close(FH);
+}
+
+
+print_header(\&public, $public_define);
+if (defined($private_file) and defined($public_file) and $public_file ne $private_file) {
+ print_header(\&private, $private_define);
+
+ private("/* this file contains prototypes for functions that " .
+ "are private \n * to this subsystem or library. These functions " .
+ "should not be \n * used outside this particular subsystem! */\n\n");
+
+ public("/* this file contains prototypes for functions that " .
+ "are part of \n * the public API of this subsystem or library. */\n\n");
+
+}
+
+public("#ifndef _PUBLIC_\n#define _PUBLIC_\n#endif\n\n");
+public("#ifndef _PURE_\n#define _PURE_\n#endif\n\n");
+public("#ifndef _NORETURN_\n#define _NORETURN_\n#endif\n\n");
+public("#ifndef _DEPRECATED_\n#define _DEPRECATED_\n#endif\n\n");
+public("#ifndef _WARN_UNUSED_RESULT_\n#define _WARN_UNUSED_RESULT_\n#endif\n\n");
+
+process_file(\&public, \&private, $_) foreach (@ARGV);
+print_footer(\&public, $public_define);
+if (defined($private_file) and $public_file ne $private_file) {
+ print_footer(\&private, $private_define);
+}
+
+if (not defined($public_file)) {
+ print STDOUT $$public_data;
+}
+
+if (not defined($private_file) and defined($public_file)) {
+ print STDOUT $$private_data;
+}
+
+mkpath(dirname($public_file), 0, 0755);
+open(PUBLIC, ">$public_file") or die("Can't open `$public_file': $!");
+print PUBLIC "$$public_data";
+close(PUBLIC);
+
+if (defined($private_file) and $public_file ne $private_file) {
+ mkpath(dirname($private_file), 0, 0755);
+ open(PRIVATE, ">$private_file") or die("Can't open `$private_file': $!");
+ print PRIVATE "$$private_data";
+ close(PRIVATE);
+}
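mkproto.pl scans C sources for function definitions and emits matching prototypes, sending _PUBLIC_ functions to the public header and the rest to the private one. A sketch of one way to call it, with all file names invented for the example:

    perl source4/script/mkproto.pl --srcdir=. --builddir=bin \
        --public=bin/default/mylib_proto.h \
        --private=bin/default/mylib_proto_private.h \
        mylib/foo.c mylib/bar.c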
diff --git a/source4/script/update-proto.pl b/source4/script/update-proto.pl
new file mode 100755
index 0000000..c130650
--- /dev/null
+++ b/source4/script/update-proto.pl
@@ -0,0 +1,242 @@
+#!/usr/bin/perl
+# Simple script for updating the prototypes in a C header file
+#
+# Copyright (C) 2006 Jelmer Vernooij <jelmer@samba.org>
+# Published under the GNU GPL
+
+use strict;
+use warnings;
+use Getopt::Long;
+
+=head1 NAME
+
+update-proto - automatically update prototypes in header files
+
+=head1 SYNOPSIS
+
+update-proto [OPTIONS] <HEADER> <C-FILE>...
+
+update-proto [OPTIONS] <HEADER>
+
+=head1 DESCRIPTION
+
+Update-proto makes sure the prototypes in a C header file are current
+by comparing the existing prototypes in a header with the function definition
+in the source file. It aims to keep the diff between the original header
+and generated one as small as possible.
+
+New prototypes are inserted before any line that contains the following comment:
+
+/* New prototypes are inserted above this line */
+
+It will automatically parse C files after it encounters a line that contains:
+
+/* The following definitions come from FILE */
+
+When two or more prototypes exist for a function, only the first one
+will be kept.
+
+=head1 OPTIONS
+
+=over 4
+
+=item I<--verbose|-v>
+
+Increase verbosity. Currently only two levels of verbosity are used.
+
+=item I<--help>
+
+Show list of options
+
+=back
+
+=head1 BUGS
+
+Strange complex functions are not recognized. In particular those
+created by macros or returning (without typedef) function pointers.
+
+=head1 LICENSE
+
+update-proto is licensed under the GNU General Public License L<http://www.gnu.org/licenses/gpl.html>.
+
+=head1 AUTHOR
+
+update-proto was written by Jelmer Vernooij L<jelmer@samba.org>.
+
+=cut
+
+sub Usage()
+{
+ print "Usage: update-proto.pl [OPTIONS] <HEADER> <C-FILE>...\n";
+ exit 1;
+}
+
+sub Help()
+{
+ print "Usage: update-proto.pl [OPTIONS] <HEADER> <C-FILE>...\n";
+ print "Options:\n";
+ print " --help Show this help message\n";
+ print " --verbose Write changes made to standard error\n\n";
+ exit 0;
+}
+
+my %new_protos = ();
+
+my $verbose = 0;
+
+GetOptions(
+ 'help|h' => \&Help,
+ 'v|verbose' => sub { $verbose += 1; }
+) or Usage();
+
+sub count($$)
+{
+ my ($t, $s) = @_;
+ my $count = 0;
+ while($s =~ s/^(.)//) { $count++ if $1 eq $t; }
+ return $count;
+}
+
+my $header = shift @ARGV;
+
+sub process_file($)
+{
+ my $file = shift;
+ open (IN, "<$file");
+ while (my $line = <IN>) {
+ $_ = $line;
+ next if /^\s/;
+ next unless /\(/;
+ next if /^\/|[;]|^#|}|^\s*static/;
+ s/\/\*(.*?)\*\///g;
+ my $public = s/_PUBLIC_//g;
+ s/_PRINTF_ATTRIBUTE\([^)]+\)//g;
+ next unless /^(struct\s+\w+|union\s+\w+|\w+)\s+\**\s*(\w+)\s*\((.*)$/;
+
+ my $name = $2;
+
+ next if ($name eq "main");
+
+ # Read continuation lines if any
+ my $prn = 1 + count("(", $3) - count(")", $3);
+
+ while ($prn) {
+ my $l = <IN>;
+ $l or die("EOF while parsing function prototype");
+ $line .= $l;
+ $prn += count("(", $l) - count(")", $l);
+ }
+
+ $line =~ s/\n$//;
+
+ # Strip off possible start of function
+ $line =~ s/{\s*$//g;
+
+ $new_protos{$name} = "$line;";
+ }
+ close(IN);
+}
+
+process_file($_) foreach (@ARGV);
+
+my $added = 0;
+my $modified = 0;
+my $deleted = 0;
+my $kept = 0;
+
+sub insert_new_protos()
+{
+ foreach (keys %new_protos) {
+ print "$new_protos{$_}\n";
+ print STDERR "Inserted prototype for `$_'\n" if ($verbose);
+ $added+=1;
+ }
+ %new_protos = ();
+}
+
+my $blankline_due = 0;
+
+open (HDR, "<$header");
+while (my $line = <HDR>) {
+ if ($line eq "\n") {
+ $blankline_due = 1;
+ $line = <HDR>;
+ }
+
+ # Recognize C files that prototypes came from
+ if ($line =~ /\/\* The following definitions come from (.*) \*\//) {
+ insert_new_protos();
+ if ($blankline_due) {
+ print "\n";
+ $blankline_due = 0;
+ }
+ process_file($1);
+ print "$line";
+ next;
+ }
+
+ if ($blankline_due) {
+ print "\n";
+ $blankline_due = 0;
+ }
+
+ # Insert prototypes that weren't in the header before
+ if ($line =~ /\/\* New prototypes are inserted above this line.*\*\/\s*/) {
+ insert_new_protos();
+ print "$line\n";
+ next;
+ }
+
+ if ($line =~ /^\s*typedef |^\#|^\s*static/) {
+ print "$line";
+ next;
+ }
+
+ $_ = $line;
+ s/\/\*(.*?)\*\///g;
+ my $public = s/_PUBLIC_//g;
+ s/_PRINTF_ATTRIBUTE\([^)]+\)//g;
+ unless (/^(struct\s+\w+|union\s+\w+|\w+)\s+\**\s*(\w+)\s*\((.*)$/) {
+ print "$line";
+ next;
+ }
+
+ # Read continuation lines if any
+ my $prn = 1 + count("(", $3) - count(")", $3);
+
+ while ($prn) {
+ my $l = <HDR>;
+ $l or die("EOF while parsing function prototype");
+ $line .= $l;
+ $prn += count("(", $l) - count(")", $l);
+ }
+
+ my $name = $2;
+
+ # This prototype is for a function that was removed
+ unless (defined($new_protos{$name})) {
+ $deleted+=1;
+ print STDERR "Removed prototype for `$name'\n" if ($verbose);
+ next;
+ }
+
+ my $nline = $line;
+ chop($nline);
+
+ if ($new_protos{$name} ne $nline) {
+ $modified+=1;
+ print STDERR "Updated prototype for `$name'\n" if ($verbose);
+ print "$new_protos{$name}\n";
+ } else {
+ $kept+=1;
+ print STDERR "Prototype for `$name' didn't change\n" if ($verbose > 1);
+ print "$line";
+ }
+
+ delete $new_protos{$name};
+}
+close(HDR);
+
+print STDERR "$added added, $modified modified, $deleted deleted, $kept unchanged.\n";
+
+1;
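update-proto.pl writes the updated header to stdout and relies on the two marker comments described in the POD above: a per-file marker that tells it which C file to reparse, and the insertion marker for brand-new prototypes. A minimal header skeleton, with the path names invented for the example:

    cat > mylib/proto.h <<'EOF'
    /* The following definitions come from mylib/foo.c */

    /* New prototypes are inserted above this line */
    EOF
    perl source4/script/update-proto.pl mylib/proto.h > mylib/proto.h.new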
diff --git a/source4/scripting/bin/enablerecyclebin b/source4/scripting/bin/enablerecyclebin
new file mode 100755
index 0000000..3477f90
--- /dev/null
+++ b/source4/scripting/bin/enablerecyclebin
@@ -0,0 +1,53 @@
+#!/usr/bin/env python3
+#
+# enables the Recycle Bin optional feature
+#
+import optparse
+import sys
+
+# Find right directory when running from source tree
+sys.path.insert(0, "bin/python")
+
+import samba
+from samba import getopt as options, Ldb
+from ldb import SCOPE_BASE
+import sys
+import ldb
+from samba.auth import system_session
+
+parser = optparse.OptionParser("enablerecyclebin <URL>")
+sambaopts = options.SambaOptions(parser)
+parser.add_option_group(sambaopts)
+credopts = options.CredentialsOptions(parser)
+parser.add_option_group(credopts)
+parser.add_option_group(options.VersionOptions(parser))
+
+opts, args = parser.parse_args()
+opts.dump_all = True
+
+if len(args) != 1:
+ parser.print_usage()
+ sys.exit(1)
+
+url = args[0]
+
+lp_ctx = sambaopts.get_loadparm()
+
+creds = credopts.get_credentials(lp_ctx)
+sam_ldb = Ldb(url, session_info=system_session(), credentials=creds, lp=lp_ctx)
+
+# get the rootDSE
+res = sam_ldb.search(base="", expression="", scope=SCOPE_BASE, attrs=["configurationNamingContext"])
+rootDse = res[0]
+
+configbase=rootDse["configurationNamingContext"]
+
+# enable the feature
+msg = ldb.Message()
+msg.dn = ldb.Dn(sam_ldb, "")
+msg["enableOptionalFeature"] = ldb.MessageElement(
+ "CN=Partitions," + str(configbase) + ":766ddcd8-acd0-445e-f3b9-a7f9b6744f2a",
+ ldb.FLAG_MOD_ADD, "enableOptionalFeature")
+res = sam_ldb.modify(msg)
+
+print("Recycle Bin feature enabled")
diff --git a/source4/scripting/bin/findprovisionusnranges b/source4/scripting/bin/findprovisionusnranges
new file mode 100755
index 0000000..b05b5ce
--- /dev/null
+++ b/source4/scripting/bin/findprovisionusnranges
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+#
+# Helper for determining USN ranges created or modified by provision and
+# upgradeprovision.
+# Copyright (C) Matthieu Patou <mat@matws.net> 2009-2011
+#
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import sys
+import optparse
+sys.path.insert(0, "bin/python")
+
+from samba.credentials import DONT_USE_KERBEROS
+from samba.auth import system_session
+from samba import Ldb
+import ldb
+
+import samba.getopt as options
+from samba import param
+from samba.upgradehelpers import get_paths, print_provision_ranges, findprovisionrange
+from samba.ndr import ndr_unpack
+from samba.dcerpc import misc
+
+parser = optparse.OptionParser("findprovisionusnranges [options]")
+sambaopts = options.SambaOptions(parser)
+parser.add_option_group(sambaopts)
+parser.add_option_group(options.VersionOptions(parser))
+parser.add_option("--storedir", type="string", help="Directory where to store result files")
+credopts = options.CredentialsOptions(parser)
+parser.add_option_group(credopts)
+opts = parser.parse_args()[0]
+lp = sambaopts.get_loadparm()
+smbconf = lp.configfile
+
+creds = credopts.get_credentials(lp)
+creds.set_kerberos_state(DONT_USE_KERBEROS)
+session = system_session()
+paths = get_paths(param, smbconf=smbconf)
+basedn="DC=" + lp.get("realm").replace(".",",DC=")
+samdb = Ldb(paths.samdb, session_info=session, credentials=creds,lp=lp)
+
+res = samdb.search(base="", scope=ldb.SCOPE_BASE, attrs=["dsServiceName"])
+
+invocation = None
+if res and len(res) == 1 and res[0]["dsServiceName"] != None:
+ dn = ldb.Dn(samdb, str(res[0]["dsServiceName"]))
+ res = samdb.search(base=str(dn), scope=ldb.SCOPE_BASE, attrs=["invocationId"],
+ controls=["search_options:1:2"])
+
+ if res and len(res) == 1 and res[0]["invocationId"]:
+ invocation = str(ndr_unpack(misc.GUID, res[0]["invocationId"][0]))
+ else:
+ print("Unable to find invocation ID")
+ sys.exit(1)
+else:
+ print("Unable to find attribute dsServiceName in rootDSE")
+ sys.exit(1)
+
+minobj = 5
+(hash_id, nb_obj) = findprovisionrange(samdb, basedn)
+print("Here is a list of changes that modified more than %d objects in 1 minute." % minobj)
+print("Usually changes made by provision and upgradeprovision are those who affect a couple"
+ " of hundred of objects or more")
+print("Total number of objects: %d\n" % nb_obj)
+
+print_provision_ranges(hash_id, minobj, opts.storedir, str(paths.samdb), invocation)
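findprovisionusnranges looks through the local sam.ldb for bursts of changes large enough to have come from provision or upgradeprovision and prints the corresponding USN ranges. One possible invocation, with the smb.conf path and store directory as assumptions:

    source4/scripting/bin/findprovisionusnranges \
        -s /usr/local/samba/etc/smb.conf --storedir=/tmp/usnranges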
diff --git a/source4/scripting/bin/gen_error_common.py b/source4/scripting/bin/gen_error_common.py
new file mode 100644
index 0000000..aa71afa
--- /dev/null
+++ b/source4/scripting/bin/gen_error_common.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python3
+
+#
+# Unix SMB/CIFS implementation.
+#
+# Utility methods for generating error codes from a file.
+#
+# Copyright (C) Noel Power <noel.power@suse.com> 2014
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# error data model
+class ErrorDef:
+ def __init__(self):
+ self.err_code = None
+ self.err_define = None
+ self.err_string = ""
+ self.linenum = ""
+
+def escapeString( input ):
+ output = input.replace('"','\\"')
+ output = output.replace("\\<","\\\\<")
+ output = output.replace('\t',"")
+ return output
+
+# Parse error descriptions from a file which is the content
+# of an HTML table.
+# The file must be formatted as:
+# [error code hex]
+# [error name short]
+# [error description]
+# Blank lines are allowed and errors do not have to have a
+# description.
+# Returns a list of ErrorDef objects.
+def parseErrorDescriptions( file_contents, isWinError, transformErrorFunction ):
+ errors = []
+ count = 0
+ for line in file_contents:
+ if line is None or line == '\t' or line == "" or line == '\n':
+ continue
+ content = line.strip().split(None,1)
+ # start new error definition ?
+ if line.startswith("0x"):
+ newError = ErrorDef()
+ newError.err_code = int(content[0],0)
+ # escape the usual suspects
+ if len(content) > 1:
+ newError.err_string = escapeString(content[1])
+ newError.linenum = count
+ newError.isWinError = isWinError
+ errors.append(newError)
+ else:
+ if len(errors) == 0:
+ continue
+ err = errors[-1]
+ if err.err_define is None:
+ err.err_define = transformErrorFunction(content[0])
+ else:
+ if len(content) > 0:
+ desc = escapeString(line.strip())
+ if len(desc):
+ if err.err_string == "":
+ err.err_string = desc
+ else:
+ err.err_string = err.err_string + " " + desc
+ count = count + 1
+ print("parsed %d lines generated %d error definitions"%(count,len(errors)))
+ return errors
+
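parseErrorDescriptions() expects the pasted table as plain text: a line starting with a hex code opens a new error, the next non-blank line carries the short name, and any further lines are appended to the description. An illustrative fragment of such an input file (the heredoc is only for convenience):

    cat > ntstatus_table.txt <<'EOF'
    0xC0000001
    STATUS_UNSUCCESSFUL
    {Operation Failed} The requested operation was unsuccessful.
    EOF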
diff --git a/source4/scripting/bin/gen_hresult.py b/source4/scripting/bin/gen_hresult.py
new file mode 100755
index 0000000..6a75c37
--- /dev/null
+++ b/source4/scripting/bin/gen_hresult.py
@@ -0,0 +1,228 @@
+#!/usr/bin/env python3
+
+#
+# Unix SMB/CIFS implementation.
+#
+# HRESULT Error definitions
+#
+# Copyright (C) Noel Power <noel.power@suse.com> 2014
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+import sys, os.path, io, string
+
+# parsed error data
+Errors = []
+
+# error data model
+class ErrorDef:
+
+ def __init__(self):
+ self.err_code = ""
+ self.err_define = None
+ self.err_string = ""
+ self.isWinError = False
+ self.linenum = ""
+
+def escapeString( input ):
+ output = input.replace('"','\\"')
+ output = output.replace("\\<","\\\\<")
+ output = output.replace('\t',"")
+ return output
+
+def parseErrorDescriptions( input_file, isWinError ):
+ # read in the data
+ fileContents = open(input_file,"r")
+ count = 0;
+ for line in fileContents:
+ content = line.strip().split(None,1)
+ # start new error definition ?
+ if len(content) == 0:
+ continue
+ if line.startswith("0x"):
+ newError = ErrorDef()
+ newError.err_code = content[0]
+ # escape the usual suspects
+ if len(content) > 1:
+ newError.err_string = escapeString(content[1])
+ newError.linenum = count
+ newError.isWinError = isWinError
+ Errors.append(newError)
+ else:
+ if len(Errors) == 0:
+ print("Error parsing file as line %d"%count)
+ sys.exit()
+ err = Errors[-1]
+ if err.err_define is None:
+ err.err_define = "HRES_" + content[0]
+ else:
+ if len(content) > 0:
+ desc = escapeString(line.strip())
+ if len(desc):
+ if err.err_string == "":
+ err.err_string = desc
+ else:
+ err.err_string = err.err_string + " " + desc
+ count = count + 1
+ fileContents.close()
+ print("parsed %d lines generated %d error definitions"%(count,len(Errors)))
+
+def write_license(out_file):
+ out_file.write("/*\n")
+ out_file.write(" * Unix SMB/CIFS implementation.\n")
+ out_file.write(" *\n")
+ out_file.write(" * HRESULT Error definitions\n")
+ out_file.write(" *\n")
+ out_file.write(" * Copyright (C) Noel Power <noel.power@suse.com> 2014\n")
+ out_file.write(" *\n")
+ out_file.write(" * This program is free software; you can redistribute it and/or modify\n")
+ out_file.write(" * it under the terms of the GNU General Public License as published by\n")
+ out_file.write(" * the Free Software Foundation; either version 3 of the License, or\n")
+ out_file.write(" * (at your option) any later version.\n")
+ out_file.write(" *\n")
+ out_file.write(" * This program is distributed in the hope that it will be useful,\n")
+ out_file.write(" * but WITHOUT ANY WARRANTY; without even the implied warranty of\n")
+ out_file.write(" * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n")
+ out_file.write(" * GNU General Public License for more details.\n")
+ out_file.write(" *\n")
+ out_file.write(" * You should have received a copy of the GNU General Public License\n")
+ out_file.write(" * along with this program. If not, see <http://www.gnu.org/licenses/>.\n")
+ out_file.write(" */\n")
+ out_file.write("\n")
+
+def generateHeaderFile(out_file):
+ write_license(out_file)
+ out_file.write("#ifndef _HRESULT_H_\n")
+ out_file.write("#define _HRESULT_H_\n\n")
+ macro_magic = "#if defined(HAVE_IMMEDIATE_STRUCTURES)\n"
+ macro_magic += "typedef struct {uint32_t h;} HRESULT;\n"
+ macro_magic += "#define HRES_ERROR(x) ((HRESULT) { x })\n"
+ macro_magic += "#define HRES_ERROR_V(x) ((x).h)\n"
+ macro_magic += "#else\n"
+ macro_magic += "typedef uint32_t HRESULT;\n"
+ macro_magic += "#define HRES_ERROR(x) (x)\n"
+ macro_magic += "#define HRES_ERROR_V(x) (x)\n"
+ macro_magic += "#endif\n"
+ macro_magic += "\n"
+ macro_magic += "#define HRES_IS_OK(x) (HRES_ERROR_V(x) == 0)\n"
+ macro_magic += "#define HRES_IS_EQUAL(x,y) (HRES_ERROR_V(x) == HRES_ERROR_V(y))\n"
+
+ out_file.write(macro_magic)
+ out_file.write("\n\n")
+ out_file.write("/*\n")
+ out_file.write(" * The following error codes are autogenerated from [MS-ERREF]\n")
+ out_file.write(" * see http://msdn.microsoft.com/en-us/library/cc704587.aspx\n")
+ out_file.write(" */\n")
+ out_file.write("\n")
+
+ for err in Errors:
+ line = "#define {0:49} HRES_ERROR({1})\n".format(err.err_define ,err.err_code)
+ out_file.write(line)
+ out_file.write("\nconst char *hresult_errstr_const(HRESULT err_code);\n")
+ out_file.write("\nconst char *hresult_errstr(HRESULT err_code);\n")
+ out_file.write("\n#define FACILITY_WIN32 0x0007\n")
+ out_file.write("#define WIN32_FROM_HRESULT(x) (HRES_ERROR_V(x) == 0 ? HRES_ERROR_V(x) : ~((FACILITY_WIN32 << 16) | 0x80000000) & HRES_ERROR_V(x))\n")
+ out_file.write("#define HRESULT_IS_LIKELY_WERR(x) ((HRES_ERROR_V(x) & 0xFFFF0000) == 0x80070000)\n")
+ out_file.write("#define HRESULT_FROM_WERROR(x) (HRES_ERROR(0x80070000 | W_ERROR_V(x)))\n")
+ out_file.write("\n\n\n#endif /*_HRESULT_H_*/")
+
+
+def generateSourceFile(out_file):
+ write_license(out_file)
+ out_file.write("#include \"includes.h\"\n")
+ out_file.write("#include \"hresult.h\"\n")
+ out_file.write("/*\n")
+ out_file.write(" * The following error codes and descriptions are autogenerated from [MS-ERREF]\n")
+ out_file.write(" * see http://msdn.microsoft.com/en-us/library/cc704587.aspx\n")
+ out_file.write(" */\n")
+ out_file.write("\n")
+ out_file.write("static const struct {\n")
+ out_file.write(" HRESULT error_code;\n")
+ out_file.write(" const char *error_str;\n")
+ out_file.write(" const char *error_message;\n")
+ out_file.write("} hresult_errs[] = {\n")
+
+ for err in Errors:
+ out_file.write(" {\n")
+ if err.isWinError:
+ out_file.write(" HRESULT_FROM_WIN32(%s),\n"%err.err_define)
+ out_file.write(" \"HRESULT_FROM_WIN32(%s)\",\n"%err.err_define)
+ else:
+ out_file.write(" %s,\n"%err.err_define)
+ out_file.write(" \"%s\",\n"%err.err_define)
+ out_file.write(" \"%s\"\n"%err.err_string)
+ out_file.write(" },\n")
+ out_file.write("};\n")
+ out_file.write("\n")
+ out_file.write("const char *hresult_errstr_const(HRESULT err_code)\n")
+ out_file.write("{\n");
+ out_file.write(" const char *result = NULL;\n")
+ out_file.write(" int i;\n")
+ out_file.write(" for (i = 0; i < ARRAY_SIZE(hresult_errs); ++i) {\n")
+ out_file.write(" if (HRES_IS_EQUAL(err_code, hresult_errs[i].error_code)) {\n")
+ out_file.write(" result = hresult_errs[i].error_message;\n")
+ out_file.write(" break;\n")
+ out_file.write(" }\n")
+ out_file.write(" }\n")
+ out_file.write(" /* convert & check win32 error space? */\n")
+ out_file.write(" if (result == NULL && HRESULT_IS_LIKELY_WERR(err_code)) {\n")
+ out_file.write(" WERROR wErr = W_ERROR(WIN32_FROM_HRESULT(err_code));\n")
+ out_file.write(" result = get_friendly_werror_msg(wErr);\n")
+ out_file.write(" }\n")
+ out_file.write(" return result;\n")
+ out_file.write("};\n")
+ out_file.write("\n")
+ out_file.write("const char *hresult_errstr(HRESULT err_code)\n")
+ out_file.write("{\n");
+ out_file.write(" static char msg[22];\n")
+ out_file.write(" int i;\n")
+ out_file.write("\n")
+ out_file.write(" for (i = 0; i < ARRAY_SIZE(hresult_errs); i++) {\n")
+ out_file.write(" if (HRES_IS_EQUAL(err_code, hresult_errs[i].error_code)) {\n")
+ out_file.write(" return hresult_errs[i].error_str;\n")
+ out_file.write(" }\n")
+ out_file.write(" }\n")
+ out_file.write(" snprintf(msg, sizeof(msg), \"HRES code 0x%08x\", HRES_ERROR_V(err_code));\n")
+ out_file.write(" return msg;\n")
+ out_file.write("};\n")
+
+# Very simple script to generate files hresult.c & hresult.h
+# The script simply takes a text file as input, format of input file is
+# very simple and is just the content of a html table ( such as that found
+# in http://msdn.microsoft.com/en-us/library/cc704587.aspx ) copied and
+# pasted into a text file
+
+def main ():
+ input_file1 = None;
+ filename = "hresult"
+ headerfile_name = filename + ".h"
+ sourcefile_name = filename + ".c"
+ if len(sys.argv) > 1:
+ input_file1 = sys.argv[1]
+ else:
+ print("usage: %s winerrorfile"%(sys.argv[0]))
+ sys.exit()
+
+ parseErrorDescriptions(input_file1, False)
+ out_file = open(headerfile_name,"w")
+ generateHeaderFile(out_file)
+ out_file.close()
+ out_file = open(sourcefile_name,"w")
+ generateSourceFile(out_file)
+
+if __name__ == '__main__':
+
+ main()
diff --git a/source4/scripting/bin/gen_ntstatus.py b/source4/scripting/bin/gen_ntstatus.py
new file mode 100755
index 0000000..b4a9bfc
--- /dev/null
+++ b/source4/scripting/bin/gen_ntstatus.py
@@ -0,0 +1,148 @@
+#!/usr/bin/env python3
+
+#
+# Unix SMB/CIFS implementation.
+#
+# HRESULT Error definitions
+#
+# Copyright (C) Noel Power <noel.power@suse.com> 2014
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys, os.path, io, string
+from gen_error_common import parseErrorDescriptions, ErrorDef
+
+def generateHeaderFile(out_file, errors):
+ out_file.write("/*\n")
+ out_file.write(" * Descriptions for errors generated from\n")
+ out_file.write(" * [MS-ERREF] http://msdn.microsoft.com/en-us/library/cc704588.aspx\n")
+ out_file.write(" */\n\n")
+ out_file.write("#ifndef _NTSTATUS_GEN_H\n")
+ out_file.write("#define _NTSTATUS_GEN_H\n")
+ for err in errors:
+ line = "#define %s NT_STATUS(%#x)\n" % (err.err_define, err.err_code)
+ out_file.write(line)
+ out_file.write("\n#endif /* _NTSTATUS_GEN_H */\n")
+
+def generateSourceFile(out_file, errors):
+ out_file.write("/*\n")
+ out_file.write(" * Names for errors generated from\n")
+ out_file.write(" * [MS-ERREF] http://msdn.microsoft.com/en-us/library/cc704588.aspx\n")
+ out_file.write(" */\n")
+
+ out_file.write("static const nt_err_code_struct nt_errs[] = \n")
+ out_file.write("{\n")
+ for err in errors:
+ out_file.write("\t{ \"%s\", %s },\n" % (err.err_define, err.err_define))
+ out_file.write("{ 0, NT_STATUS(0) }\n")
+ out_file.write("};\n")
+
+ out_file.write("\n/*\n")
+ out_file.write(" * Descriptions for errors generated from\n")
+ out_file.write(" * [MS-ERREF] http://msdn.microsoft.com/en-us/library/cc704588.aspx\n")
+ out_file.write(" */\n")
+
+ out_file.write("static const nt_err_code_struct nt_err_desc[] = \n")
+ out_file.write("{\n")
+ for err in errors:
+ # Account for the possibility that some errors may not have descriptions
+ if err.err_string == "":
+ continue
+ out_file.write("\t{ N_(\"%s\"), %s },\n"%(err.err_string, err.err_define))
+ out_file.write("{ 0, NT_STATUS(0) }\n")
+ out_file.write("};")
+
+def generatePythonFile(out_file, errors):
+ out_file.write("/*\n")
+ out_file.write(" * New descriptions for existing errors generated from\n")
+ out_file.write(" * [MS-ERREF] http://msdn.microsoft.com/en-us/library/cc704588.aspx\n")
+ out_file.write(" */\n")
+ out_file.write("#include <Python.h>\n")
+ out_file.write("#include \"python/py3compat.h\"\n")
+ out_file.write("#include \"includes.h\"\n\n")
+ # This is needed to avoid a missing prototype error from the C
+ # compiler. There is never a prototype for this function, it is a
+ # module loaded by python with dlopen() and found with dlsym().
+ out_file.write("static struct PyModuleDef moduledef = {\n")
+ out_file.write("\tPyModuleDef_HEAD_INIT,\n")
+ out_file.write("\t.m_name = \"ntstatus\",\n")
+ out_file.write("\t.m_doc = \"NTSTATUS error defines\",\n")
+ out_file.write("\t.m_size = -1,\n")
+ out_file.write("};\n\n")
+ out_file.write("MODULE_INIT_FUNC(ntstatus)\n")
+ out_file.write("{\n")
+ out_file.write("\tPyObject *m;\n\n")
+ out_file.write("\tm = PyModule_Create(&moduledef);\n");
+ out_file.write("\tif (m == NULL)\n");
+ out_file.write("\t\treturn NULL;\n\n");
+ for err in errors:
+ line = """\tPyModule_AddObject(m, \"%s\",
+ \t\tPyLong_FromUnsignedLongLong(NT_STATUS_V(%s)));\n""" % (err.err_define, err.err_define)
+ out_file.write(line)
+ out_file.write("\n");
+ out_file.write("\treturn m;\n");
+ out_file.write("}\n");
+
+def transformErrorName( error_name ):
+ if error_name.startswith("STATUS_"):
+ error_name = error_name.replace("STATUS_", "", 1)
+ elif error_name.startswith("RPC_NT_"):
+ error_name = error_name.replace("RPC_NT_", "RPC_", 1)
+ elif error_name.startswith("EPT_NT_"):
+ error_name = error_name.replace("EPT_NT_", "EPT_", 1)
+ return "NT_STATUS_" + error_name
+
+# Very simple script to generate files nterr_gen.c & ntstatus_gen.h.
+# These files contain generated definitions.
+# This script takes four inputs:
+# [1]: The name of the text file which is the content of an HTML table
+# (e.g. the one found at http://msdn.microsoft.com/en-us/library/cc231200.aspx)
+# copied and pasted.
+# [2]: The name of the output generated header file with NTStatus #defines
+# [3]: The name of the output generated source file with C arrays
+# [4]: The name of the output generated python file
+def main ():
+ input_file = None;
+
+ if len(sys.argv) == 5:
+ input_file = sys.argv[1]
+ gen_headerfile_name = sys.argv[2]
+ gen_sourcefile_name = sys.argv[3]
+ gen_pythonfile_name = sys.argv[4]
+ else:
+ print("usage: %s winerrorfile headerfile sourcefile pythonfile" % (sys.argv[0]))
+ sys.exit()
+
+ # read in the data
+ file_contents = io.open(input_file, "rt", encoding='utf8')
+
+ errors = parseErrorDescriptions(file_contents, False, transformErrorName)
+
+ print("writing new header file: %s" % gen_headerfile_name)
+ out_file = io.open(gen_headerfile_name, "wt", encoding='utf8')
+ generateHeaderFile(out_file, errors)
+ out_file.close()
+ print("writing new source file: %s" % gen_sourcefile_name)
+ out_file = io.open(gen_sourcefile_name, "wt", encoding='utf8')
+ generateSourceFile(out_file, errors)
+ out_file.close()
+ print("writing new python file: %s" % gen_pythonfile_name)
+ out_file = io.open(gen_pythonfile_name, "wt", encoding='utf8')
+ generatePythonFile(out_file, errors)
+ out_file.close()
+
+if __name__ == '__main__':
+
+ main()
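gen_ntstatus.py turns a table in that format into an NTSTATUS header, a C name/description table and a Python module. A sketch of the four-argument invocation described by its usage message, with the output file names chosen for the example:

    python3 source4/scripting/bin/gen_ntstatus.py ntstatus_table.txt \
        ntstatus_gen.h nterr_gen.c py_ntstatus.c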
diff --git a/source4/scripting/bin/gen_output.py b/source4/scripting/bin/gen_output.py
new file mode 100755
index 0000000..8f5239f
--- /dev/null
+++ b/source4/scripting/bin/gen_output.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python3
+
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+A data generator to help tests.
+
+Generate large output to stdout by repeating input data.
+Usage:
+
+ python gen_output.py --data @ --repeat 1024 --retcode 1
+
+The above command will output @ x 1024 (1K) and exit with 1.
+"""
+
+import sys
+import argparse
+
+parser = argparse.ArgumentParser(description='Generate output data')
+
+parser.add_argument(
+ '--data', type=str, default='$',
+ help='Characters used to generate data by repeating them'
+)
+
+parser.add_argument(
+ '--repeat', type=int, default=1024 * 1024,
+ help='How many times to repeat the data'
+)
+
+parser.add_argument(
+ '--retcode', type=int, default=0,
+ help='Specify the exit code for this script'
+)
+
+args = parser.parse_args()
+
+sys.stdout.write(args.data * args.repeat)
+
+sys.exit(args.retcode)
diff --git a/source4/scripting/bin/gen_werror.py b/source4/scripting/bin/gen_werror.py
new file mode 100755
index 0000000..1ac9d33
--- /dev/null
+++ b/source4/scripting/bin/gen_werror.py
@@ -0,0 +1,149 @@
+#!/usr/bin/env python3
+
+#
+# Unix SMB/CIFS implementation.
+#
+# WERROR error definition generation
+#
+# Copyright (C) Catalyst.Net Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys, os.path, io, string
+from gen_error_common import parseErrorDescriptions, ErrorDef
+
+def generateHeaderFile(out_file, errors):
+ out_file.write("/*\n")
+ out_file.write(" * Descriptions for errors generated from\n")
+ out_file.write(" * [MS-ERREF] https://msdn.microsoft.com/en-us/library/cc231199.aspx\n")
+ out_file.write(" */\n\n")
+ out_file.write("#ifndef _WERR_GEN_H\n")
+ out_file.write("#define _WERR_GEN_H\n")
+ for err in errors:
+ line = "#define %s W_ERROR(%s)\n" % (err.err_define, hex(err.err_code))
+ out_file.write(line)
+ out_file.write("\n#endif /* _WERR_GEN_H */\n")
+
+def generateSourceFile(out_file, errors):
+ out_file.write("#include \"werror.h\"\n")
+
+ out_file.write("/*\n")
+ out_file.write(" * Names for errors generated from\n")
+ out_file.write(" * [MS-ERREF] https://msdn.microsoft.com/en-us/library/cc231199.aspx\n")
+ out_file.write(" */\n")
+
+ out_file.write("static const struct werror_code_struct dos_errs[] = \n")
+ out_file.write("{\n")
+ for err in errors:
+ out_file.write("\t{ \"%s\", %s },\n" % (err.err_define, err.err_define))
+ out_file.write("{ 0, W_ERROR(0) }\n")
+ out_file.write("};\n")
+
+ out_file.write("\n/*\n")
+ out_file.write(" * Descriptions for errors generated from\n")
+ out_file.write(" * [MS-ERREF] https://msdn.microsoft.com/en-us/library/cc231199.aspx\n")
+ out_file.write(" */\n")
+
+ out_file.write("static const struct werror_str_struct dos_err_strs[] = \n")
+ out_file.write("{\n")
+ for err in errors:
+ # Account for the possibility that some errors may not have descriptions
+ if err.err_string == "":
+ continue
+ out_file.write("\t{ %s, \"%s\" },\n"%(err.err_define, err.err_string))
+ out_file.write("\t{ W_ERROR(0), 0 }\n")
+ out_file.write("};")
+
+def generatePythonFile(out_file, errors):
+ out_file.write("/*\n")
+ out_file.write(" * Errors generated from\n")
+ out_file.write(" * [MS-ERREF] https://msdn.microsoft.com/en-us/library/cc231199.aspx\n")
+ out_file.write(" */\n")
+ out_file.write("#include <Python.h>\n")
+ out_file.write("#include \"python/py3compat.h\"\n")
+ out_file.write("#include \"includes.h\"\n\n")
+ # This is needed to avoid a missing prototype error from the C
+ # compiler. There is never a prototype for this function, it is a
+ # module loaded by python with dlopen() and found with dlsym().
+ out_file.write("static struct PyModuleDef moduledef = {\n")
+ out_file.write("\tPyModuleDef_HEAD_INIT,\n")
+ out_file.write("\t.m_name = \"werror\",\n")
+ out_file.write("\t.m_doc = \"WERROR defines\",\n")
+ out_file.write("\t.m_size = -1,\n")
+ out_file.write("};\n\n")
+ out_file.write("MODULE_INIT_FUNC(werror)\n")
+ out_file.write("{\n")
+ out_file.write("\tPyObject *m;\n\n")
+ out_file.write("\tm = PyModule_Create(&moduledef);\n");
+ out_file.write("\tif (m == NULL)\n");
+ out_file.write("\t\treturn NULL;\n\n");
+ for err in errors:
+ line = """\tPyModule_AddObject(m, \"%s\",
+ \t\tPyLong_FromUnsignedLongLong(W_ERROR_V(%s)));\n""" % (err.err_define, err.err_define)
+ out_file.write(line)
+ out_file.write("\n");
+ out_file.write("\treturn m;\n");
+ out_file.write("}\n");
+
+def transformErrorName( error_name ):
+ if error_name.startswith("WERR_"):
+ error_name = error_name.replace("WERR_", "", 1)
+ elif error_name.startswith("ERROR_"):
+ error_name = error_name.replace("ERROR_", "", 1)
+ return "WERR_" + error_name.upper()
+
+# Script to generate files werror_gen.h, doserr_gen.c and
+# py_werror.c.
+#
+# These files contain generated definitions for WERRs and
+# their descriptions/names.
+#
+# This script takes four inputs:
+# [1]: The name of the text file which is the content of an HTML table
+# (e.g. the one found at https://msdn.microsoft.com/en-us/library/cc231199.aspx)
+# copied and pasted.
+# [2]: [[output werror_gen.h]]
+# [3]: [[output doserr_gen.c]]
+# [4]: [[output py_werror.c]]
+def main():
+ if len(sys.argv) == 5:
+ input_file_name = sys.argv[1]
+ gen_headerfile_name = sys.argv[2]
+ gen_sourcefile_name = sys.argv[3]
+ gen_pythonfile_name = sys.argv[4]
+ else:
+ print("usage: %s winerrorfile headerfile sourcefile pythonfile" % sys.argv[0])
+ sys.exit()
+
+ input_file = io.open(input_file_name, "rt", encoding='utf8')
+ errors = parseErrorDescriptions(input_file, True, transformErrorName)
+ input_file.close()
+
+ print("writing new header file: %s" % gen_headerfile_name)
+ out_file = io.open(gen_headerfile_name, "wt", encoding='utf8')
+ generateHeaderFile(out_file, errors)
+ out_file.close()
+ print("writing new source file: %s" % gen_sourcefile_name)
+ out_file = io.open(gen_sourcefile_name, "wt", encoding='utf8')
+ generateSourceFile(out_file, errors)
+ out_file.close()
+ print("writing new python file: %s" % gen_pythonfile_name)
+ out_file = io.open(gen_pythonfile_name, "wt", encoding='utf8')
+ generatePythonFile(out_file, errors)
+ out_file.close()
+
+if __name__ == '__main__':
+
+ main()
diff --git a/source4/scripting/bin/get-descriptors b/source4/scripting/bin/get-descriptors
new file mode 100755
index 0000000..6e69222
--- /dev/null
+++ b/source4/scripting/bin/get-descriptors
@@ -0,0 +1,154 @@
+#!/usr/bin/env python3
+#
+# Unix SMB/CIFS implementation.
+# A script to compare differences of security descriptors between
+# a remote host and the local Ldb
+# Needs the local domain, the remote domain, IP of the remote host
+# Username and password for the remote domain, must be at least
+# Domain Administrator
+#
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2008
+# Copyright (C) Nadezhda Ivanova <nadezhda.ivanova@postpath.com> 2009
+#
+# Based on the original in EJS:
+# Copyright (C) Andrew Tridgell <tridge@samba.org> 2005
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import optparse
+import sys
+import base64
+
+sys.path.insert(0, "bin/python")
+
+import samba
+from samba.auth import system_session
+import samba.getopt as options
+from samba.ndr import ndr_pack, ndr_unpack
+from samba.dcerpc import security
+from samba import Ldb
+from samba.samdb import SamDB
+from ldb import SCOPE_SUBTREE, SCOPE_BASE
+
+parser = optparse.OptionParser("get-descriptors [options]")
+sambaopts = options.SambaOptions(parser)
+credopts = options.CredentialsOptions(parser)
+parser.add_option_group(credopts)
+
+parser.add_option("--local-domain", type="string", metavar="LOCALDOMAIN",
+ help="set local domain")
+parser.add_option("--remote-domain", type="string", metavar="REMOTEDOMAIN",
+ help="set remote domain")
+parser.add_option("--host", type="string", metavar="HOST",
+ help="Ip of the remote host used for comparison")
+parser.add_option("--as-ldif", help="Output in LDIF format", action="store_true")
+
+lp = sambaopts.get_loadparm()
+creds = credopts.get_credentials(lp)
+
+opts = parser.parse_args()[0]
+
+if not opts.host or not opts.local_domain or not opts.remote_domain:
+ parser.print_usage()
+ sys.exit(1)
+
+class DescrGetter:
+
+ def __init__(self, localdomain, remotedomain):
+ self.samdb = SamDB(session_info=system_session(), lp=lp, options=["modules:paged_searches"])
+ self.remote_ldb= Ldb("ldap://" + opts.host + ":389", credentials=creds, lp=lp,
+ options=["modules:paged_searches"])
+ self.local_domain = localdomain.replace(".", ",DC=")
+ self.local_domain = "DC=" + self.local_domain
+ self.remote_domain = remotedomain.replace(".", ",DC=")
+ self.remote_domain = "DC=" + self.remote_domain
+ self.local_map = {}
+ self.remote_map = {}
+
+ def get_domain_local_sid(self):
+ res = self.samdb.search(base=self.local_domain,expression="(objectClass=*)", scope=SCOPE_BASE)
+ self.local_sid = ndr_unpack( security.dom_sid,res[0]["objectSid"][0])
+
+ def get_domain_remote_sid(self):
+ res = self.remote_ldb.search(base=self.remote_domain, expression="(objectClass=*)", scope=SCOPE_BASE)
+ self.remote_sid = ndr_unpack( security.dom_sid,res[0]["objectSid"][0])
+
+ def add_to_ldif(self, dn, descr):
+ ldif_entry = ["dn: " + dn,
+ "changetype: modify",
+ "replace: nTSecurityDescriptor",
+ "nTSecurityDescriptor:: " + base64.b64encode(ndr_pack(descr)).decode('utf8')]
+
+ for line in ldif_entry:
+ length = 79
+ if len(line) <= length + 1:
+ print(line)
+ else:
+ for i in range(len(line) / length + 1):
+ if i == 0:
+ l = line[i * length:((i + 1) * length)]
+ else:
+ l = " " + line[(i * length):((i + 1) * length)]
+ print(l)
+ print("\n")
+
+ def write_as_sddl(self, dn, descr):
+ print(dn)
+ print(descr + "\n")
+
+ def read_descr_by_base(self, search_base):
+ res = self.samdb.search(base=search_base + self.local_domain, expression="(objectClass=*)", scope=SCOPE_SUBTREE, attrs=["nTSecurityDescriptor"])
+ for entry in res:
+ dn = entry["dn"].__str__().replace(self.local_domain, "")
+
+ if "nTSecurityDescriptor" in entry:
+ desc_obj = ndr_unpack(security.descriptor, entry["nTSecurityDescriptor"][0])
+ self.local_map[dn] = desc_obj
+
+ res = self.remote_ldb.search(base=search_base + self.remote_domain, expression="(objectClass=*)", scope=SCOPE_SUBTREE, attrs=["nTSecurityDescriptor"])
+ for entry in res:
+ dn = entry["dn"].__str__().replace(self.remote_domain, "")
+
+ if "nTSecurityDescriptor" in entry:
+ desc_obj = ndr_unpack(security.descriptor, entry["nTSecurityDescriptor"][0])
+ self.remote_map[dn] = desc_obj
+
+ def read_desc(self):
+ self.read_descr_by_base("CN=Schema,CN=Configuration,")
+ self.read_descr_by_base("CN=Configuration,")
+ self.read_descr_by_base("")
+
+ def write_desc_to_ldif(self):
+ key_list_local = self.local_map.keys()
+ key_list_remote = self.remote_map.keys()
+ for key in key_list_remote:
+ if key in key_list_local:
+ sddl = self.remote_map[key].as_sddl(self.remote_sid)
+ sddl_local = self.local_map[key].as_sddl(self.local_sid)
+ if sddl != sddl_local:
+ descr = security.descriptor.from_sddl(sddl, self.local_sid)
+ if opts.as_ldif:
+ self.add_to_ldif(key + self.local_domain, descr)
+ else:
+ self.write_as_sddl(key, descr.as_sddl(self.local_sid))
+
+ def run(self):
+ self.get_domain_local_sid()
+ self.get_domain_remote_sid()
+ self.read_desc()
+ self.write_desc_to_ldif()
+
+desc = DescrGetter(opts.local_domain, opts.remote_domain)
+desc.run()
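The add_to_ldif() method above folds each generated LDIF line at 79 columns, with continuation lines starting with a single space, which is the folding style RFC 2849 expects for long base64-encoded values such as nTSecurityDescriptor. A minimal standalone sketch of that folding follows (fold_ldif_line is a hypothetical helper, not part of get-descriptors, and its arithmetic differs slightly from the script's own loop):

import base64

def fold_ldif_line(line, width=79):
    """Yield line folded at width columns; continuation lines start with one space."""
    if len(line) <= width:
        yield line
        return
    yield line[:width]
    for i in range(width, len(line), width - 1):
        yield " " + line[i:i + width - 1]

if __name__ == "__main__":
    blob = b"\x01" * 120                      # stand-in for ndr_pack(descriptor)
    value = base64.b64encode(blob).decode("ascii")
    for out in fold_ldif_line("nTSecurityDescriptor:: " + value):
        print(out)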
diff --git a/source4/scripting/bin/ktpass.sh b/source4/scripting/bin/ktpass.sh
new file mode 100755
index 0000000..a165816
--- /dev/null
+++ b/source4/scripting/bin/ktpass.sh
@@ -0,0 +1,122 @@
+#!/bin/sh
+# vim: expandtab
+#
+# Copyright (C) Matthieu Patou <mat@matws.net> 2010
+#
+#
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+name="ktpass.sh"
+TEMP=$(getopt -o h --long princ:,pass:,out:,host:,ptype:,enc:,path-to-ldbsearch: \
+ -n "$name" -- "$@")
+eval set -- "$TEMP"
+
+usage()
+{
+ echo -ne "$name --out <keytabfile> --princ <principal> --pass <password>|*\n"
+ echo -ne " [--host hostname] [--enc <encryption>]\n"
+ echo -ne " [--ptype <type>] [--path-to-ldbsearch <path>]\n"
+    echo -ne "\nEncryption type should be one of:\n"
+ echo -ne " * des-cbc-crc\n"
+ echo -ne " * des-cbc-md5\n"
+ echo -ne " * rc4-hmac (default)\n"
+ echo -ne " * aes128-cts\n"
+ echo -ne " * aes256-cts\n"
+ exit 0
+}
+while true; do
+ case "$1" in
+ --out)
+ outfile=$2
+ shift 2
+ ;;
+ --princ)
+ princ=$2
+ shift 2
+ ;;
+ --pass)
+ pass=$2
+ shift 2
+ ;;
+ --host)
+ host=$2
+ shift 2
+ ;;
+ --ptype) shift 2 ;;
+ --enc)
+ enc=$2
+ shift 2
+ ;;
+ --path-to-ldbsearch)
+ path="$2/"
+ shift 2
+ ;;
+ -h) usage ;;
+ --)
+ shift
+ break
+ ;;
+ *)
+ echo "Internal error!"
+ exit 1
+ ;;
+ esac
+done
+#RC4-HMAC-NT|AES256-SHA1|AES128-SHA
+if [ -z "$enc" ]; then
+ enc="rc4-hmac"
+fi
+if [ -z "$path" ]; then
+    path=$(dirname "$0")/../bin/
+    if [ ! -f "${path}ldbsearch" ]; then
+        path=$(dirname "$0")/../../bin/
+    fi
+fi
+if [ -z "$outfile" -o -z "$princ" -o -z "$pass" ]; then
+ echo "At least one mandatory parameter (--out, --princ, --pass) was not specified"
+ usage
+fi
+if [ -z "$host" ]; then
+ host=$(hostname)
+fi
+
+kvno=$(${path}ldbsearch -H ldap://$host "(|(samaccountname=$princ)(serviceprincipalname=$princ)(userprincipalname=$princ))" msds-keyversionnumber -k 1 -N 2>/dev/null | grep -i msds-keyversionnumber)
+if [ x"$kvno" = x"" ]; then
+ echo -ne "Unable to find kvno for principal $princ\n"
+    echo -ne " check that you are authenticated with kerberos\n"
+ exit 1
+else
+ kvno=$(echo $kvno | sed 's/^.*: //')
+fi
+
+if [ "$pass" = "*" ]; then
+ echo -n "Enter password for $princ: "
+ stty -echo
+ read pass
+ stty echo
+ echo ""
+fi
+
+ktutil >/dev/null <<EOF
+add_entry -password -p $princ -k $kvno -e $enc
+$pass
+wkt $outfile
+EOF
+
+if [ $? -eq 0 ]; then
+    echo "Keytab file $outfile created successfully"
+else
+ echo "Error while creating the keytab file $outfile"
+fi
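ktpass.sh drives ktutil entirely over stdin: add_entry -password makes ktutil prompt for a password, the here-document supplies it on the following line, and wkt writes the keytab. A rough Python sketch of the same idea (write_keytab is an illustrative name; MIT Kerberos ktutil command syntax is assumed, and Heimdal's ktutil differs):

import subprocess

def write_keytab(principal, password, kvno, enctype, outfile):
    """Feed ktutil the same command sequence as the here-document above."""
    commands = "add_entry -password -p %s -k %s -e %s\n%s\nwkt %s\n" % (
        principal, kvno, enctype, password, outfile)
    return subprocess.run(["ktutil"], input=commands, text=True).returncode

# e.g. write_keytab("HOST/dc1.example.com@EXAMPLE.COM", "secret", "2",
#                   "rc4-hmac", "/tmp/example.keytab")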
diff --git a/source4/scripting/bin/machineaccountccache b/source4/scripting/bin/machineaccountccache
new file mode 100755
index 0000000..5e6d3c5
--- /dev/null
+++ b/source4/scripting/bin/machineaccountccache
@@ -0,0 +1,30 @@
+#!/usr/bin/env python3
+import optparse
+import sys
+
+# Find right directory when running from source tree
+sys.path.insert(0, "bin/python")
+
+
+import samba
+from samba import getopt as options
+from samba.credentials import Credentials
+parser = optparse.OptionParser("machineaccountccache <ccache name>")
+sambaopts = options.SambaOptions(parser)
+parser.add_option_group(sambaopts)
+parser.add_option_group(options.VersionOptions(parser))
+opts, args = parser.parse_args()
+
+if len(args) != 1:
+ parser.print_usage()
+ sys.exit(1)
+
+ccachename = args[0]
+
+lp_ctx = sambaopts.get_loadparm()
+
+creds = Credentials()
+
+creds.guess(lp_ctx)
+creds.set_machine_account(lp_ctx)
+creds.get_named_ccache(lp_ctx, ccachename)
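The ccache written by get_named_ccache() is an ordinary Kerberos credentials cache, so other tools can use it simply by pointing KRB5CCNAME at it; samba_dnsupdate later in this patch hands machine credentials to nsupdate in exactly that way. A small illustrative sketch (run_with_ccache is a hypothetical helper):

import os
import subprocess

def run_with_ccache(ccachename, cmd):
    """Run cmd with KRB5CCNAME pointing at the machine account ccache."""
    env = dict(os.environ, KRB5CCNAME=ccachename)
    return subprocess.call(cmd, env=env)

# e.g. run_with_ccache("/tmp/machine_ccache", ["klist"])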
diff --git a/source4/scripting/bin/machineaccountpw b/source4/scripting/bin/machineaccountpw
new file mode 100755
index 0000000..eab773e
--- /dev/null
+++ b/source4/scripting/bin/machineaccountpw
@@ -0,0 +1,42 @@
+#!/usr/bin/env python3
+import optparse
+import sys
+
+# Find right directory when running from source tree
+sys.path.insert(0, "bin/python")
+
+
+import samba
+from samba import getopt as options
+from samba import NTSTATUSError
+from samba.credentials import Credentials
+parser = optparse.OptionParser("machineaccountpw")
+sambaopts = options.SambaOptions(parser)
+parser.add_option_group(sambaopts)
+parser.add_option_group(options.VersionOptions(parser))
+opts, args = parser.parse_args()
+
+if len(args) != 0:
+ parser.print_usage()
+ sys.exit(1)
+
+try:
+ lp_ctx = sambaopts.get_loadparm()
+except RuntimeError as error:
+ print("Unable to load smb.conf %s: %s" % (sambaopts.get_loadparm_path(),
+ error),
+ file=sys.stderr)
+ sys.exit(1)
+
+creds = Credentials()
+
+creds.guess(lp_ctx)
+try:
+ creds.set_machine_account(lp_ctx)
+except NTSTATUSError as error:
+ print("Failed to find a stored machine account credential on this system: %s" \
+ % error.args[1],
+ file=sys.stderr)
+ sys.exit(1)
+
+print(creds.get_password())
diff --git a/source4/scripting/bin/nsupdate-gss b/source4/scripting/bin/nsupdate-gss
new file mode 100755
index 0000000..509220d
--- /dev/null
+++ b/source4/scripting/bin/nsupdate-gss
@@ -0,0 +1,352 @@
+#!/usr/bin/perl -w
+# update a win2000 DNS server using gss-tsig
+# tridge@samba.org, October 2002
+
+# jmruiz@animatika.net
+# updated, 2004-Enero
+
+# tridge@samba.org, September 2009
+# added --verbose, --noverify, --ntype and --nameserver
+
+# See draft-ietf-dnsext-gss-tsig-02, RFC2845 and RFC2930
+
+use strict;
+use lib "GSSAPI";
+use Net::DNS;
+use GSSAPI;
+use Getopt::Long;
+
+my $opt_wipe = 0;
+my $opt_add = 0;
+my $opt_noverify = 0;
+my $opt_verbose = 0;
+my $opt_help = 0;
+my $opt_nameserver;
+my $opt_realm;
+my $opt_ntype = "A";
+
+# main program
+GetOptions (
+ 'h|help|?' => \$opt_help,
+ 'wipe' => \$opt_wipe,
+ 'realm=s' => \$opt_realm,
+ 'nameserver=s' => \$opt_nameserver,
+ 'ntype=s' => \$opt_ntype,
+ 'add' => \$opt_add,
+ 'noverify' => \$opt_noverify,
+ 'verbose' => \$opt_verbose
+ );
+
+#########################################
+# display help text
+sub ShowHelp()
+{
+ print "
+ nsupdate with gssapi
+ Copyright (C) tridge\@samba.org
+
+ Usage: nsupdate-gss [options] HOST DOMAIN TARGET TTL
+
+ Options:
+ --wipe wipe all records for this name
+ --add add to any existing records
+ --ntype=TYPE specify name type (default A)
+ --nameserver=server specify a specific nameserver
+ --noverify don't verify the MIC of the reply
+ --verbose show detailed steps
+
+";
+ exit(0);
+}
+
+if ($opt_help) {
+ ShowHelp();
+}
+
+if ($#ARGV != 3) {
+ ShowHelp();
+}
+
+
+my $host = $ARGV[0];
+my $domain = $ARGV[1];
+my $target = $ARGV[2];
+my $ttl = $ARGV[3];
+my $alg = "gss.microsoft.com";
+
+
+
+#######################################################################
+# signing callback function for TSIG module
+sub gss_sign($$)
+{
+ my $key = shift;
+ my $data = shift;
+ my $sig;
+ $key->get_mic(0, $data, $sig);
+ return $sig;
+}
+
+
+
+#####################################################################
+# write a string into a file
+sub FileSave($$)
+{
+ my($filename) = shift;
+ my($v) = shift;
+ local(*FILE);
+ open(FILE, ">$filename") || die "can't open $filename";
+ print FILE $v;
+ close(FILE);
+}
+
+
+#######################################################################
+# verify a TSIG signature from a DNS server reply
+#
+sub sig_verify($$)
+{
+ my $context = shift;
+ my $packet = shift;
+
+ my $tsig = ($packet->additional)[0];
+ $opt_verbose && print "calling sig_data\n";
+ my $sigdata = $tsig->sig_data($packet);
+
+ $opt_verbose && print "sig_data_done\n";
+
+ return $context->verify_mic($sigdata, $tsig->{"mac"}, 0);
+}
+
+
+#######################################################################
+# find the nameserver for the domain
+#
+sub find_nameserver($)
+{
+ my $server_name = shift;
+ return Net::DNS::Resolver->new(
+ nameservers => [$server_name],
+ recurse => 0,
+ debug => 0);
+}
+
+
+#######################################################################
+# find a server name for a domain - currently uses the NS record
+sub find_server_name($)
+{
+ my $domain = shift;
+ my $res = Net::DNS::Resolver->new;
+ my $srv_query = $res->query("$domain.", "NS");
+ if (!defined($srv_query)) {
+ return undef;
+ }
+ my $server_name;
+ foreach my $rr (grep { $_->type eq 'NS' } $srv_query->answer) {
+ $server_name = $rr->nsdname;
+ }
+ return $server_name;
+}
+
+#######################################################################
+#
+#
+sub negotiate_tkey($$$$)
+{
+
+ my $nameserver = shift;
+ my $domain = shift;
+ my $server_name = shift;
+ my $key_name = shift;
+
+ my $status;
+
+ my $context = GSSAPI::Context->new;
+ my $name = GSSAPI::Name->new;
+
+ # use a principal name of dns/server@REALM
+ $opt_verbose &&
+ print "Using principal dns/" . $server_name . "@" . uc($opt_realm) . "\n";
+ $status = $name->import($name, "dns/" . $server_name . "@" . uc($opt_realm));
+ if (! $status) {
+ print "import name: $status\n";
+ return undef;
+ }
+
+ my $flags =
+ GSS_C_REPLAY_FLAG | GSS_C_MUTUAL_FLAG |
+ GSS_C_SEQUENCE_FLAG | GSS_C_CONF_FLAG |
+ GSS_C_INTEG_FLAG;
+
+
+ $status = GSSAPI::Cred::acquire_cred(undef, 120, undef, GSS_C_INITIATE,
+ my $cred, my $oidset, my $time);
+
+ if (! $status) {
+ print "acquire_cred: $status\n";
+ return undef;
+ }
+
+ $opt_verbose && print "creds acquired\n";
+
+ # call gss_init_sec_context()
+ $status = $context->init($cred, $name, undef, $flags,
+ 0, undef, "", undef, my $tok,
+ undef, undef);
+ if (! $status) {
+ print "init_sec_context: $status\n";
+ return undef;
+ }
+
+ $opt_verbose && print "init done\n";
+
+ my $gss_query = Net::DNS::Packet->new("$key_name", "TKEY", "IN");
+
+ # note that Windows2000 uses a SPNEGO wrapping on GSSAPI data sent to the nameserver.
+ # I tested using the gen_negTokenTarg() call from Samba 3.0 and it does work, but
+ # for this utility it is better to use plain GSSAPI/krb5 data so as to reduce the
+ # dependence on external libraries. If we ever want to sign DNS packets using
+ # NTLMSSP instead of krb5 then the SPNEGO wrapper could be used
+
+ $opt_verbose && print "calling RR new\n";
+
+ $a = Net::DNS::RR->new(
+ Name => "$key_name",
+ Type => "TKEY",
+ TTL => 0,
+ Class => "ANY",
+ mode => 3,
+ algorithm => $alg,
+ inception => time,
+ expiration => time + 24*60*60,
+ key => $tok,
+ other_data => "",
+ );
+
+ $gss_query->push("answer", $a);
+
+ my $reply = $nameserver->send($gss_query);
+
+ if (!defined($reply) || $reply->header->{'rcode'} ne 'NOERROR') {
+ print "failed to send TKEY\n";
+ return undef;
+ }
+
+ my $key2 = ($reply->answer)[0]->{"key"};
+
+ # call gss_init_sec_context() again. Strictly speaking
+ # we should loop until this stops returning CONTINUE
+ # but I'm a lazy bastard
+ $status = $context->init($cred, $name, undef, $flags,
+ 0, undef, $key2, undef, $tok,
+ undef, undef);
+ if (! $status) {
+ print "init_sec_context step 2: $status\n";
+ return undef;
+ }
+
+ if (!$opt_noverify) {
+ $opt_verbose && print "verifying\n";
+
+ # check the signature on the TKEY reply
+ my $rc = sig_verify($context, $reply);
+ if (! $rc) {
+ print "Failed to verify TKEY reply: $rc\n";
+# return undef;
+ }
+
+ $opt_verbose && print "verifying done\n";
+ }
+
+ return $context;
+}
+
+
+#######################################################################
+# MAIN
+#######################################################################
+
+if (!$opt_realm) {
+ $opt_realm = $domain;
+}
+
+# find the name of the DNS server
+if (!$opt_nameserver) {
+ $opt_nameserver = find_server_name($domain);
+ if (!defined($opt_nameserver)) {
+ print "Failed to find a DNS server name for $domain\n";
+ exit 1;
+ }
+}
+$opt_verbose && print "Using DNS server name $opt_nameserver\n";
+
+# connect to the nameserver
+my $nameserver = find_nameserver($opt_nameserver);
+if (!defined($nameserver) || $nameserver->{'errorstring'} ne 'NOERROR') {
+ print "Failed to connect to nameserver for domain $domain\n";
+ exit 1;
+}
+
+
+# use a long random key name
+my $key_name = int(rand 10000000000000);
+
+# negotiate a TKEY key
+my $gss_context = negotiate_tkey($nameserver, $domain, $opt_nameserver, $key_name);
+if (!defined($gss_context)) {
+ print "Failed to negotiate a TKEY\n";
+ exit 1;
+}
+$opt_verbose && print "Negotiated TKEY $key_name\n";
+
+# construct a signed update
+my $update = Net::DNS::Update->new($domain);
+
+$update->push("pre", yxdomain("$domain"));
+if (!$opt_add) {
+ $update->push("update", rr_del("$host.$domain. $opt_ntype"));
+}
+if (!$opt_wipe) {
+ $update->push("update", rr_add("$host.$domain. $ttl $opt_ntype $target"));
+}
+
+my $sig = Net::DNS::RR->new(
+ Name => $key_name,
+ Type => "TSIG",
+ TTL => 0,
+ Class => "ANY",
+ Algorithm => $alg,
+ Time_Signed => time,
+ Fudge => 36000,
+ Mac_Size => 0,
+ Mac => "",
+ Key => $gss_context,
+ Sign_Func => \&gss_sign,
+ Other_Len => 0,
+ Other_Data => "",
+ Error => 0,
+ mode => 3,
+ );
+
+$update->push("additional", $sig);
+
+# send the dynamic update
+my $update_reply = $nameserver->send($update);
+
+if (! defined($update_reply)) {
+ print "No reply to dynamic update\n";
+ exit 1;
+}
+
+# make sure it worked
+my $result = $update_reply->header->{"rcode"};
+
+($opt_verbose || $result ne 'NOERROR') && print "Update gave rcode $result\n";
+
+if ($result ne 'NOERROR') {
+ exit 1;
+}
+
+exit 0;
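nsupdate-gss first negotiates a TKEY over GSSAPI, then signs the dynamic update with a TSIG record whose MAC comes from the gss_sign() callback. As a rough modern-Python analogue of just the rr_del/rr_add/send flow, here is an unsigned dynamic update using dnspython (assumed to be available); the GSS-TSIG negotiation and signing that the Perl script performs is deliberately left out of this sketch:

import dns.query
import dns.rcode
import dns.update

def unsigned_dynamic_update(zone, server_ip, host, ttl, ip):
    """Delete any A records for host.zone, then add one, like rr_del/rr_add."""
    upd = dns.update.Update(zone)
    upd.delete(host, "A")
    upd.add(host, ttl, "A", ip)
    return dns.query.tcp(upd, server_ip, timeout=10)

if __name__ == "__main__":
    # illustrative values only
    reply = unsigned_dynamic_update("example.com", "192.0.2.53", "dc1", 900, "192.0.2.10")
    print(dns.rcode.to_text(reply.rcode()))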
diff --git a/source4/scripting/bin/rebuildextendeddn b/source4/scripting/bin/rebuildextendeddn
new file mode 100755
index 0000000..d5c0ecb
--- /dev/null
+++ b/source4/scripting/bin/rebuildextendeddn
@@ -0,0 +1,135 @@
+#!/usr/bin/env python3
+#
+# Unix SMB/CIFS implementation.
+# Extended attributes (re)building
+# Copyright (C) Matthieu Patou <mat@matws.net> 2009
+#
+# Based on provision a Samba4 server by
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2008
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2008
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import optparse
+import os
+import sys
+# Find right directory when running from source tree
+sys.path.insert(0, "bin/python")
+
+import samba
+from samba.credentials import DONT_USE_KERBEROS
+from samba.auth import system_session
+from samba import Ldb
+from ldb import SCOPE_SUBTREE, SCOPE_BASE
+import ldb
+import samba.getopt as options
+from samba import param
+from samba.provision import ProvisionNames, provision_paths_from_lp
+from samba.schema import get_dnsyntax_attributes, get_linked_attributes
+
+parser = optparse.OptionParser("rebuildextendeddn [options]")
+sambaopts = options.SambaOptions(parser)
+parser.add_option_group(sambaopts)
+parser.add_option_group(options.VersionOptions(parser))
+credopts = options.CredentialsOptions(parser)
+parser.add_option_group(credopts)
+parser.add_option("--targetdir", type="string", metavar="DIR",
+ help="Set target directory")
+
+opts = parser.parse_args()[0]
+
+def message(text):
+ """print a message if quiet is not set."""
+ if not opts.quiet:
+ print(text)
+
+if len(sys.argv) == 1:
+ opts.interactive = True
+
+lp = sambaopts.get_loadparm()
+smbconf = lp.configfile
+
+creds = credopts.get_credentials(lp)
+
+creds.set_kerberos_state(DONT_USE_KERBEROS)
+
+session = system_session()
+
+
+def get_paths(targetdir=None,smbconf=None):
+ if targetdir is not None:
+ if (not os.path.exists(os.path.join(targetdir, "etc"))):
+ os.makedirs(os.path.join(targetdir, "etc"))
+ smbconf = os.path.join(targetdir, "etc", "smb.conf")
+ if smbconf is None:
+ smbconf = param.default_path()
+
+ if not os.path.exists(smbconf):
+ print("Unable to find smb.conf .. "+smbconf, file=sys.stderr)
+ parser.print_usage()
+ sys.exit(1)
+
+ lp = param.LoadParm()
+ lp.load(smbconf)
+ paths = provision_paths_from_lp(lp,"foo")
+ return paths
+
+
+
+def rebuild_en_dn(credentials,session_info,paths):
+ lp = param.LoadParm()
+ lp.load(paths.smbconf)
+ names = ProvisionNames()
+ names.domain = lp.get("workgroup")
+ names.realm = lp.get("realm")
+ names.rootdn = "DC=" + names.realm.replace(".",",DC=")
+
+ attrs = ["dn" ]
+ dn = ""
+ sam_ldb = Ldb(paths.samdb, session_info=session_info, credentials=credentials,lp=lp)
+ attrs2 = ["schemaNamingContext"]
+ res2 = sam_ldb.search(expression="(objectClass=*)",base="", scope=SCOPE_BASE, attrs=attrs2)
+ attrs.extend(get_linked_attributes(ldb.Dn(sam_ldb,str(res2[0]["schemaNamingContext"])),sam_ldb).keys())
+ attrs.extend(get_dnsyntax_attributes(ldb.Dn(sam_ldb,str(res2[0]["schemaNamingContext"])),sam_ldb))
+ sam_ldb.transaction_start()
+ res = sam_ldb.search(expression="(cn=*)", scope=SCOPE_SUBTREE, attrs=attrs,controls=["search_options:1:2"])
+ mod = ""
+ for i in range (0,len(res)):
+ #print >>sys.stderr,res[i].dn
+ dn = res[i].dn
+ for att in res[i]:
+ if ( (att != "dn" and att != "cn") and not (res[i][att] is None) ):
+ m = ldb.Message()
+ m.dn = ldb.Dn(sam_ldb, str(dn))
+ saveatt = []
+ for j in range (0,len( res[i][att])):
+ mod = mod +att +": "+str(res[i][att][j])+"\n"
+ saveatt.append(str(res[i][att][j]))
+ m[att] = ldb.MessageElement(saveatt, ldb.FLAG_MOD_REPLACE, att)
+ sam_ldb.modify(m)
+ res3 = sam_ldb.search(expression="(&(distinguishedName=%s)(%s=*))"%(dn,att),scope=SCOPE_SUBTREE, attrs=[att],controls=["search_options:1:2"])
+ if( len(res3) == 0 or (len(res3[0][att])!= len(saveatt))):
+ print(str(dn) + " has no attr " +att+ " or a wrong value",
+ file=sys.stderr)
+ for satt in saveatt:
+ print("%s = %s" % (att, satt),
+ file=sys.stderr)
+ sam_ldb.transaction_cancel()
+ sam_ldb.transaction_commit()
+
+
+paths = get_paths(targetdir=opts.targetdir, smbconf=smbconf)
+
+rebuild_en_dn(creds,session,paths)
+
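rebuildextendeddn wraps its rewrites in a single ldb transaction and re-reads each attribute after replacing it, cancelling the transaction when the verification fails. A minimal sketch of that modify-then-verify pattern (replace_and_verify is a hypothetical helper, and it commits or cancels exactly once rather than mirroring the script's control flow):

import ldb

def replace_and_verify(samdb, dn, attr, values):
    """Replace attr on dn, re-read it, and only commit if the value stuck."""
    samdb.transaction_start()
    try:
        msg = ldb.Message(ldb.Dn(samdb, dn))
        msg[attr] = ldb.MessageElement(values, ldb.FLAG_MOD_REPLACE, attr)
        samdb.modify(msg)
        res = samdb.search(base=dn, scope=ldb.SCOPE_BASE, attrs=[attr])
        ok = len(res) == 1 and len(res[0].get(attr, [])) == len(values)
    except Exception:
        samdb.transaction_cancel()
        raise
    if ok:
        samdb.transaction_commit()
    else:
        samdb.transaction_cancel()
    return ok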
diff --git a/source4/scripting/bin/renamedc b/source4/scripting/bin/renamedc
new file mode 100755
index 0000000..e5e8a2c
--- /dev/null
+++ b/source4/scripting/bin/renamedc
@@ -0,0 +1,191 @@
+#!/usr/bin/env python3
+# vim: expandtab
+#
+# Copyright (C) Matthieu Patou <mat@matws.net> 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+import optparse
+import sys
+# Allow running from the s4 source directory (without installing samba)
+sys.path.insert(0, "bin/python")
+
+import ldb
+import samba
+import samba.getopt as options
+import os
+
+from samba.credentials import DONT_USE_KERBEROS
+from samba.auth import system_session
+from samba import param
+from samba.provision import find_provision_key_parameters, secretsdb_self_join
+from samba.upgradehelpers import get_ldbs, get_paths
+
+
+__docformat__ = "restructuredText"
+
+parser = optparse.OptionParser("renamedc [options]")
+sambaopts = options.SambaOptions(parser)
+parser.add_option_group(sambaopts)
+parser.add_option_group(options.VersionOptions(parser))
+credopts = options.CredentialsOptions(parser)
+parser.add_option_group(credopts)
+parser.add_option("--oldname",
+ help="Old DC name")
+parser.add_option("--newname",
+ help="New DC name")
+
+opts = parser.parse_args()[0]
+
+if len(sys.argv) == 1:
+ opts.interactive = True
+lp = sambaopts.get_loadparm()
+smbconf = lp.configfile
+
+creds = credopts.get_credentials(lp)
+creds.set_kerberos_state(DONT_USE_KERBEROS)
+
+
+if __name__ == '__main__':
+ defSDmodified = False
+ # 1) First get files paths
+ paths = get_paths(param, smbconf=smbconf)
+ # Get ldbs with the system session, it is needed for searching
+ # provision parameters
+ session = system_session()
+
+ ldbs = get_ldbs(paths, creds, session, lp)
+ ldbs.sam.transaction_start()
+ ldbs.secrets.transaction_start()
+
+ if opts.oldname is None or opts.newname is None:
+ raise Exception("Option oldname or newname is missing")
+ res = ldbs.sam.search(expression="(&(name=%s)(serverReferenceBL=*))" % opts.oldname)
+ if len(res) != 1:
+ raise Exception("Wrong number of result returned (%d), are you sure of the old name %s" %
+ (len(res), opts.oldname))
+
+ # Ok got it then check that the new name is not used as well
+ res2 = ldbs.sam.search(expression="(&(name=%s)(objectclass=computer))" % opts.newname)
+ if len(res2) != 0:
+ raise Exception("Seems that %s is a name that already exists, pick another one" %
+ opts.newname)
+
+ names = find_provision_key_parameters(ldbs.sam, ldbs.secrets, ldbs.idmap,
+ paths, smbconf, lp)
+
+ # First rename the entry
+    # provision puts the name in upper case, so let's do it too!
+ newdn = ldb.Dn(ldbs.sam, str(res[0].dn))
+ newdn.set_component(0, "cn", opts.newname.upper())
+ ldbs.sam.rename(res[0].dn, newdn)
+
+ # Then change password and samaccountname and dnshostname
+ msg = ldb.Message(newdn)
+ machinepass = samba.generate_random_machine_password(120, 120)
+ mputf16 = machinepass.encode('utf-16-le')
+
+ account = "%s$" % opts.newname.upper()
+ msg["clearTextPassword"] = ldb.MessageElement(mputf16,
+ ldb.FLAG_MOD_REPLACE,
+ "clearTextPassword")
+
+ msg["sAMAccountName"] = ldb.MessageElement(account,
+ ldb.FLAG_MOD_REPLACE,
+ "sAMAccountName")
+
+ msg["dNSHostName"] = ldb.MessageElement("%s.%s" % (opts.newname,
+ names.dnsdomain),
+ ldb.FLAG_MOD_REPLACE,
+ "dNSHostName")
+ ldbs.sam.modify(msg)
+
+ # Do a self join one more time to resync the secrets file
+ res = ldbs.sam.search(base=newdn, scope=ldb.SCOPE_BASE,
+ attrs=["msDs-keyVersionNumber", "serverReferenceBL"])
+ assert(len(res) == 1)
+ kvno = int(str(res[0]["msDs-keyVersionNumber"]))
+ serverbldn = ldb.Dn(ldbs.sam, str(res[0]["serverReferenceBL"]))
+
+ secrets_msg = ldbs.secrets.search(expression="sAMAccountName=%s$" %
+ opts.oldname.upper(),
+ attrs=["secureChannelType"])
+
+ secChanType = int(secrets_msg[0]["secureChannelType"][0])
+
+ secretsdb_self_join(ldbs.secrets, domain=names.domain,
+ realm=names.realm,
+ domainsid=names.domainsid,
+ dnsdomain=names.dnsdomain,
+ netbiosname=opts.newname.upper(),
+ machinepass=machinepass,
+ key_version_number=kvno,
+ secure_channel_type=secChanType)
+
+    # Update the RID set reference so we don't need a runtime fixup until the next dbcheck, as there is no back link.
+
+ res = ldbs.sam.search(expression="(objectClass=rIDSet)", base=newdn, scope=ldb.SCOPE_ONELEVEL, attrs=[])
+ assert(len(res) == 1)
+ newridset = str(res[0].dn)
+ msg = ldb.Message(newdn)
+
+ msg["rIDSetReferences"] = ldb.MessageElement(newridset,
+ ldb.FLAG_MOD_REPLACE,
+ "rIDSetReferences")
+ ldbs.sam.modify(msg)
+
+ # Update the server's sites configuration
+ newserverrefdn = ldb.Dn(ldbs.sam, str(serverbldn))
+ newserverrefdn.set_component(0, "cn", opts.newname.upper())
+
+ ldbs.sam.rename(serverbldn, newserverrefdn)
+
+ msg = ldb.Message(newserverrefdn)
+ msg["dNSHostName"] = ldb.MessageElement("%s.%s" % (opts.newname,
+ names.dnsdomain),
+ ldb.FLAG_MOD_REPLACE,
+ "dNSHostName")
+ ldbs.sam.modify(msg)
+
+ try:
+ ldbs.sam.transaction_prepare_commit()
+ ldbs.secrets.transaction_prepare_commit()
+ except Exception:
+        ldbs.sam.transaction_cancel()
+        ldbs.secrets.transaction_cancel()
+ raise
+
+ try:
+ ldbs.sam.transaction_commit()
+ ldbs.secrets.transaction_commit()
+ except Exception:
+        ldbs.sam.transaction_cancel()
+        ldbs.secrets.transaction_cancel()
+ raise
+
+ # All good so far
+ #print lp.get("private dir")
+ cf = open(lp.configfile)
+ ncfname = "%s.new" % lp.configfile
+ newconf = open(ncfname, 'w')
+ for l in cf.readlines():
+ if l.find("netbios name") > 0:
+ newconf.write("\tnetbios name = %s\n" % opts.newname.upper())
+ else:
+ newconf.write(l)
+ newconf.close()
+ cf.close()
+ os.rename(ncfname, lp.configfile)
+
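renamedc finishes by rewriting smb.conf through a sibling file and an os.rename() over the original, so a crash can never leave a half-written configuration. A simplified sketch of that atomic parameter rewrite (set_netbios_name is a hypothetical helper and, unlike a real parser, it does not special-case commented-out lines):

import os

def set_netbios_name(configfile, newname):
    """Rewrite the netbios name parameter, replacing smb.conf atomically."""
    tmpname = "%s.new" % configfile
    with open(configfile) as src, open(tmpname, "w") as dst:
        for line in src:
            if "netbios name" in line:
                dst.write("\tnetbios name = %s\n" % newname.upper())
            else:
                dst.write(line)
    os.rename(tmpname, configfile)   # atomic replace on the same filesystem

# example (illustrative path):
# set_netbios_name("/usr/local/samba/etc/smb.conf", "NEWDC")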
diff --git a/source4/scripting/bin/samba-gpupdate b/source4/scripting/bin/samba-gpupdate
new file mode 100755
index 0000000..4b3f057
--- /dev/null
+++ b/source4/scripting/bin/samba-gpupdate
@@ -0,0 +1,138 @@
+#!/usr/bin/env python3
+# Copyright Luke Morrison <luc785@.hotmail.com> July 2013
+# Co-Edited by Matthieu Pattou July 2013 from original August 2013
+# Edited by Garming Sam Feb. 2014
+# Edited by Luke Morrison April 2014
+# Edited by David Mulder May 2017
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''This script reads a log file of previously applied GPOs, gets all GPOs from
+sysvol and sorts them by container. Then it applies the ones that haven't been
+applied yet or have changed, and are in the right container'''
+
+import os
+import sys
+
+sys.path.insert(0, "bin/python")
+
+import optparse
+from samba import getopt as options
+from samba.gp.gpclass import apply_gp, unapply_gp, GPOStorage, rsop
+from samba.gp.gp_sec_ext import gp_krb_ext, gp_access_ext
+from samba.gp.gp_ext_loader import get_gp_client_side_extensions
+from samba.gp.gp_scripts_ext import gp_scripts_ext, gp_user_scripts_ext
+from samba.gp.gp_sudoers_ext import gp_sudoers_ext
+from samba.gp.vgp_sudoers_ext import vgp_sudoers_ext
+from samba.gp.gp_smb_conf_ext import gp_smb_conf_ext
+from samba.gp.gp_msgs_ext import gp_msgs_ext
+from samba.gp.vgp_symlink_ext import vgp_symlink_ext
+from samba.gp.vgp_files_ext import vgp_files_ext
+from samba.gp.vgp_openssh_ext import vgp_openssh_ext
+from samba.gp.vgp_motd_ext import vgp_motd_ext
+from samba.gp.vgp_issue_ext import vgp_issue_ext
+from samba.gp.vgp_startup_scripts_ext import vgp_startup_scripts_ext
+from samba.gp.vgp_access_ext import vgp_access_ext
+from samba.gp.gp_gnome_settings_ext import gp_gnome_settings_ext
+from samba.gp.gp_cert_auto_enroll_ext import gp_cert_auto_enroll_ext
+from samba.gp.gp_firefox_ext import gp_firefox_ext
+from samba.gp.gp_chromium_ext import gp_chromium_ext, gp_chrome_ext
+from samba.gp.gp_firewalld_ext import gp_firewalld_ext
+from samba.gp.gp_centrify_sudoers_ext import gp_centrify_sudoers_ext
+from samba.gp.gp_centrify_crontab_ext import gp_centrify_crontab_ext, \
+ gp_user_centrify_crontab_ext
+from samba.credentials import Credentials
+from samba.gp.util.logging import logger_init
+
+if __name__ == "__main__":
+ parser = optparse.OptionParser('samba-gpupdate [options]')
+ sambaopts = options.Samba3Options(parser)
+
+ # Get the command line options
+ parser.add_option_group(sambaopts)
+ parser.add_option_group(options.VersionOptions(parser))
+ credopts = options.CredentialsOptions(parser)
+ parser.add_option('-X', '--unapply', help='Unapply Group Policy',
+ action='store_true')
+ parser.add_option('--target', default='Computer', help='{Computer | User}',
+ choices=['Computer', 'User'])
+ parser.add_option('--force', help='Reapplies all policy settings',
+ action='store_true')
+ parser.add_option('--rsop', help='Print the Resultant Set of Policy',
+ action='store_true')
+ parser.add_option_group(credopts)
+
+ # Set the options and the arguments
+ (opts, args) = parser.parse_args()
+
+ # Set the loadparm context
+ lp = sambaopts.get_loadparm()
+
+ creds = credopts.get_credentials(lp, fallback_machine=True)
+ # Apply policy to the command line specified user
+ if opts.target == 'Computer':
+ username = creds.get_username()
+ elif opts.target == 'User':
+ username = '%s\\%s' % (creds.get_domain(), creds.get_username())
+ # Always supply the machine creds for fetching the gpo list
+ creds = Credentials()
+ creds.guess(lp)
+ creds.set_machine_account(lp)
+
+ # Set up logging
+ logger_init('samba-gpupdate', lp.log_level())
+
+ cache_dir = lp.get('cache directory')
+ store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
+
+ machine_exts, user_exts = get_gp_client_side_extensions(lp.configfile)
+ gp_extensions = []
+ if opts.target == 'Computer':
+ gp_extensions.append(gp_access_ext)
+ gp_extensions.append(gp_krb_ext)
+ gp_extensions.append(gp_scripts_ext)
+ gp_extensions.append(gp_sudoers_ext)
+ gp_extensions.append(vgp_sudoers_ext)
+ gp_extensions.append(gp_centrify_sudoers_ext)
+ gp_extensions.append(gp_centrify_crontab_ext)
+ gp_extensions.append(gp_smb_conf_ext)
+ gp_extensions.append(gp_msgs_ext)
+ gp_extensions.append(vgp_symlink_ext)
+ gp_extensions.append(vgp_files_ext)
+ gp_extensions.append(vgp_openssh_ext)
+ gp_extensions.append(vgp_motd_ext)
+ gp_extensions.append(vgp_issue_ext)
+ gp_extensions.append(vgp_startup_scripts_ext)
+ gp_extensions.append(vgp_access_ext)
+ gp_extensions.append(gp_gnome_settings_ext)
+ gp_extensions.append(gp_cert_auto_enroll_ext)
+ gp_extensions.append(gp_firefox_ext)
+ gp_extensions.append(gp_chromium_ext)
+ gp_extensions.append(gp_chrome_ext)
+ gp_extensions.append(gp_firewalld_ext)
+ gp_extensions.extend(machine_exts)
+ elif opts.target == 'User':
+ gp_extensions.append(gp_user_scripts_ext)
+ gp_extensions.append(gp_user_centrify_crontab_ext)
+ gp_extensions.extend(user_exts)
+
+ if opts.rsop:
+ rsop(lp, creds, store, gp_extensions, username, opts.target)
+ elif not opts.unapply:
+ apply_gp(lp, creds, store, gp_extensions, username,
+ opts.target, opts.force)
+ else:
+ unapply_gp(lp, creds, store, gp_extensions, username,
+ opts.target)
+
diff --git a/source4/scripting/bin/samba-tool b/source4/scripting/bin/samba-tool
new file mode 100755
index 0000000..f8a70a6
--- /dev/null
+++ b/source4/scripting/bin/samba-tool
@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2008-2012
+# Copyright (C) Amitay Isaacs <amitay@gmail.com> 2011
+# Copyright (C) Giampaolo Lauria <lauria2@yahoo.com> 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+
+# Find right directory when running from source tree
+sys.path.insert(0, "bin/python")
+
+# make sure the script dies immediately when hitting control-C,
+# rather than raising KeyboardInterrupt. As we do all database
+# operations using transactions, this is safe.
+import signal
+signal.signal(signal.SIGINT, signal.SIG_DFL)
+
+from samba.netcmd.main import cmd_sambatool
+cmd = cmd_sambatool()
+subcommand = None
+args = ()
+
+if len(sys.argv) > 1:
+ subcommand = sys.argv[1]
+ if len(sys.argv) > 2:
+ args = sys.argv[2:]
+
+try:
+ retval = cmd._run("samba-tool", subcommand, *args)
+except SystemExit as e:
+ retval = e.code
+except Exception as e:
+ cmd.show_command_error(e)
+ retval = 1
+sys.exit(retval)
diff --git a/source4/scripting/bin/samba3dump b/source4/scripting/bin/samba3dump
new file mode 100755
index 0000000..1a5d74f
--- /dev/null
+++ b/source4/scripting/bin/samba3dump
@@ -0,0 +1,180 @@
+#!/usr/bin/env python3
+#
+# Dump Samba3 data
+# Copyright Jelmer Vernooij 2005-2007
+# Released under the GNU GPL v3 or later
+#
+
+import optparse
+import os, sys
+
+# Find right directory when running from source tree
+sys.path.insert(0, "bin/python")
+
+import samba
+import samba.samba3
+from samba.samba3 import param as s3param
+from samba.dcerpc import lsa
+
+parser = optparse.OptionParser("samba3dump <libdir> [<smb.conf>]")
+parser.add_option("--format", type="choice", metavar="FORMAT",
+ choices=["full", "summary"])
+
+opts, args = parser.parse_args()
+
+if opts.format is None:
+ opts.format = "summary"
+
+def print_header(txt):
+ print("\n%s" % txt)
+ print("=" * len(txt))
+
+def print_samba3_policy(pol):
+ print_header("Account Policies")
+ print("Min password length: %d" % pol['min password length'])
+ print("Password history length: %d" % pol['password history'])
+ if pol['user must logon to change password']:
+ print("User must logon to change password: %d" % pol['user must logon to change password'])
+ if pol['maximum password age']:
+ print("Maximum password age: %d" % pol['maximum password age'])
+ if pol['minimum password age']:
+ print("Minimum password age: %d" % pol['minimum password age'])
+ if pol['lockout duration']:
+ print("Lockout duration: %d" % pol['lockout duration'])
+ if pol['reset count minutes']:
+ print("Reset Count Minutes: %d" % pol['reset count minutes'])
+ if pol['bad lockout attempt']:
+ print("Bad Lockout Minutes: %d" % pol['bad lockout attempt'])
+ if pol['disconnect time']:
+ print("Disconnect Time: %d" % pol['disconnect time'])
+ if pol['refuse machine password change']:
+ print("Refuse Machine Password Change: %d" % pol['refuse machine password change'])
+
+def print_samba3_sam(samdb):
+ print_header("SAM Database")
+ for user in samdb.search_users(0):
+ print("%s (%d): %s" % (user['account_name'], user['rid'], user['fullname']))
+
+def print_samba3_shares(lp):
+ print_header("Configured shares")
+ for s in lp.services():
+ print("--- %s ---" % s)
+ for p in ['path']:
+ print("\t%s = %s" % (p, lp.get(p, s)))
+ print("")
+
+def print_samba3_secrets(secrets):
+ print_header("Secrets")
+
+ if secrets.get_auth_user():
+ print("IPC Credentials:")
+ if secrets.get_auth_user():
+ print(" User: %s\n" % secrets.get_auth_user())
+ if secrets.get_auth_password():
+ print(" Password: %s\n" % secrets.get_auth_password())
+ if secrets.get_auth_domain():
+ print(" Domain: %s\n" % secrets.get_auth_domain())
+
+ if len(list(secrets.ldap_dns())) > 0:
+ print("LDAP passwords:")
+ for dn in secrets.ldap_dns():
+ print("\t%s -> %s" % (dn, secrets.get_ldap_bind_pw(dn)))
+ print("")
+
+ print("Domains:")
+ for domain in secrets.domains():
+ print("\t--- %s ---" % domain)
+ print("\tSID: %s" % secrets.get_sid(domain))
+ print("\tGUID: %s" % secrets.get_domain_guid(domain))
+ print("\tPlaintext pwd: %s" % secrets.get_machine_password(domain))
+ if secrets.get_machine_last_change_time(domain):
+ print("\tLast Changed: %lu" % secrets.get_machine_last_change_time(domain))
+ if secrets.get_machine_sec_channel_type(domain):
+ print("\tSecure Channel Type: %d\n" % secrets.get_machine_sec_channel_type(domain))
+
+ print("Trusted domains:")
+ for td in secrets.trusted_domains():
+ print(td)
+
+def print_samba3_regdb(regdb):
+ print_header("Registry")
+ from samba.registry import str_regtype
+
+ for k in regdb.keys():
+ print("[%s]" % k)
+ for (value_name, (type, value)) in regdb.values(k).items():
+ print("\"%s\"=%s:%s" % (value_name, str_regtype(type), value))
+
+def print_samba3_winsdb(winsdb):
+ print_header("WINS Database")
+
+ for name in winsdb:
+ (ttl, ips, nb_flags) = winsdb[name]
+ print("%s, nb_flags: %s, ttl: %lu, %d ips, fst: %s" % (name, nb_flags, ttl, len(ips), ips[0]))
+
+def print_samba3_groupmappings(groupdb):
+ print_header("Group Mappings")
+
+ for g in groupdb.enum_group_mapping(samba.samba3.passdb.get_global_sam_sid(),
+ lsa.SID_NAME_DOM_GRP):
+ print("\t--- Group: %s ---" % g.sid)
+
+def print_samba3_aliases(groupdb):
+ for g in groupdb.enum_group_mapping(samba.samba3.passdb.get_global_sam_sid(),
+ lsa.SID_NAME_ALIAS):
+ print("\t--- Alias: %s ---" % g.sid)
+
+def print_samba3_idmapdb(idmapdb):
+ print_header("Winbindd SID<->GID/UID mappings")
+
+ print("User High Water Mark: %d" % idmapdb.get_user_hwm())
+ print("Group High Water Mark: %d\n" % idmapdb.get_group_hwm())
+
+ for uid in idmapdb.uids():
+ print("%s -> UID %d" % (idmapdb.get_user_sid(uid), uid))
+
+ for gid in idmapdb.gids():
+ print("%s -> GID %d" % (idmapdb.get_group_sid(gid), gid))
+
+def print_samba3(samba3):
+ passdb = samba3.get_sam_db()
+ print_samba3_policy(passdb.get_account_policy())
+ print_samba3_winsdb(samba3.get_wins_db())
+ print_samba3_regdb(samba3.get_registry())
+ print_samba3_secrets(samba3.get_secrets_db())
+ print_samba3_idmapdb(samba3.get_idmap_db())
+ print_samba3_sam(passdb)
+ print_samba3_groupmappings(passdb)
+ print_samba3_aliases(passdb)
+ print_samba3_shares(samba3.lp)
+
+def print_samba3_summary(samba3):
+ print("WINS db entries: %d" % len(samba3.get_wins_db()))
+ print("Registry key count: %d" % len(samba3.get_registry()))
+ passdb = samba3.get_sam_db()
+ print("Groupmap count: %d" % len(passdb.enum_group_mapping()))
+ print("Alias count: %d" % len(passdb.search_aliases()))
+ idmapdb = samba3.get_idmap_db()
+ print("Idmap count: %d" % (len(list(idmapdb.uids())) + len(list(idmapdb.gids()))))
+
+if len(args) < 1:
+ parser.print_help()
+ sys.exit(1)
+
+libdir = args[0]
+if len(args) > 1:
+ smbconf = args[1]
+else:
+ smbconf = os.path.join(libdir, "smb.conf")
+
+s3_lp = s3param.get_context()
+s3_lp.set("private dir", libdir)
+s3_lp.set("state directory", libdir)
+s3_lp.set("lock directory", libdir)
+s3_lp.load(smbconf)
+samba3 = samba.samba3.Samba3(smbconf, s3_lp)
+
+if opts.format == "summary":
+ print_samba3_summary(samba3)
+elif opts.format == "full":
+ print_samba3(samba3)
diff --git a/source4/scripting/bin/samba_dnsupdate b/source4/scripting/bin/samba_dnsupdate
new file mode 100755
index 0000000..1ce53f5
--- /dev/null
+++ b/source4/scripting/bin/samba_dnsupdate
@@ -0,0 +1,960 @@
+#!/usr/bin/env python3
+# vim: expandtab
+#
+# update our DNS names using TSIG-GSS
+#
+# Copyright (C) Andrew Tridgell 2010
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+import os
+import fcntl
+import sys
+import tempfile
+import subprocess
+
+# ensure we get messages out immediately, so they get in the samba logs,
+# and don't get swallowed by a timeout
+os.environ['PYTHONUNBUFFERED'] = '1'
+
+# forcing GMT avoids a problem in some timezones with kerberos. Both MIT and
+# Heimdal can get mutual authentication errors due to the 24 second difference
+# between UTC and GMT when using some zone files (eg. the PDT zone from
+# the US)
+os.environ["TZ"] = "GMT"
+
+# Find right directory when running from source tree
+sys.path.insert(0, "bin/python")
+
+import samba
+import optparse
+from samba import getopt as options
+from ldb import SCOPE_BASE
+from samba import dsdb
+from samba.auth import system_session
+from samba.samdb import SamDB
+from samba.dcerpc import netlogon, winbind
+from samba.netcmd.dns import cmd_dns
+from samba import gensec
+from samba.kcc import kcc_utils
+from samba.common import get_string
+import ldb
+
+from samba.dnsresolver import DNSResolver
+import dns.resolver
+import dns.exception
+
+default_ttl = 900
+am_rodc = False
+error_count = 0
+
+parser = optparse.OptionParser("samba_dnsupdate [options]")
+sambaopts = options.SambaOptions(parser)
+parser.add_option_group(sambaopts)
+parser.add_option_group(options.VersionOptions(parser))
+parser.add_option("--verbose", action="store_true")
+parser.add_option("--use-samba-tool", action="store_true", help="Use samba-tool to make updates over RPC, rather than over DNS")
+parser.add_option("--use-nsupdate", action="store_true", help="Use nsupdate command to make updates over DNS (default, if kinit successful)")
+parser.add_option("--all-names", action="store_true")
+parser.add_option("--all-interfaces", action="store_true")
+parser.add_option("--current-ip", action="append", help="IP address to update DNS to match (helpful if behind NAT, valid multiple times, defaults to values from interfaces=)")
+parser.add_option("--rpc-server-ip", type="string", help="IP address of server to use with samba-tool (defaults to first --current-ip)")
+parser.add_option("--use-file", type="string", help="Use a file, rather than real DNS calls")
+parser.add_option("--update-list", type="string", help="Add DNS names from the given file")
+parser.add_option("--update-cache", type="string", help="Cache database of already registered records")
+parser.add_option("--fail-immediately", action='store_true', help="Exit on first failure")
+parser.add_option("--no-credentials", dest='nocreds', action='store_true', help="don't try and get credentials")
+parser.add_option("--no-substitutions", dest='nosubs', action='store_true', help="don't try and expands variables in file specified by --update-list")
+
+creds = None
+ccachename = None
+
+opts, args = parser.parse_args()
+
+if len(args) != 0:
+ parser.print_usage()
+ sys.exit(1)
+
+lp = sambaopts.get_loadparm()
+
+domain = lp.get("realm")
+host = lp.get("netbios name")
+all_interfaces = opts.all_interfaces
+
+IPs = opts.current_ip or samba.interface_ips(lp, bool(all_interfaces)) or []
+
+nsupdate_cmd = lp.get('nsupdate command')
+dns_zone_scavenging = lp.get("dns zone scavenging")
+
+if len(IPs) == 0:
+ print("No IP interfaces - skipping DNS updates\n")
+ parser.print_usage()
+ sys.exit(0)
+
+rpc_server_ip = opts.rpc_server_ip or IPs[0]
+
+IP6s = [ip for ip in IPs if ':' in ip]
+IP4s = [ip for ip in IPs if ':' not in ip]
+
+smb_conf = sambaopts.get_loadparm_path()
+
+if opts.verbose:
+ print("IPs: %s" % IPs)
+
+def get_possible_rw_dns_server(creds, domain):
+ """Get a list of possible read-write DNS servers, starting with
+ the SOA. The SOA is the correct answer, but old Samba domains
+ (4.6 and prior) do not maintain this value, so add NS servers
+ as well"""
+
+ ans_soa = check_one_dns_name(domain, 'SOA')
+ # Actually there is only one
+ hosts_soa = [str(a.mname).rstrip('.') for a in ans_soa]
+
+ # This is not strictly legit, but old Samba domains may have an
+ # unmaintained SOA record, so go for any NS that we can get a
+ # ticket to.
+ ans_ns = check_one_dns_name(domain, 'NS')
+ # Actually there is only one
+ hosts_ns = [str(a.target).rstrip('.') for a in ans_ns]
+
+ return hosts_soa + hosts_ns
+
+def get_krb5_rw_dns_server(creds, domain):
+ """Get a list of read-write DNS servers that we can obtain a ticket
+ for, starting with the SOA. The SOA is the correct answer, but
+ old Samba domains (4.6 and prior) do not maintain this value,
+ so continue with the NS servers as well until we get one that
+ the KDC will issue a ticket to.
+ """
+
+ rw_dns_servers = get_possible_rw_dns_server(creds, domain)
+ # Actually there is only one
+ for i, target_hostname in enumerate(rw_dns_servers):
+ settings = {}
+ settings["lp_ctx"] = lp
+ settings["target_hostname"] = target_hostname
+
+ gensec_client = gensec.Security.start_client(settings)
+ gensec_client.set_credentials(creds)
+ gensec_client.set_target_service("DNS")
+ gensec_client.set_target_hostname(target_hostname)
+ gensec_client.want_feature(gensec.FEATURE_SEAL)
+ gensec_client.start_mech_by_sasl_name("GSSAPI")
+ server_to_client = b""
+ try:
+ (client_finished, client_to_server) = gensec_client.update(server_to_client)
+ if opts.verbose:
+ print("Successfully obtained Kerberos ticket to DNS/%s as %s" \
+ % (target_hostname, creds.get_username()))
+ return target_hostname
+ except RuntimeError:
+ # Only raise an exception if they all failed
+ if i == len(rw_dns_servers) - 1:
+ raise
+
+def get_credentials(lp):
+    """get credentials if we haven't got them already."""
+ from samba import credentials
+ global ccachename
+ creds = credentials.Credentials()
+ creds.guess(lp)
+ creds.set_machine_account(lp)
+ creds.set_krb_forwardable(credentials.NO_KRB_FORWARDABLE)
+ (tmp_fd, ccachename) = tempfile.mkstemp()
+ try:
+ if opts.use_file is not None:
+ return
+
+ creds.get_named_ccache(lp, ccachename)
+
+ # Now confirm we can get a ticket to the DNS server
+ get_krb5_rw_dns_server(creds, sub_vars['DNSDOMAIN'] + '.')
+ return creds
+
+ except RuntimeError as e:
+ os.unlink(ccachename)
+ raise e
+
+
+class dnsobj(object):
+ """an object to hold a parsed DNS line"""
+
+ def __init__(self, string_form):
+ list = string_form.split()
+ if len(list) < 3:
+ raise Exception("Invalid DNS entry %r" % string_form)
+ self.dest = None
+ self.port = None
+ self.ip = None
+ self.existing_port = None
+ self.existing_weight = None
+ self.existing_cname_target = None
+ self.rpc = False
+ self.zone = None
+ if list[0] == "RPC":
+ self.rpc = True
+ self.zone = list[1]
+ list = list[2:]
+ self.type = list[0]
+ self.name = list[1]
+ self.nameservers = []
+ if self.type == 'SRV':
+ if len(list) < 4:
+ raise Exception("Invalid DNS entry %r" % string_form)
+ self.dest = list[2]
+ self.port = list[3]
+ elif self.type in ['A', 'AAAA']:
+ self.ip = list[2] # usually $IP, which gets replaced
+ elif self.type == 'CNAME':
+ self.dest = list[2]
+ elif self.type == 'NS':
+ self.dest = list[2]
+ else:
+ raise Exception("Received unexpected DNS reply of type %s: %s" % (self.type, string_form))
+
+ def __str__(self):
+ if self.type == "A":
+ return "%s %s %s" % (self.type, self.name, self.ip)
+ if self.type == "AAAA":
+ return "%s %s %s" % (self.type, self.name, self.ip)
+ if self.type == "SRV":
+ return "%s %s %s %s" % (self.type, self.name, self.dest, self.port)
+ if self.type == "CNAME":
+ return "%s %s %s" % (self.type, self.name, self.dest)
+ if self.type == "NS":
+ return "%s %s %s" % (self.type, self.name, self.dest)
+
+
+def parse_dns_line(line, sub_vars):
+    """parse a DNS line from the update list."""
+ if line.startswith("SRV _ldap._tcp.pdc._msdcs.") and not samdb.am_pdc():
+ # We keep this as compat to the dns_update_list of 4.0/4.1
+ if opts.verbose:
+ print("Skipping PDC entry (%s) as we are not a PDC" % line)
+ return None
+ subline = samba.substitute_var(line, sub_vars)
+ if subline == '' or subline[0] == "#":
+ return None
+ return dnsobj(subline)
+
+
+def hostname_match(h1, h2):
+ """see if two hostnames match."""
+ h1 = str(h1)
+ h2 = str(h2)
+ return h1.lower().rstrip('.') == h2.lower().rstrip('.')
+
+def get_resolver(d=None):
+ resolv_conf = os.getenv('RESOLV_CONF', default='/etc/resolv.conf')
+ resolver = DNSResolver(filename=resolv_conf, configure=True)
+
+ if d is not None and d.nameservers != []:
+ resolver.nameservers = d.nameservers
+
+ return resolver
+
+def check_one_dns_name(name, name_type, d=None):
+ resolver = get_resolver(d)
+ if d and not d.nameservers:
+ d.nameservers = resolver.nameservers
+ # dns.resolver.Answer
+ return resolver.resolve(name, name_type)
+
+def check_dns_name(d):
+ """check that a DNS entry exists."""
+ normalised_name = d.name.rstrip('.') + '.'
+ if opts.verbose:
+ print("Looking for DNS entry %s as %s" % (d, normalised_name))
+
+ if opts.use_file is not None:
+ try:
+ dns_file = open(opts.use_file, "r")
+ except IOError:
+ return False
+
+ for line in dns_file:
+ line = line.strip()
+ if line == '' or line[0] == "#":
+ continue
+ if line.lower() == str(d).lower():
+ return True
+ return False
+
+ try:
+ ans = check_one_dns_name(normalised_name, d.type, d)
+ except dns.exception.Timeout:
+ raise Exception("Timeout while waiting to contact a working DNS server while looking for %s as %s" % (d, normalised_name))
+ except dns.resolver.NoNameservers:
+ raise Exception("Unable to contact a working DNS server while looking for %s as %s" % (d, normalised_name))
+ except dns.resolver.NXDOMAIN:
+ if opts.verbose:
+ print("The DNS entry %s, queried as %s does not exist" % (d, normalised_name))
+ return False
+ except dns.resolver.NoAnswer:
+ if opts.verbose:
+ print("The DNS entry %s, queried as %s does not hold this record type" % (d, normalised_name))
+ return False
+ except dns.exception.DNSException:
+ raise Exception("Failure while trying to resolve %s as %s" % (d, normalised_name))
+ if d.type in ['A', 'AAAA']:
+ # we need to be sure that our IP is there
+ for rdata in ans:
+ if str(rdata) == str(d.ip):
+ return True
+ elif d.type == 'CNAME':
+ for i in range(len(ans)):
+ if hostname_match(ans[i].target, d.dest):
+ return True
+ else:
+ d.existing_cname_target = str(ans[i].target)
+ elif d.type == 'NS':
+ for i in range(len(ans)):
+ if hostname_match(ans[i].target, d.dest):
+ return True
+ elif d.type == 'SRV':
+ for rdata in ans:
+ if opts.verbose:
+ print("Checking %s against %s" % (rdata, d))
+ if hostname_match(rdata.target, d.dest):
+ if str(rdata.port) == str(d.port):
+ return True
+ else:
+ d.existing_port = str(rdata.port)
+ d.existing_weight = str(rdata.weight)
+
+ if opts.verbose:
+ print("Lookup of %s succeeded, but we failed to find a matching DNS entry for %s" % (normalised_name, d))
+
+ return False
+
+
+def get_subst_vars(samdb):
+ """get the list of substitution vars."""
+ global lp, am_rodc
+ vars = {}
+
+ vars['DNSDOMAIN'] = samdb.domain_dns_name()
+ vars['DNSFOREST'] = samdb.forest_dns_name()
+ vars['HOSTNAME'] = samdb.host_dns_name()
+ vars['NTDSGUID'] = samdb.get_ntds_GUID()
+ vars['SITE'] = samdb.server_site_name()
+ res = samdb.search(base=samdb.get_default_basedn(), scope=SCOPE_BASE, attrs=["objectGUID"])
+ guid = samdb.schema_format_value("objectGUID", res[0]['objectGUID'][0])
+ vars['DOMAINGUID'] = get_string(guid)
+
+ vars['IF_DC'] = ""
+ vars['IF_RWDC'] = "# "
+ vars['IF_RODC'] = "# "
+ vars['IF_PDC'] = "# "
+ vars['IF_GC'] = "# "
+ vars['IF_RWGC'] = "# "
+ vars['IF_ROGC'] = "# "
+ vars['IF_DNS_DOMAIN'] = "# "
+ vars['IF_RWDNS_DOMAIN'] = "# "
+ vars['IF_RODNS_DOMAIN'] = "# "
+ vars['IF_DNS_FOREST'] = "# "
+ vars['IF_RWDNS_FOREST'] = "# "
+    vars['IF_RODNS_FOREST'] = "# "
+
+ am_rodc = samdb.am_rodc()
+ if am_rodc:
+ vars['IF_RODC'] = ""
+ else:
+ vars['IF_RWDC'] = ""
+
+ if samdb.am_pdc():
+ vars['IF_PDC'] = ""
+
+    # check if we are a DNS server
+ res = samdb.search(base=samdb.get_config_basedn(),
+ expression='(objectguid=%s)' % vars['NTDSGUID'],
+ attrs=["options", "msDS-hasMasterNCs"])
+
+ if len(res) == 1:
+ if "options" in res[0]:
+ options = int(res[0]["options"][0])
+ if (options & dsdb.DS_NTDSDSA_OPT_IS_GC) != 0:
+ vars['IF_GC'] = ""
+ if am_rodc:
+ vars['IF_ROGC'] = ""
+ else:
+ vars['IF_RWGC'] = ""
+
+ basedn = str(samdb.get_default_basedn())
+ forestdn = str(samdb.get_root_basedn())
+
+ if "msDS-hasMasterNCs" in res[0]:
+ for e in res[0]["msDS-hasMasterNCs"]:
+ if str(e) == "DC=DomainDnsZones,%s" % basedn:
+ vars['IF_DNS_DOMAIN'] = ""
+ if am_rodc:
+ vars['IF_RODNS_DOMAIN'] = ""
+ else:
+ vars['IF_RWDNS_DOMAIN'] = ""
+ if str(e) == "DC=ForestDnsZones,%s" % forestdn:
+ vars['IF_DNS_FOREST'] = ""
+ if am_rodc:
+ vars['IF_RODNS_FOREST'] = ""
+ else:
+ vars['IF_RWDNS_FOREST'] = ""
+
+ return vars
+
+
+def call_nsupdate(d, op="add"):
+ """call nsupdate for an entry."""
+ global ccachename, nsupdate_cmd, krb5conf
+
+ assert(op in ["add", "delete"])
+
+ if opts.use_file is not None:
+ if opts.verbose:
+ print("Use File instead of nsupdate for %s (%s)" % (d, op))
+
+ try:
+ rfile = open(opts.use_file, 'r+')
+ except IOError:
+ # Perhaps create it
+ open(opts.use_file, 'w+')
+ # Open it for reading again, in case someone else got to it first
+ rfile = open(opts.use_file, 'r+')
+ fcntl.lockf(rfile, fcntl.LOCK_EX)
+ (file_dir, file_name) = os.path.split(opts.use_file)
+ (tmp_fd, tmpfile) = tempfile.mkstemp(dir=file_dir, prefix=file_name, suffix="XXXXXX")
+ wfile = os.fdopen(tmp_fd, 'a')
+ rfile.seek(0)
+ for line in rfile:
+ if op == "delete":
+ l = parse_dns_line(line, {})
+ if str(l).lower() == str(d).lower():
+ continue
+ wfile.write(line)
+ if op == "add":
+ wfile.write(str(d)+"\n")
+ os.rename(tmpfile, opts.use_file)
+ fcntl.lockf(rfile, fcntl.LOCK_UN)
+ return
+
+ if opts.verbose:
+ print("Calling nsupdate for %s (%s)" % (d, op))
+
+ normalised_name = d.name.rstrip('.') + '.'
+
+ (tmp_fd, tmpfile) = tempfile.mkstemp()
+ f = os.fdopen(tmp_fd, 'w')
+
+ resolver = get_resolver(d)
+
+    # Locate the zone for this name
+ zone = dns.resolver.zone_for_name(normalised_name,
+ resolver=resolver)
+
+ # Now find the SOA, or if we can't get a ticket to the SOA,
+ # any server with an NS record we can get a ticket for.
+ #
+ # Thanks to the Kerberos Credentials cache this is not
+ # expensive inside the loop
+ server = get_krb5_rw_dns_server(creds, zone)
+ f.write('server %s\n' % server)
+
+ if d.type == "A":
+ f.write("update %s %s %u A %s\n" % (op, normalised_name, default_ttl, d.ip))
+ if d.type == "AAAA":
+ f.write("update %s %s %u AAAA %s\n" % (op, normalised_name, default_ttl, d.ip))
+ if d.type == "SRV":
+ if op == "add" and d.existing_port is not None:
+ f.write("update delete %s SRV 0 %s %s %s\n" % (normalised_name, d.existing_weight,
+ d.existing_port, d.dest))
+ f.write("update %s %s %u SRV 0 100 %s %s\n" % (op, normalised_name, default_ttl, d.port, d.dest))
+ if d.type == "CNAME":
+ f.write("update %s %s %u CNAME %s\n" % (op, normalised_name, default_ttl, d.dest))
+ if d.type == "NS":
+ f.write("update %s %s %u NS %s\n" % (op, normalised_name, default_ttl, d.dest))
+ if opts.verbose:
+ f.write("show\n")
+ f.write("send\n")
+ f.close()
+
+ # Set a bigger MTU size to work around a bug in nsupdate's doio_send()
+ os.environ["SOCKET_WRAPPER_MTU"] = "2000"
+
+ global error_count
+ if ccachename:
+ os.environ["KRB5CCNAME"] = ccachename
+ try:
+ cmd = nsupdate_cmd[:]
+ cmd.append(tmpfile)
+ env = os.environ
+ if krb5conf:
+ env["KRB5_CONFIG"] = krb5conf
+ if ccachename:
+ env["KRB5CCNAME"] = ccachename
+ ret = subprocess.call(cmd, shell=False, env=env)
+ if ret != 0:
+ if opts.fail_immediately:
+ if opts.verbose:
+ print("Failed update with %s" % tmpfile)
+ sys.exit(1)
+ error_count = error_count + 1
+ if opts.verbose:
+ print("Failed nsupdate: %d" % ret)
+ except Exception as estr:
+ if opts.fail_immediately:
+ sys.exit(1)
+ error_count = error_count + 1
+ if opts.verbose:
+ print("Failed nsupdate: %s : %s" % (str(d), estr))
+ os.unlink(tmpfile)
+
+ # Let socket_wrapper set the default MTU size
+ os.environ["SOCKET_WRAPPER_MTU"] = "0"
+
+
+def call_samba_tool(d, op="add", zone=None):
+ """call samba-tool dns to update an entry."""
+
+ assert(op in ["add", "delete"])
+
+ if (sub_vars['DNSFOREST'] != sub_vars['DNSDOMAIN']) and \
+ sub_vars['DNSFOREST'].endswith('.' + sub_vars['DNSDOMAIN']):
+        print("Refusing to use samba-tool when forest %s is under domain %s" \
+              % (sub_vars['DNSFOREST'], sub_vars['DNSDOMAIN']))
+        sys.exit(1)
+
+ if opts.verbose:
+ print("Calling samba-tool dns for %s (%s)" % (d, op))
+
+ normalised_name = d.name.rstrip('.') + '.'
+ if zone is None:
+ if normalised_name == (sub_vars['DNSDOMAIN'] + '.'):
+ short_name = '@'
+ zone = sub_vars['DNSDOMAIN']
+ elif normalised_name == (sub_vars['DNSFOREST'] + '.'):
+ short_name = '@'
+ zone = sub_vars['DNSFOREST']
+ elif normalised_name == ('_msdcs.' + sub_vars['DNSFOREST'] + '.'):
+ short_name = '@'
+ zone = '_msdcs.' + sub_vars['DNSFOREST']
+ else:
+ if not normalised_name.endswith('.' + sub_vars['DNSDOMAIN'] + '.'):
+ print("Not Calling samba-tool dns for %s (%s), %s not in %s" % (d, op, normalised_name, sub_vars['DNSDOMAIN'] + '.'))
+ return False
+ elif normalised_name.endswith('._msdcs.' + sub_vars['DNSFOREST'] + '.'):
+ zone = '_msdcs.' + sub_vars['DNSFOREST']
+ else:
+ zone = sub_vars['DNSDOMAIN']
+ len_zone = len(zone)+2
+ short_name = normalised_name[:-len_zone]
+ else:
+ len_zone = len(zone)+2
+ short_name = normalised_name[:-len_zone]
+
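+    # For illustration (assumed, not original code): a record named
+    # host1.samdom.example.com. in zone samdom.example.com is split by the
+    # logic above into zone="samdom.example.com" and short_name="host1".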
+ if d.type == "A":
+ args = [rpc_server_ip, zone, short_name, "A", d.ip]
+ if d.type == "AAAA":
+ args = [rpc_server_ip, zone, short_name, "AAAA", d.ip]
+ if d.type == "SRV":
+ if op == "add" and d.existing_port is not None:
+ print("Not handling modify of existing SRV %s using samba-tool" % d)
+ return False
+ args = [rpc_server_ip, zone, short_name, "SRV",
+ "%s %s %s %s" % (d.dest, d.port, "0", "100")]
+ if d.type == "CNAME":
+ if d.existing_cname_target is None:
+ args = [rpc_server_ip, zone, short_name, "CNAME", d.dest]
+ else:
+ op = "update"
+ args = [rpc_server_ip, zone, short_name, "CNAME",
+ d.existing_cname_target.rstrip('.'), d.dest]
+
+ if d.type == "NS":
+ args = [rpc_server_ip, zone, short_name, "NS", d.dest]
+
+ if smb_conf and args:
+ args += ["--configfile=" + smb_conf]
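+    # Sketch of the resulting invocation (hypothetical values): the _run()
+    # call below is equivalent to running
+    #   samba-tool dns add <server-ip> samdom.example.com host1 A 10.0.0.1 \
+    #       --use-kerberos off -P --configfile=<smb.conf>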
+
+ global error_count
+ try:
+ cmd = cmd_dns()
+ if opts.verbose:
+ print(f'Calling samba-tool dns {op} --use-kerberos off -P {args}')
+ ret = cmd._run("dns", op, "--use-kerberos", "off", "-P", *args)
+ if ret == -1:
+ if opts.fail_immediately:
+ sys.exit(1)
+ error_count = error_count + 1
+ if opts.verbose:
+ print("Failed 'samba-tool dns' based update of %s" % (str(d)))
+ except Exception as estr:
+ if opts.fail_immediately:
+ sys.exit(1)
+ error_count = error_count + 1
+ if opts.verbose:
+ print("Failed 'samba-tool dns' based update: %s : %s" % (str(d), estr))
+ raise
+
+irpc_wb = None
+def cached_irpc_wb(lp):
+ global irpc_wb
+ if irpc_wb is not None:
+ return irpc_wb
+ irpc_wb = winbind.winbind("irpc:winbind_server", lp)
+ return irpc_wb
+
+def rodc_dns_update(d, t, op):
+ '''a single DNS update via the RODC netlogon call'''
+ global sub_vars
+
+ assert(op in ["add", "delete"])
+
+ if opts.verbose:
+ print("Calling netlogon RODC update for %s" % d)
+
+ typemap = {
+ netlogon.NlDnsLdapAtSite : netlogon.NlDnsInfoTypeNone,
+ netlogon.NlDnsGcAtSite : netlogon.NlDnsDomainNameAlias,
+ netlogon.NlDnsDsaCname : netlogon.NlDnsDomainNameAlias,
+ netlogon.NlDnsKdcAtSite : netlogon.NlDnsInfoTypeNone,
+ netlogon.NlDnsDcAtSite : netlogon.NlDnsInfoTypeNone,
+ netlogon.NlDnsRfc1510KdcAtSite : netlogon.NlDnsInfoTypeNone,
+ netlogon.NlDnsGenericGcAtSite : netlogon.NlDnsDomainNameAlias
+ }
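+    # The table above maps each netlogon name type to the dns_domain_info_type
+    # value placed in NL_DNS_NAME_INFO below; the pairing is taken as-is from
+    # the script and is assumed to match what
+    # DsrUpdateReadOnlyServerDnsRecords expects.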
+
+ w = cached_irpc_wb(lp)
+ dns_names = netlogon.NL_DNS_NAME_INFO_ARRAY()
+ dns_names.count = 1
+ name = netlogon.NL_DNS_NAME_INFO()
+ name.type = t
+ name.dns_domain_info_type = typemap[t]
+ name.priority = 0
+ name.weight = 0
+ if d.port is not None:
+ name.port = int(d.port)
+ if op == "add":
+ name.dns_register = True
+ else:
+ name.dns_register = False
+ dns_names.names = [ name ]
+ site_name = sub_vars['SITE']
+
+ global error_count
+
+ try:
+ ret_names = w.DsrUpdateReadOnlyServerDnsRecords(site_name, default_ttl, dns_names)
+ if ret_names.names[0].status != 0:
+ print("Failed to set DNS entry: %s (status %u)" % (d, ret_names.names[0].status))
+ error_count = error_count + 1
+ except RuntimeError as reason:
+ print("Error setting DNS entry of type %u: %s: %s" % (t, d, reason))
+ error_count = error_count + 1
+
+ if opts.verbose:
+ print("Called netlogon RODC update for %s" % d)
+
+ if error_count != 0 and opts.fail_immediately:
+ sys.exit(1)
+
+
+def call_rodc_update(d, op="add"):
+ '''RODCs need to use the netlogon API for nsupdate'''
+ global lp, sub_vars
+
+ assert(op in ["add", "delete"])
+
+ # we expect failure for 3268 if we aren't a GC
+ if d.port is not None and int(d.port) == 3268:
+ return
+
+ # map the DNS request to a netlogon update type
+ map = {
+ netlogon.NlDnsLdapAtSite : '_ldap._tcp.${SITE}._sites.${DNSDOMAIN}',
+ netlogon.NlDnsGcAtSite : '_ldap._tcp.${SITE}._sites.gc._msdcs.${DNSDOMAIN}',
+ netlogon.NlDnsDsaCname : '${NTDSGUID}._msdcs.${DNSFOREST}',
+ netlogon.NlDnsKdcAtSite : '_kerberos._tcp.${SITE}._sites.dc._msdcs.${DNSDOMAIN}',
+ netlogon.NlDnsDcAtSite : '_ldap._tcp.${SITE}._sites.dc._msdcs.${DNSDOMAIN}',
+ netlogon.NlDnsRfc1510KdcAtSite : '_kerberos._tcp.${SITE}._sites.${DNSDOMAIN}',
+ netlogon.NlDnsGenericGcAtSite : '_gc._tcp.${SITE}._sites.${DNSFOREST}'
+ }
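+    # Example substitution (illustrative values only): with
+    # SITE=Default-First-Site-Name and DNSDOMAIN=samdom.example.com, the
+    # NlDnsLdapAtSite template expands to
+    # _ldap._tcp.Default-First-Site-Name._sites.samdom.example.com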
+
+ for t in map:
+ subname = samba.substitute_var(map[t], sub_vars)
+ if subname.lower() == d.name.lower():
+ # found a match - do the update
+ rodc_dns_update(d, t, op)
+ return
+ if opts.verbose:
+ print("Unable to map to netlogon DNS update: %s" % d)
+
+
+# get the list of DNS entries we should have
+dns_update_list = opts.update_list or lp.private_path('dns_update_list')
+
+dns_update_cache = opts.update_cache or lp.private_path('dns_update_cache')
+
+krb5conf = None
+# only change the krb5.conf if we are not in selftest
+if 'SOCKET_WRAPPER_DIR' not in os.environ:
+ # use our private krb5.conf to avoid problems with the wrong domain
+ # bind9 nsupdate wants the default domain set
+ krb5conf = lp.private_path('krb5.conf')
+ os.environ['KRB5_CONFIG'] = krb5conf
+
+try:
+ file = open(dns_update_list, "r")
+except OSError as e:
+    if opts.update_list:
+ print("The specified update list does not exist")
+ else:
+ print("The server update list was not found, "
+ "and --update-list was not provided.")
+ print(e)
+ print()
+ parser.print_usage()
+ sys.exit(1)
+
+if opts.nosubs:
+ sub_vars = {}
+else:
+ samdb = SamDB(url=lp.samdb_url(), session_info=system_session(), lp=lp)
+
+ # get the substitution dictionary
+ sub_vars = get_subst_vars(samdb)
+
+# build up a list of update commands to pass to nsupdate
+update_list = []
+dns_list = []
+cache_list = []
+delete_list = []
+
+dup_set = set()
+cache_set = set()
+
+rebuild_cache = False
+try:
+ cfile = open(dns_update_cache, 'r+')
+except IOError:
+ # Perhaps create it
+ open(dns_update_cache, 'w+')
+ # Open it for reading again, in case someone else got to it first
+ cfile = open(dns_update_cache, 'r+')
+fcntl.lockf(cfile, fcntl.LOCK_EX)
+for line in cfile:
+ line = line.strip()
+ if line == '' or line[0] == "#":
+ continue
+ c = parse_dns_line(line, {})
+ if c is None:
+ continue
+ if str(c) not in cache_set:
+ cache_list.append(c)
+ cache_set.add(str(c))
+
+site_specific_rec = []
+
+# read each line, and check that the DNS name exists
+for line in file:
+ line = line.strip()
+
+ if '${SITE}' in line:
+ site_specific_rec.append(line)
+
+ if line == '' or line[0] == "#":
+ continue
+ d = parse_dns_line(line, sub_vars)
+ if d is None:
+ continue
+ if d.type == 'A' and len(IP4s) == 0:
+ continue
+ if d.type == 'AAAA' and len(IP6s) == 0:
+ continue
+ if str(d) not in dup_set:
+ dns_list.append(d)
+ dup_set.add(str(d))
+
+# Perform automatic site coverage by default
+auto_coverage = True
+
+if not am_rodc and auto_coverage:
+ site_names = kcc_utils.uncovered_sites_to_cover(samdb,
+ samdb.server_site_name())
+
+ # Duplicate all site specific records for the uncovered site
+ for site in site_names:
+ to_add = [samba.substitute_var(line, {'SITE': site})
+ for line in site_specific_rec]
+
+ for site_line in to_add:
+ d = parse_dns_line(site_line,
+ sub_vars=sub_vars)
+ if d is not None and str(d) not in dup_set:
+ dns_list.append(d)
+ dup_set.add(str(d))
+
+# now expand the entries, if any are A record with ip set to $IP
+# then replace with multiple entries, one for each interface IP
+for d in dns_list:
+ if d.ip != "$IP":
+ continue
+ if d.type == 'A':
+ d.ip = IP4s[0]
+ for i in range(len(IP4s)-1):
+ d2 = dnsobj(str(d))
+ d2.ip = IP4s[i+1]
+ dns_list.append(d2)
+ if d.type == 'AAAA':
+ d.ip = IP6s[0]
+ for i in range(len(IP6s)-1):
+ d2 = dnsobj(str(d))
+ d2.ip = IP6s[i+1]
+ dns_list.append(d2)
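+# Example of the expansion above (hypothetical addresses): with
+# IP4s = ['10.0.0.1', '10.0.0.2'], a single A entry with ip "$IP" becomes two
+# entries, one per interface address.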
+
+# now check if the entries already exist on the DNS server
+for d in dns_list:
+ found = False
+ for c in cache_list:
+ if str(c).lower() == str(d).lower():
+ found = True
+ break
+ if not found:
+ rebuild_cache = True
+ if opts.verbose:
+ print("need cache add: %s" % d)
+ if dns_zone_scavenging:
+ update_list.append(d)
+ if opts.verbose:
+ print("scavenging requires update: %s" % d)
+ elif opts.all_names:
+ update_list.append(d)
+ if opts.verbose:
+ print("force update: %s" % d)
+ elif not check_dns_name(d):
+ update_list.append(d)
+ if opts.verbose:
+ print("need update: %s" % d)
+
+for c in cache_list:
+ found = False
+ for d in dns_list:
+ if str(c).lower() == str(d).lower():
+ found = True
+ break
+ if found:
+ continue
+ rebuild_cache = True
+ if opts.verbose:
+ print("need cache remove: %s" % c)
+ if not opts.all_names and not check_dns_name(c):
+ continue
+ delete_list.append(c)
+ if opts.verbose:
+ print("need delete: %s" % c)
+
+if len(delete_list) == 0 and len(update_list) == 0 and not rebuild_cache:
+ if opts.verbose:
+ print("No DNS updates needed")
+ sys.exit(0)
+else:
+ if opts.verbose:
+ print("%d DNS updates and %d DNS deletes needed" % (len(update_list), len(delete_list)))
+
+use_samba_tool = opts.use_samba_tool
+use_nsupdate = opts.use_nsupdate
+# get our krb5 creds
+if (delete_list or update_list) and not opts.nocreds:
+ try:
+ creds = get_credentials(lp)
+ except RuntimeError as e:
+ ccachename = None
+
+ if sub_vars['IF_RWDNS_DOMAIN'] == "# ":
+ raise
+
+ if use_nsupdate:
+ raise
+
+ print("Failed to get Kerberos credentials, falling back to samba-tool: %s" % e)
+ use_samba_tool = True
+
+
+# ask nsupdate to delete entries as needed
+for d in delete_list:
+ if d.rpc or (not use_nsupdate and use_samba_tool):
+ if opts.verbose:
+ print("delete (samba-tool): %s" % d)
+ call_samba_tool(d, op="delete", zone=d.zone)
+
+ elif am_rodc:
+ if d.name.lower() == domain.lower():
+ if opts.verbose:
+ print("skip delete (rodc): %s" % d)
+ continue
+        if d.type not in ['A', 'AAAA']:
+ if opts.verbose:
+ print("delete (rodc): %s" % d)
+ call_rodc_update(d, op="delete")
+ else:
+ if opts.verbose:
+ print("delete (nsupdate): %s" % d)
+ call_nsupdate(d, op="delete")
+ else:
+ if opts.verbose:
+ print("delete (nsupdate): %s" % d)
+ call_nsupdate(d, op="delete")
+
+# ask nsupdate to add entries as needed
+for d in update_list:
+ if d.rpc or (not use_nsupdate and use_samba_tool):
+ if opts.verbose:
+ print("update (samba-tool): %s" % d)
+ call_samba_tool(d, zone=d.zone)
+
+ elif am_rodc:
+ if d.name.lower() == domain.lower():
+ if opts.verbose:
+ print("skip (rodc): %s" % d)
+ continue
+        if d.type not in ['A', 'AAAA']:
+ if opts.verbose:
+ print("update (rodc): %s" % d)
+ call_rodc_update(d)
+ else:
+ if opts.verbose:
+ print("update (nsupdate): %s" % d)
+ call_nsupdate(d)
+ else:
+ if opts.verbose:
+            print("update (nsupdate): %s" % d)
+ call_nsupdate(d)
+
+if rebuild_cache:
+ print("Rebuilding cache at %s" % dns_update_cache)
+ (file_dir, file_name) = os.path.split(dns_update_cache)
+ (tmp_fd, tmpfile) = tempfile.mkstemp(dir=file_dir, prefix=file_name, suffix="XXXXXX")
+ wfile = os.fdopen(tmp_fd, 'a')
+ for d in dns_list:
+ if opts.verbose:
+ print("Adding %s to %s" % (str(d), file_name))
+ wfile.write(str(d)+"\n")
+ wfile.flush()
+ os.rename(tmpfile, dns_update_cache)
+fcntl.lockf(cfile, fcntl.LOCK_UN)
+
+# delete the ccache if we created it
+if ccachename is not None:
+ os.unlink(ccachename)
+
+if error_count != 0:
+ print("Failed update of %u entries" % error_count)
+sys.exit(error_count)
diff --git a/source4/scripting/bin/samba_downgrade_db b/source4/scripting/bin/samba_downgrade_db
new file mode 100755
index 0000000..b9a0909
--- /dev/null
+++ b/source4/scripting/bin/samba_downgrade_db
@@ -0,0 +1,135 @@
+#!/usr/bin/python3
+#
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2019
+#
+# Downgrade a database from the 4.11 format to the 4.7 format. The 4.7 format
+# will run on any version of Samba AD, and Samba will repack/reconfigure the
+# database if necessary.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import optparse
+import sys
+
+# Find right directory when running from source tree
+sys.path.insert(0, "bin/python")
+
+
+import samba
+import ldb
+import urllib
+import os
+from samba import getopt as options
+from samba.samdb import SamDB
+from samba.dbchecker import dbcheck
+from samba.credentials import Credentials
+parser = optparse.OptionParser("samba_downgrade_db")
+sambaopts = options.SambaOptions(parser)
+parser.add_option_group(options.VersionOptions(parser))
+parser.add_option("-H", "--URL", help="LDB URL for database",
+ type=str, metavar="URL", dest="H")
+opts, args = parser.parse_args()
+
+if len(args) != 0:
+ parser.print_usage()
+ sys.exit(1)
+
+lp_ctx = sambaopts.get_loadparm()
+
+if opts.H is None:
+ url = lp_ctx.private_path("sam.ldb")
+else:
+ url = opts.H
+
+samdb = ldb.Ldb(url=url,
+ flags=ldb.FLG_DONT_CREATE_DB,
+ options=["modules:"])
+
+partitions = samdb.search(base="@PARTITION",
+ scope=ldb.SCOPE_BASE,
+ attrs=["backendStore", "partition"])
+
+backend = str(partitions[0].get('backendStore', 'tdb'))
+
+if backend == "mdb":
+ samdb = None
+ options = ["pack_format_override=%d" % ldb.PACKING_FORMAT]
+ # We can't remove GUID indexes from LMDB in case there are very
+ # long DNs, so we just move down the pack format, which also removes
+ # references to ORDERED_INTEGER in @ATTRIBUTES.
+
+ # Reopen the DB with pack_format_override set
+ samdb = SamDB(url=url,
+ flags=ldb.FLG_DONT_CREATE_DB,
+ lp=lp_ctx,
+ options=options)
+ samdb.transaction_start()
+ samdb.transaction_commit()
+ print("Your database has been downgraded to LDB pack format version %0x (v1)." % ldb.PACKING_FORMAT)
+
+ print("NOTE: Any use of a Samba 4.11 tool that modifies the DB will "
+ "auto-upgrade back to pack format version %0x (v2)" %
+ ldb.PACKING_FORMAT_V2)
+    sys.exit(0)
+
+# This is needed to force the @ATTRIBUTES and @INDEXLIST to be correct
+lp_ctx.set("dsdb:guid index", "false")
+
+modmsg = ldb.Message()
+modmsg.dn = ldb.Dn(samdb, '@INDEXLIST')
+modmsg.add(ldb.MessageElement(
+ elements=[],
+ flags=ldb.FLAG_MOD_REPLACE,
+ name='@IDXGUID'))
+modmsg.add(ldb.MessageElement(
+ elements=[],
+ flags=ldb.FLAG_MOD_REPLACE,
+ name='@IDX_DN_GUID'))
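+# For reference (not part of the original script): the modification built above
+# is equivalent to applying this LDIF, removing the GUID index markers:
+#   dn: @INDEXLIST
+#   changetype: modify
+#   replace: @IDXGUID
+#   -
+#   replace: @IDX_DN_GUID
+#   -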
+
+samdb.transaction_start()
+samdb.modify(modmsg)
+
+privatedir = os.path.dirname(url)
+
+dbs = []
+for part in partitions[0]['partition']:
+ dbname = str(part).split(":")[1]
+ dbpath = os.path.join(privatedir, dbname)
+ if os.path.isfile(dbpath):
+ dbpath = "ldb://" + dbpath
+ db = ldb.Ldb(url=dbpath,
+ options=["modules:"],
+ flags=ldb.FLG_DONT_CREATE_DB)
+ db.transaction_start()
+ db.modify(modmsg)
+ dbs.append(db)
+
+for db in dbs:
+ db.transaction_commit()
+
+samdb.transaction_commit()
+
+print("Re-opening with the full DB stack")
+samdb = SamDB(url=url,
+ flags=ldb.FLG_DONT_CREATE_DB,
+ lp=lp_ctx)
+print("Re-triggering another re-index")
+chk = dbcheck(samdb)
+
+chk.reindex_database()
+
+print("Your database has been downgraded to DN-based index values.")
+
+print("NOTE: Any use of a Samba 4.8 or later tool including ldbsearch will "
+ "auto-upgrade back to GUID index mode")
diff --git a/source4/scripting/bin/samba_kcc b/source4/scripting/bin/samba_kcc
new file mode 100755
index 0000000..67d801e
--- /dev/null
+++ b/source4/scripting/bin/samba_kcc
@@ -0,0 +1,345 @@
+#!/usr/bin/env python3
+#
+# Compute our KCC topology
+#
+# Copyright (C) Dave Craft 2011
+# Copyright (C) Andrew Bartlett 2015
+#
+# Andrew Bartlett's alleged work performed by his underlings Douglas
+# Bagnall and Garming Sam.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import sys
+import random
+
+# ensure we get messages out immediately, so they get in the samba logs,
+# and don't get swallowed by a timeout
+os.environ['PYTHONUNBUFFERED'] = '1'
+
+# Forcing GMT avoids a problem in some timezones with kerberos. Both MIT and
+# Heimdal can get mutual authentication errors due to the 24 second difference
+# between UTC and GMT when using some zone files (e.g. the PDT zone from
+# the US)
+os.environ["TZ"] = "GMT"
+
+# Find right directory when running from source tree
+sys.path.insert(0, "bin/python")
+
+import optparse
+import time
+
+from samba import getopt as options
+
+from samba.kcc.graph_utils import verify_and_dot, list_verify_tests
+from samba.kcc.graph_utils import GraphError
+
+import logging
+from samba.kcc.debug import logger, DEBUG, DEBUG_FN
+from samba.kcc import KCC
+
+# If DEFAULT_RNG_SEED is None, /dev/urandom or system time is used.
+DEFAULT_RNG_SEED = None
+
+
+def test_all_reps_from(kcc, dburl, lp, creds, unix_now, rng_seed=None,
+ ldif_file=None):
+ """Run the KCC from all DSAs in read-only mode
+
+ The behaviour depends on the global opts variable which contains
+ command line variables. Usually you will want to run it with
+ opt.dot_file_dir set (via --dot-file-dir) to see the graphs that
+ would be created from each DC.
+
+ :param lp: a loadparm object.
+ :param creds: a Credentials object.
+ :param unix_now: the unix epoch time as an integer
+ :param rng_seed: a seed for the random number generator
+ :return None:
+ """
+ # This implies readonly and attempt_live_connections
+ dsas = kcc.list_dsas()
+ samdb = kcc.samdb
+ needed_parts = {}
+ current_parts = {}
+
+ guid_to_dnstr = {}
+ for site in kcc.site_table.values():
+ guid_to_dnstr.update((str(dsa.dsa_guid), dnstr)
+ for dnstr, dsa in site.dsa_table.items())
+
+ dot_edges = []
+ dot_vertices = []
+ colours = []
+ vertex_colours = []
+
+ for dsa_dn in dsas:
+ if rng_seed is not None:
+ random.seed(rng_seed)
+ kcc = KCC(unix_now, readonly=True,
+ verify=opts.verify, debug=opts.debug,
+ dot_file_dir=opts.dot_file_dir)
+ if ldif_file is not None:
+ try:
+ # The dburl in this case is a temporary database.
+ # Its non-existence is ensured at the script startup.
+ # If it exists, it is from a previous iteration of
+ # this loop -- unless we're in an unfortunate race.
+ # Because this database is temporary, it lacks some
+ # detail and needs to be re-created anew to set the
+ # local dsa.
+ os.unlink(dburl)
+ except OSError:
+ pass
+
+ kcc.import_ldif(dburl, lp, ldif_file, dsa_dn)
+ else:
+ kcc.samdb = samdb
+ kcc.run(dburl, lp, creds, forced_local_dsa=dsa_dn,
+ forget_local_links=opts.forget_local_links,
+ forget_intersite_links=opts.forget_intersite_links,
+ attempt_live_connections=opts.attempt_live_connections)
+
+ current, needed = kcc.my_dsa.get_rep_tables()
+
+ for dsa in kcc.my_site.dsa_table.values():
+ if dsa is kcc.my_dsa:
+ continue
+ kcc.translate_ntdsconn(dsa)
+ c, n = dsa.get_rep_tables()
+ current.update(c)
+ needed.update(n)
+
+ for name, rep_table, rep_parts in (
+ ('needed', needed, needed_parts),
+ ('current', current, current_parts)):
+ for part, nc_rep in rep_table.items():
+ edges = rep_parts.setdefault(part, [])
+ for reps_from in nc_rep.rep_repsFrom:
+ source = guid_to_dnstr[str(reps_from.source_dsa_obj_guid)]
+ dest = guid_to_dnstr[str(nc_rep.rep_dsa_guid)]
+ edges.append((source, dest))
+
+ for site in kcc.site_table.values():
+ for dsa in site.dsa_table.values():
+ if dsa.is_ro():
+ vertex_colours.append('#cc0000')
+ else:
+ vertex_colours.append('#0000cc')
+ dot_vertices.append(dsa.dsa_dnstr)
+ if dsa.connect_table:
+ DEBUG_FN("DSA %s %s connections:\n%s" %
+ (dsa.dsa_dnstr, len(dsa.connect_table),
+ [x.from_dnstr for x in
+ dsa.connect_table.values()]))
+ for con in dsa.connect_table.values():
+ if con.is_rodc_topology():
+ colours.append('red')
+ else:
+ colours.append('blue')
+ dot_edges.append((con.from_dnstr, dsa.dsa_dnstr))
+
+ verify_and_dot('all-dsa-connections', dot_edges, vertices=dot_vertices,
+ label="all dsa NTDSConnections", properties=(),
+ debug=DEBUG, verify=opts.verify,
+ dot_file_dir=opts.dot_file_dir,
+ directed=True, edge_colors=colours,
+ vertex_colors=vertex_colours)
+
+ for name, rep_parts in (('needed', needed_parts),
+ ('current', current_parts)):
+ for part, edges in rep_parts.items():
+ verify_and_dot('all-repsFrom_%s__%s' % (name, part), edges,
+ directed=True, label=part,
+ properties=(), debug=DEBUG, verify=opts.verify,
+ dot_file_dir=opts.dot_file_dir)
+
+##################################################
+# samba_kcc entry point
+##################################################
+
+
+parser = optparse.OptionParser("samba_kcc [options]")
+sambaopts = options.SambaOptions(parser)
+credopts = options.CredentialsOptions(parser)
+
+parser.add_option_group(sambaopts)
+parser.add_option_group(credopts)
+parser.add_option_group(options.VersionOptions(parser))
+
+parser.add_option("--readonly", default=False,
+ help="compute topology but do not update database",
+ action="store_true")
+
+parser.add_option("--debug",
+ help="debug output",
+ action="store_true")
+
+parser.add_option("--verify",
+ help="verify that assorted invariants are kept",
+ action="store_true")
+
+parser.add_option("--list-verify-tests",
+ help=("list what verification actions are available "
+ "and do nothing else"),
+ action="store_true")
+
+parser.add_option("--dot-file-dir", default=None,
+ help="Write Graphviz .dot files to this directory")
+
+parser.add_option("--seed",
+ help="random number seed",
+ type=int, default=DEFAULT_RNG_SEED)
+
+parser.add_option("--importldif",
+ help="import topology ldif file",
+ type=str, metavar="<file>")
+
+parser.add_option("--exportldif",
+ help="export topology ldif file",
+ type=str, metavar="<file>")
+
+parser.add_option("-H", "--URL",
+ help="LDB URL for database or target server",
+ type=str, metavar="<URL>", dest="dburl")
+
+parser.add_option("--tmpdb",
+ help="schemaless database file to create for ldif import",
+ type=str, metavar="<file>")
+
+parser.add_option("--now",
+ help=("assume current time is this ('YYYYmmddHHMMSS[tz]',"
+ " default: system time)"),
+ type=str, metavar="<date>")
+
+parser.add_option("--forced-local-dsa",
+ help="run calculations assuming the DSA is this DN",
+ type=str, metavar="<DSA>")
+
+parser.add_option("--attempt-live-connections", default=False,
+ help="Attempt to connect to other DSAs to test links",
+ action="store_true")
+
+parser.add_option("--list-valid-dsas", default=False,
+ help=("Print a list of DSA dnstrs that could be"
+ " used in --forced-local-dsa"),
+ action="store_true")
+
+parser.add_option("--test-all-reps-from", default=False,
+ help="Create and verify a graph of reps-from for every DSA",
+ action="store_true")
+
+parser.add_option("--forget-local-links", default=False,
+ help="pretend not to know the existing local topology",
+ action="store_true")
+
+parser.add_option("--forget-intersite-links", default=False,
+ help="pretend not to know the existing intersite topology",
+ action="store_true")
+
+opts, args = parser.parse_args()
+
+
+if opts.list_verify_tests:
+ list_verify_tests()
+ sys.exit(0)
+
+if opts.test_all_reps_from:
+ opts.readonly = True
+
+if opts.debug:
+ logger.setLevel(logging.DEBUG)
+elif opts.readonly:
+ logger.setLevel(logging.INFO)
+else:
+ logger.setLevel(logging.WARNING)
+
+random.seed(opts.seed)
+
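+# --now accepts timestamps matching the formats tried below, for example
+# (illustrative values) "20240101120000GMT" or "20240101120000".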
+if opts.now:
+ for timeformat in ("%Y%m%d%H%M%S%Z", "%Y%m%d%H%M%S"):
+ try:
+ now_tuple = time.strptime(opts.now, timeformat)
+ break
+ except ValueError:
+ pass
+ else:
+        # this else clause runs only if the loop finished without a break,
+        # i.e. no format matched
+        print("could not parse time '%s'" % opts.now, file=sys.stderr)
+ sys.exit(1)
+ unix_now = int(time.mktime(now_tuple))
+else:
+ unix_now = int(time.time())
+
+lp = sambaopts.get_loadparm()
+# only log warnings/errors by default, unless the user has specified otherwise
+if opts.debug is None:
+ lp.set('log level', '1')
+
+creds = credopts.get_credentials(lp, fallback_machine=True)
+
+if opts.dburl is None:
+ if opts.importldif:
+ opts.dburl = opts.tmpdb
+ else:
+ opts.dburl = lp.samdb_url()
+elif opts.importldif:
+ logger.error("Don't use -H/--URL with --importldif, use --tmpdb instead")
+ sys.exit(1)
+
+# Instantiate Knowledge Consistency Checker and perform run
+kcc = KCC(unix_now, readonly=opts.readonly, verify=opts.verify,
+ debug=opts.debug, dot_file_dir=opts.dot_file_dir)
+
+if opts.exportldif:
+ rc = kcc.export_ldif(opts.dburl, lp, creds, opts.exportldif)
+ sys.exit(rc)
+
+if opts.importldif:
+ if opts.tmpdb is None or opts.tmpdb.startswith('ldap'):
+ logger.error("Specify a target temp database file with --tmpdb option")
+ sys.exit(1)
+ if os.path.exists(opts.tmpdb):
+ logger.error("The temp database file (%s) specified with --tmpdb "
+ "already exists. We refuse to clobber it." % opts.tmpdb)
+ sys.exit(1)
+
+ rc = kcc.import_ldif(opts.tmpdb, lp, opts.importldif,
+ forced_local_dsa=opts.forced_local_dsa)
+ if rc != 0:
+ sys.exit(rc)
+
+
+kcc.load_samdb(opts.dburl, lp, creds, force=False)
+
+if opts.test_all_reps_from:
+ test_all_reps_from(kcc, opts.dburl, lp, creds, unix_now,
+ rng_seed=opts.seed,
+ ldif_file=opts.importldif)
+ sys.exit()
+
+if opts.list_valid_dsas:
+ print('\n'.join(kcc.list_dsas()))
+ sys.exit()
+
+try:
+ rc = kcc.run(opts.dburl, lp, creds, opts.forced_local_dsa,
+ opts.forget_local_links, opts.forget_intersite_links,
+ attempt_live_connections=opts.attempt_live_connections)
+ sys.exit(rc)
+
+except GraphError as e:
+    print(e)
+ sys.exit(1)
diff --git a/source4/scripting/bin/samba_spnupdate b/source4/scripting/bin/samba_spnupdate
new file mode 100755
index 0000000..84ff771
--- /dev/null
+++ b/source4/scripting/bin/samba_spnupdate
@@ -0,0 +1,254 @@
+#!/usr/bin/env python3
+#
+# update our servicePrincipalName names from spn_update_list
+#
+# Copyright (C) Andrew Tridgell 2010
+# Copyright (C) Matthieu Patou <mat@matws.net> 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+import os, sys, re
+
+# ensure we get messages out immediately, so they get in the samba logs,
+# and don't get swallowed by a timeout
+os.environ['PYTHONUNBUFFERED'] = '1'
+
+# Forcing GMT avoids a problem in some timezones with kerberos. Both MIT and
+# Heimdal can get mutual authentication errors due to the 24 second difference
+# between UTC and GMT when using some zone files (e.g. the PDT zone from
+# the US)
+os.environ["TZ"] = "GMT"
+
+# Find right directory when running from source tree
+sys.path.insert(0, "bin/python")
+
+import samba, ldb
+import optparse
+from samba import Ldb
+from samba import getopt as options
+from samba.auth import system_session
+from samba.samdb import SamDB
+from samba.credentials import Credentials, DONT_USE_KERBEROS
+from samba.common import get_string
+
+parser = optparse.OptionParser("samba_spnupdate")
+sambaopts = options.SambaOptions(parser)
+parser.add_option_group(sambaopts)
+parser.add_option_group(options.VersionOptions(parser))
+parser.add_option("--verbose", action="store_true")
+
+credopts = options.CredentialsOptions(parser)
+parser.add_option_group(credopts)
+
+ccachename = None
+
+opts, args = parser.parse_args()
+
+if len(args) != 0:
+ parser.print_usage()
+ sys.exit(1)
+
+lp = sambaopts.get_loadparm()
+creds = credopts.get_credentials(lp)
+
+domain = lp.get("realm")
+host = lp.get("netbios name")
+
+
+# get the list of substitution vars
+def get_subst_vars(samdb):
+ global lp
+ vars = {}
+
+ vars['DNSDOMAIN'] = samdb.domain_dns_name()
+ vars['DNSFOREST'] = samdb.forest_dns_name()
+ vars['HOSTNAME'] = samdb.host_dns_name()
+ vars['NETBIOSNAME'] = lp.get('netbios name').upper()
+ vars['WORKGROUP'] = lp.get('workgroup')
+ vars['NTDSGUID'] = samdb.get_ntds_GUID()
+ res = samdb.search(base=samdb.get_default_basedn(), scope=ldb.SCOPE_BASE, attrs=["objectGUID"])
+ guid = samdb.schema_format_value("objectGUID", res[0]['objectGUID'][0])
+ vars['DOMAINGUID'] = get_string(guid)
+ return vars
+
+try:
+ private_dir = lp.get("private dir")
+ secrets_path = os.path.join(private_dir, "secrets.ldb")
+
+ secrets_db = Ldb(url=secrets_path, session_info=system_session(),
+ credentials=creds, lp=lp)
+ res = secrets_db.search(base=None,
+ expression="(&(objectclass=ldapSecret)(cn=SAMDB Credentials))",
+ attrs=["samAccountName", "secret"])
+
+ if len(res) == 1:
+ credentials = Credentials()
+ credentials.set_kerberos_state(DONT_USE_KERBEROS)
+
+ if "samAccountName" in res[0]:
+ credentials.set_username(res[0]["samAccountName"][0])
+
+ if "secret" in res[0]:
+ credentials.set_password(res[0]["secret"][0])
+
+ else:
+ credentials = None
+
+ samdb = SamDB(url=lp.samdb_url(), session_info=system_session(), credentials=credentials, lp=lp)
+except ldb.LdbError as e:
+ (num, msg) = e.args
+ print("Unable to open sam database %s : %s" % (lp.samdb_url(), msg))
+ sys.exit(1)
+
+
+# get the substitution dictionary
+sub_vars = get_subst_vars(samdb)
+
+# get the list of SPN entries we should have
+spn_update_list = lp.private_path('spn_update_list')
+
+file = open(spn_update_list, "r")
+
+spn_list = []
+
+has_forest_dns = False
+has_domain_dns = False
+# check if we "are DNS server"
+res = samdb.search(base=samdb.get_config_basedn(),
+ expression='(objectguid=%s)' % sub_vars['NTDSGUID'],
+ attrs=["msDS-hasMasterNCs"])
+
+basedn = str(samdb.get_default_basedn())
+if len(res) == 1:
+ if "msDS-hasMasterNCs" in res[0]:
+ for e in res[0]["msDS-hasMasterNCs"]:
+ if str(e) == "DC=DomainDnsZones,%s" % basedn:
+ has_domain_dns = True
+ if str(e) == "DC=ForestDnsZones,%s" % basedn:
+ has_forest_dns = True
+
+
+# build the spn list
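+# Each line of spn_update_list is an SPN template; illustrative examples
+# (assumed, not taken from that file) would be HOST/${HOSTNAME} or
+# ldap/${HOSTNAME}, whose variables are filled in from sub_vars below.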
+for line in file:
+ line = line.strip()
+ if line == '' or line[0] == "#":
+ continue
+ if re.match(r".*/DomainDnsZones\..*", line) and not has_domain_dns:
+ continue
+ if re.match(r".*/ForestDnsZones\..*", line) and not has_forest_dns:
+ continue
+ line = samba.substitute_var(line, sub_vars)
+ spn_list.append(line)
+
+# get the current list of SPNs in our sam
+res = samdb.search(base=samdb.get_default_basedn(),
+ expression='(&(objectClass=computer)(samaccountname=%s$))' % sub_vars['NETBIOSNAME'],
+ attrs=["servicePrincipalName"])
+if not res or len(res) != 1:
+ print("Failed to find computer object for %s$" % sub_vars['NETBIOSNAME'])
+ sys.exit(1)
+
+machine_dn = res[0]["dn"]
+
+old_spns = []
+if "servicePrincipalName" in res[0]:
+ for s in res[0]["servicePrincipalName"]:
+ old_spns.append(str(s))
+
+if opts.verbose:
+ print("Existing SPNs: %s" % old_spns)
+
+add_list = []
+
+# work out what needs to be added
+for s in spn_list:
+ in_list = False
+ for s2 in old_spns:
+ if s2.upper() == s.upper():
+ in_list = True
+ break
+ if not in_list:
+ add_list.append(s)
+
+if opts.verbose:
+ print("New SPNs: %s" % add_list)
+
+if add_list == []:
+ if opts.verbose:
+ print("Nothing to add")
+ sys.exit(0)
+
+def local_update(add_list):
+ '''store locally'''
+ global res
+ msg = ldb.Message()
+ msg.dn = res[0]['dn']
+    msg["servicePrincipalName"] = ldb.MessageElement(add_list,
+ ldb.FLAG_MOD_ADD, "servicePrincipalName")
+ res = samdb.modify(msg)
+
+def call_rodc_update(d):
+ '''RODCs need to use the writeSPN DRS call'''
+ global lp, sub_vars
+ from samba import drs_utils
+ from samba.dcerpc import drsuapi, nbt
+ from samba.net import Net
+
+ if opts.verbose:
+ print("Using RODC SPN update")
+
+ creds = credopts.get_credentials(lp)
+ creds.set_machine_account(lp)
+
+ net = Net(creds=creds, lp=lp)
+ try:
+ cldap_ret = net.finddc(domain=domain, flags=nbt.NBT_SERVER_DS | nbt.NBT_SERVER_WRITABLE)
+ except Exception as reason:
+ print("Unable to find writeable DC for domain '%s' to send DRS writeSPN to : %s" % (domain, reason))
+ sys.exit(1)
+ server = cldap_ret.pdc_dns_name
+ try:
+ binding_options = "seal"
+ if lp.log_level() >= 5:
+ binding_options += ",print"
+ drs = drsuapi.drsuapi('ncacn_ip_tcp:%s[%s]' % (server, binding_options), lp, creds)
+ (drs_handle, supported_extensions) = drs_utils.drs_DsBind(drs)
+ except Exception as reason:
+ print("Unable to connect to DC '%s' for domain '%s' : %s" % (server, domain, reason))
+ sys.exit(1)
+ req1 = drsuapi.DsWriteAccountSpnRequest1()
+ req1.operation = drsuapi.DRSUAPI_DS_SPN_OPERATION_ADD
+ req1.object_dn = str(machine_dn)
+ req1.count = 0
+ spn_names = []
+ for n in add_list:
+ if n.find('E3514235-4B06-11D1-AB04-00C04FC2DCD2') != -1:
+ # this one isn't allowed for RODCs, but we don't know why yet
+ continue
+ ns = drsuapi.DsNameString()
+ ns.str = n
+ spn_names.append(ns)
+ req1.count = req1.count + 1
+ if spn_names == []:
+ return
+ req1.spn_names = spn_names
+ (level, res) = drs.DsWriteAccountSpn(drs_handle, 1, req1)
+ if (res.status != (0, 'WERR_OK')):
+ print("WriteAccountSpn has failed with error %s" % str(res.status))
+
+if samdb.am_rodc():
+ call_rodc_update(add_list)
+else:
+ local_update(add_list)
diff --git a/source4/scripting/bin/samba_upgradedns b/source4/scripting/bin/samba_upgradedns
new file mode 100755
index 0000000..afc5807
--- /dev/null
+++ b/source4/scripting/bin/samba_upgradedns
@@ -0,0 +1,589 @@
+#!/usr/bin/env python3
+#
+# Unix SMB/CIFS implementation.
+# Copyright (C) Amitay Isaacs <amitay@gmail.com> 2012
+#
+# Upgrade DNS provision from BIND9_FLATFILE to BIND9_DLZ or SAMBA_INTERNAL
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import sys
+import os
+import errno
+import optparse
+import logging
+import grp
+from base64 import b64encode
+import shlex
+
+sys.path.insert(0, "bin/python")
+
+import ldb
+import samba
+from samba import param
+from samba.auth import system_session
+from samba.ndr import (
+ ndr_pack,
+ ndr_unpack )
+import samba.getopt as options
+from samba.upgradehelpers import (
+ get_paths,
+ get_ldbs )
+from samba.dsdb import DS_DOMAIN_FUNCTION_2003
+from samba.provision import (
+ find_provision_key_parameters,
+ interface_ips_v4,
+ interface_ips_v6 )
+from samba.provision.common import (
+ setup_path,
+ setup_add_ldif,
+ FILL_FULL)
+from samba.provision.sambadns import (
+ ARecord,
+ AAAARecord,
+ CNAMERecord,
+ NSRecord,
+ SOARecord,
+ SRVRecord,
+ TXTRecord,
+ get_dnsadmins_sid,
+ add_dns_accounts,
+ create_dns_partitions,
+ fill_dns_data_partitions,
+ create_dns_dir,
+ secretsdb_setup_dns,
+ create_dns_dir_keytab_link,
+ create_samdb_copy,
+ create_named_conf,
+ create_named_txt )
+from samba.dcerpc import security
+
+import dns.zone, dns.rdatatype
+
+__docformat__ = 'restructuredText'
+
+
+def find_bind_gid():
+ """Find system group id for bind9
+ """
+ for name in ["bind", "named"]:
+ try:
+ return grp.getgrnam(name)[2]
+ except KeyError:
+ pass
+ return None
+
+
+def convert_dns_rdata(rdata, serial=1):
+ """Convert resource records in dnsRecord format
+ """
+ if rdata.rdtype == dns.rdatatype.A:
+ rec = ARecord(rdata.address, serial=serial)
+ elif rdata.rdtype == dns.rdatatype.AAAA:
+ rec = AAAARecord(rdata.address, serial=serial)
+ elif rdata.rdtype == dns.rdatatype.CNAME:
+ rec = CNAMERecord(rdata.target.to_text(), serial=serial)
+ elif rdata.rdtype == dns.rdatatype.NS:
+ rec = NSRecord(rdata.target.to_text(), serial=serial)
+ elif rdata.rdtype == dns.rdatatype.SRV:
+ rec = SRVRecord(rdata.target.to_text(), int(rdata.port),
+ priority=int(rdata.priority), weight=int(rdata.weight),
+ serial=serial)
+ elif rdata.rdtype == dns.rdatatype.TXT:
+ slist = shlex.split(rdata.to_text())
+ rec = TXTRecord(slist, serial=serial)
+ elif rdata.rdtype == dns.rdatatype.SOA:
+ rec = SOARecord(rdata.mname.to_text(), rdata.rname.to_text(),
+ serial=int(rdata.serial),
+ refresh=int(rdata.refresh), retry=int(rdata.retry),
+ expire=int(rdata.expire), minimum=int(rdata.minimum))
+ else:
+ rec = None
+ return rec
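+# Usage sketch (illustrative): an A rdata for 10.0.0.1 is converted to
+# ARecord('10.0.0.1', serial=1); callers then ndr_pack() the result to obtain
+# the binary dnsRecord value stored in the directory.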
+
+
+def import_zone_data(samdb, logger, zone, serial, domaindn, forestdn,
+ dnsdomain, dnsforest):
+ """Insert zone data in DNS partitions
+ """
+ labels = dnsdomain.split('.')
+ labels.append('')
+ domain_root = dns.name.Name(labels)
+ domain_prefix = "DC=%s,CN=MicrosoftDNS,DC=DomainDnsZones,%s" % (dnsdomain,
+ domaindn)
+
+ tmp = "_msdcs.%s" % dnsforest
+ labels = tmp.split('.')
+ labels.append('')
+ forest_root = dns.name.Name(labels)
+ dnsmsdcs = "_msdcs.%s" % dnsforest
+ forest_prefix = "DC=%s,CN=MicrosoftDNS,DC=ForestDnsZones,%s" % (dnsmsdcs,
+ forestdn)
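+    # Illustrative example (hypothetical names): for dnsdomain
+    # samdom.example.com the domain records live under
+    #   DC=samdom.example.com,CN=MicrosoftDNS,DC=DomainDnsZones,<domaindn>
+    # and the forest records under
+    #   DC=_msdcs.samdom.example.com,CN=MicrosoftDNS,DC=ForestDnsZones,<forestdn>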
+
+ # Extract @ record
+ at_record = zone.get_node(domain_root)
+ zone.delete_node(domain_root)
+
+ # SOA record
+ rdset = at_record.get_rdataset(dns.rdataclass.IN, dns.rdatatype.SOA)
+ soa_rec = ndr_pack(convert_dns_rdata(rdset[0]))
+ at_record.delete_rdataset(dns.rdataclass.IN, dns.rdatatype.SOA)
+
+ # NS record
+ rdset = at_record.get_rdataset(dns.rdataclass.IN, dns.rdatatype.NS)
+ ns_rec = ndr_pack(convert_dns_rdata(rdset[0]))
+ at_record.delete_rdataset(dns.rdataclass.IN, dns.rdatatype.NS)
+
+ # A/AAAA records
+ ip_recs = []
+ for rdset in at_record:
+ for r in rdset:
+ rec = convert_dns_rdata(r)
+ ip_recs.append(ndr_pack(rec))
+
+ # Add @ record for domain
+ dns_rec = [soa_rec, ns_rec] + ip_recs
+ msg = ldb.Message(ldb.Dn(samdb, 'DC=@,%s' % domain_prefix))
+ msg["objectClass"] = ["top", "dnsNode"]
+ msg["dnsRecord"] = ldb.MessageElement(dns_rec, ldb.FLAG_MOD_ADD,
+ "dnsRecord")
+ try:
+ samdb.add(msg)
+ except Exception:
+ logger.error("Failed to add @ record for domain")
+ raise
+ logger.debug("Added @ record for domain")
+
+ # Add @ record for forest
+ dns_rec = [soa_rec, ns_rec]
+ msg = ldb.Message(ldb.Dn(samdb, 'DC=@,%s' % forest_prefix))
+ msg["objectClass"] = ["top", "dnsNode"]
+ msg["dnsRecord"] = ldb.MessageElement(dns_rec, ldb.FLAG_MOD_ADD,
+ "dnsRecord")
+ try:
+ samdb.add(msg)
+ except Exception:
+ logger.error("Failed to add @ record for forest")
+ raise
+ logger.debug("Added @ record for forest")
+
+ # Add remaining records in domain and forest
+ for node in zone.nodes:
+ name = node.relativize(forest_root).to_text()
+ if name == node.to_text():
+ name = node.relativize(domain_root).to_text()
+ dn = "DC=%s,%s" % (name, domain_prefix)
+ fqdn = "%s.%s" % (name, dnsdomain)
+ else:
+ dn = "DC=%s,%s" % (name, forest_prefix)
+ fqdn = "%s.%s" % (name, dnsmsdcs)
+
+ dns_rec = []
+ for rdataset in zone.nodes[node]:
+ for rdata in rdataset:
+ rec = convert_dns_rdata(rdata, serial)
+ if not rec:
+                    logger.warn("Unsupported record type (%s) for %s, ignoring" %
+                                (dns.rdatatype.to_text(rdata.rdtype), name))
+ else:
+ dns_rec.append(ndr_pack(rec))
+
+ msg = ldb.Message(ldb.Dn(samdb, dn))
+ msg["objectClass"] = ["top", "dnsNode"]
+ msg["dnsRecord"] = ldb.MessageElement(dns_rec, ldb.FLAG_MOD_ADD,
+ "dnsRecord")
+ try:
+ samdb.add(msg)
+ except Exception:
+ logger.error("Failed to add DNS record %s" % (fqdn))
+ raise
+ logger.debug("Added DNS record %s" % (fqdn))
+
+def cleanup_remove_file(file_path):
+ try:
+ os.remove(file_path)
+    except OSError as e:
+        # a file that is already gone is fine; log any other failure
+        if e.errno in [errno.EEXIST, errno.ENOENT]:
+            pass
+        else:
+            logger.debug("Could not remove %s: %s" % (file_path, e.strerror))
+
+def cleanup_remove_dir(dir_path):
+ try:
+ for root, dirs, files in os.walk(dir_path, topdown=False):
+ for name in files:
+ os.remove(os.path.join(root, name))
+ for name in dirs:
+ os.rmdir(os.path.join(root, name))
+ os.rmdir(dir_path)
+    except OSError as e:
+        # a directory that is already gone is fine; log any other failure
+        if e.errno in [errno.EEXIST, errno.ENOENT]:
+            pass
+        else:
+            logger.debug("Could not delete dir %s: %s" % (dir_path, e.strerror))
+
+def cleanup_obsolete_dns_files(paths):
+ cleanup_remove_file(os.path.join(paths.private_dir, "named.conf"))
+ cleanup_remove_file(os.path.join(paths.private_dir, "named.conf.update"))
+ cleanup_remove_file(os.path.join(paths.private_dir, "named.txt"))
+
+ cleanup_remove_dir(os.path.join(paths.private_dir, "dns"))
+
+
+# dnsprovision creates application partitions for AD-based DNS, mainly when the
+# existing provision was created using earlier snapshots of samba4 which did
+# not have support for DNS partitions
+
+if __name__ == '__main__':
+
+ # Setup command line parser
+ parser = optparse.OptionParser("samba_upgradedns [options]")
+ sambaopts = options.SambaOptions(parser)
+ credopts = options.CredentialsOptions(parser)
+
+ parser.add_option_group(options.VersionOptions(parser))
+ parser.add_option_group(sambaopts)
+ parser.add_option_group(credopts)
+
+ parser.add_option("--dns-backend", type="choice", metavar="<BIND9_DLZ|SAMBA_INTERNAL>",
+ choices=["SAMBA_INTERNAL", "BIND9_DLZ"], default="SAMBA_INTERNAL",
+ help="The DNS server backend, default SAMBA_INTERNAL")
+ parser.add_option("--migrate", type="choice", metavar="<yes|no>",
+ choices=["yes","no"], default="yes",
+ help="Migrate existing zone data, default yes")
+ parser.add_option("--verbose", help="Be verbose", action="store_true")
+
+ opts = parser.parse_args()[0]
+
+ if opts.dns_backend is None:
+ opts.dns_backend = 'SAMBA_INTERNAL'
+
+    # opts.migrate is the string "yes" or "no", so compare explicitly
+    if opts.migrate == "yes":
+        autofill = False
+    else:
+        autofill = True
+
+ # Set up logger
+ logger = logging.getLogger("upgradedns")
+ logger.addHandler(logging.StreamHandler(sys.stdout))
+ logger.setLevel(logging.INFO)
+ if opts.verbose:
+ logger.setLevel(logging.DEBUG)
+
+ lp = sambaopts.get_loadparm()
+ lp.load(lp.configfile)
+ creds = credopts.get_credentials(lp)
+
+ logger.info("Reading domain information")
+ paths = get_paths(param, smbconf=lp.configfile)
+ paths.bind_gid = find_bind_gid()
+ ldbs = get_ldbs(paths, creds, system_session(), lp)
+ names = find_provision_key_parameters(ldbs.sam, ldbs.secrets, ldbs.idmap,
+ paths, lp.configfile, lp)
+
+ if names.domainlevel < DS_DOMAIN_FUNCTION_2003:
+ logger.error("Cannot create AD based DNS for OS level < 2003")
+ sys.exit(1)
+
+ domaindn = names.domaindn
+ forestdn = names.rootdn
+
+ dnsdomain = names.dnsdomain.lower()
+ dnsforest = dnsdomain
+
+ site = names.sitename
+ hostname = names.hostname
+ dnsname = '%s.%s' % (hostname, dnsdomain)
+
+ domainsid = names.domainsid
+ domainguid = names.domainguid
+ ntdsguid = names.ntdsguid
+
+ # Check for DNS accounts and create them if required
+ try:
+ msg = ldbs.sam.search(base=domaindn, scope=ldb.SCOPE_DEFAULT,
+ expression='(sAMAccountName=DnsAdmins)',
+ attrs=['objectSid'])
+ dnsadmins_sid = ndr_unpack(security.dom_sid, msg[0]['objectSid'][0])
+ except IndexError:
+ logger.info("Adding DNS accounts")
+ add_dns_accounts(ldbs.sam, domaindn)
+ dnsadmins_sid = get_dnsadmins_sid(ldbs.sam, domaindn)
+ else:
+ logger.info("DNS accounts already exist")
+
+ # Import dns records from zone file
+ if os.path.exists(paths.dns):
+ logger.info("Reading records from zone file %s" % paths.dns)
+ try:
+ zone = dns.zone.from_file(paths.dns, relativize=False)
+ rrset = zone.get_rdataset("%s." % dnsdomain, dns.rdatatype.SOA)
+ serial = int(rrset[0].serial)
+ except Exception as e:
+ logger.warn("Error parsing DNS data from '%s' (%s)" % (paths.dns, str(e)))
+ autofill = True
+ else:
+ logger.info("No zone file %s (normal)" % paths.dns)
+ autofill = True
+
+ # Create DNS partitions if missing and fill DNS information
+ try:
+ expression = '(|(dnsRoot=DomainDnsZones.%s)(dnsRoot=ForestDnsZones.%s))' % \
+ (dnsdomain, dnsforest)
+ msg = ldbs.sam.search(base=names.configdn, scope=ldb.SCOPE_DEFAULT,
+ expression=expression, attrs=['nCName'])
+ ncname = msg[0]['nCName'][0]
+ except IndexError:
+ logger.info("Creating DNS partitions")
+
+ logger.info("Looking up IPv4 addresses")
+ hostip = interface_ips_v4(lp)
+ try:
+ hostip.remove('127.0.0.1')
+ except ValueError:
+ pass
+ if not hostip:
+ logger.error("No IPv4 addresses found")
+ sys.exit(1)
+ else:
+ hostip = hostip[0]
+ logger.debug("IPv4 addresses: %s" % hostip)
+
+ logger.info("Looking up IPv6 addresses")
+ hostip6 = interface_ips_v6(lp)
+ if not hostip6:
+ hostip6 = None
+ else:
+ hostip6 = hostip6[0]
+ logger.debug("IPv6 addresses: %s" % hostip6)
+
+ create_dns_partitions(ldbs.sam, domainsid, names, domaindn, forestdn,
+ dnsadmins_sid, FILL_FULL)
+
+ logger.info("Populating DNS partitions")
+ if autofill:
+ logger.warn("DNS records will be automatically created")
+
+ fill_dns_data_partitions(ldbs.sam, domainsid, site, domaindn, forestdn,
+ dnsdomain, dnsforest, hostname, hostip, hostip6,
+ domainguid, ntdsguid, dnsadmins_sid,
+ autofill=autofill)
+
+ if not autofill:
+ logger.info("Importing records from zone file")
+ import_zone_data(ldbs.sam, logger, zone, serial, domaindn, forestdn,
+ dnsdomain, dnsforest)
+ else:
+ logger.info("DNS partitions already exist")
+
+ # Mark that we are hosting DNS partitions
+ try:
+ dns_nclist = [ 'DC=DomainDnsZones,%s' % domaindn,
+ 'DC=ForestDnsZones,%s' % forestdn ]
+
+ msgs = ldbs.sam.search(base=names.serverdn, scope=ldb.SCOPE_DEFAULT,
+ expression='(objectclass=nTDSDSa)',
+ attrs=['hasPartialReplicaNCs',
+ 'msDS-hasMasterNCs'])
+ msg = msgs[0]
+
+ master_nclist = []
+ ncs = msg.get("msDS-hasMasterNCs")
+ if ncs:
+ for nc in ncs:
+ master_nclist.append(str(nc))
+
+ partial_nclist = []
+ ncs = msg.get("hasPartialReplicaNCs")
+ if ncs:
+ for nc in ncs:
+ partial_nclist.append(str(nc))
+
+ modified_master = False
+ modified_partial = False
+
+ for nc in dns_nclist:
+ if nc not in master_nclist:
+ master_nclist.append(nc)
+ modified_master = True
+ if nc in partial_nclist:
+ partial_nclist.remove(nc)
+ modified_partial = True
+
+ if modified_master or modified_partial:
+ logger.debug("Updating msDS-hasMasterNCs and hasPartialReplicaNCs attributes")
+ m = ldb.Message()
+ m.dn = msg.dn
+ if modified_master:
+ m["msDS-hasMasterNCs"] = ldb.MessageElement(master_nclist,
+ ldb.FLAG_MOD_REPLACE,
+ "msDS-hasMasterNCs")
+ if modified_partial:
+ if partial_nclist:
+ m["hasPartialReplicaNCs"] = ldb.MessageElement(partial_nclist,
+ ldb.FLAG_MOD_REPLACE,
+ "hasPartialReplicaNCs")
+ else:
+ m["hasPartialReplicaNCs"] = ldb.MessageElement(ncs,
+ ldb.FLAG_MOD_DELETE,
+ "hasPartialReplicaNCs")
+ ldbs.sam.modify(m)
+ except Exception:
+ raise
+
+ # Special stuff for DLZ backend
+ if opts.dns_backend == "BIND9_DLZ":
+ config_migration = False
+
+ if (paths.private_dir != paths.binddns_dir and
+ os.path.isfile(os.path.join(paths.private_dir, "named.conf"))):
+ config_migration = True
+
+ # Check if dns-HOSTNAME account exists and create it if required
+ secrets_msgs = ldbs.secrets.search(expression='(samAccountName=dns-%s)' % hostname, attrs=['secret'])
+ msg = ldbs.sam.search(base=domaindn, scope=ldb.SCOPE_DEFAULT,
+ expression='(sAMAccountName=dns-%s)' % (hostname),
+ attrs=[])
+
+ if len(secrets_msgs) == 0 or len(msg) == 0:
+ logger.info("Adding dns-%s account" % hostname)
+
+ if len(secrets_msgs) == 1:
+ dn = secrets_msgs[0].dn
+ ldbs.secrets.delete(dn)
+
+ if len(msg) == 1:
+ dn = msg[0].dn
+ ldbs.sam.delete(dn)
+
+ dnspass = samba.generate_random_password(128, 255)
+ setup_add_ldif(ldbs.sam, setup_path("provision_dns_add_samba.ldif"), {
+ "DNSDOMAIN": dnsdomain,
+ "DOMAINDN": domaindn,
+ "DNSPASS_B64": b64encode(dnspass.encode('utf-16-le')).decode('utf8'),
+ "HOSTNAME" : hostname,
+ "DNSNAME" : dnsname }
+ )
+
+ res = ldbs.sam.search(base=domaindn, scope=ldb.SCOPE_DEFAULT,
+ expression='(sAMAccountName=dns-%s)' % (hostname),
+ attrs=["msDS-KeyVersionNumber"])
+ if "msDS-KeyVersionNumber" in res[0]:
+ dns_key_version_number = int(res[0]["msDS-KeyVersionNumber"][0])
+ else:
+ dns_key_version_number = None
+
+ secretsdb_setup_dns(ldbs.secrets, names,
+ paths.private_dir, paths.binddns_dir, realm=names.realm,
+ dnsdomain=names.dnsdomain,
+ dns_keytab_path=paths.dns_keytab, dnspass=dnspass,
+ key_version_number=dns_key_version_number)
+
+ else:
+ logger.info("dns-%s account already exists" % hostname)
+
+ if not os.path.exists(paths.binddns_dir):
+ # This directory won't exist if we're restoring from an offline backup.
+ os.mkdir(paths.binddns_dir, 0o770)
+
+ create_dns_dir_keytab_link(logger, paths)
+
+        # This forces a re-creation of the dns directory and all the files
+        # within it. It is overkill, but it is easier to re-create the samdb
+        # copy than to try to fix a broken one.
+ create_dns_dir(logger, paths)
+
+ # Setup a copy of SAM for BIND9
+ create_samdb_copy(ldbs.sam, logger, paths, names, domainsid,
+ domainguid)
+
+ create_named_conf(paths, names.realm, dnsdomain, opts.dns_backend, logger)
+
+ create_named_txt(paths.namedtxt, names.realm, dnsdomain, dnsname,
+ paths.binddns_dir, paths.dns_keytab)
+
+ cleanup_obsolete_dns_files(paths)
+
+ if config_migration:
+ logger.info("ATTENTION: The BIND configuration and keytab has been moved to: %s",
+ paths.binddns_dir)
+ logger.info(" Please update your BIND configuration accordingly.")
+ else:
+ logger.info("See %s for an example configuration include file for BIND", paths.namedconf)
+ logger.info("and %s for further documentation required for secure DNS "
+ "updates", paths.namedtxt)
+
+ elif opts.dns_backend == "SAMBA_INTERNAL":
+ # Make sure to remove everything from the bind-dns directory to avoid
+ # possible security issues with the named group having write access
+ # to all AD partitions
+ cleanup_remove_file(os.path.join(paths.binddns_dir, "dns.keytab"))
+ cleanup_remove_file(os.path.join(paths.binddns_dir, "named.conf"))
+ cleanup_remove_file(os.path.join(paths.binddns_dir, "named.conf.update"))
+ cleanup_remove_file(os.path.join(paths.binddns_dir, "named.txt"))
+
+ cleanup_remove_dir(os.path.dirname(paths.dns))
+
+ try:
+ os.chmod(paths.private_dir, 0o700)
+ os.chown(paths.private_dir, -1, 0)
+        except OSError:
+            logger.warn("Failed to restore owner and permissions for %s",
+                        paths.private_dir)
+
+ # Check if dns-HOSTNAME account exists and delete it if required
+ try:
+ dn_str = 'samAccountName=dns-%s,CN=Principals' % hostname
+ msg = ldbs.secrets.search(expression='(dn=%s)' % dn_str, attrs=[])
+ dn = msg[0].dn
+ except IndexError:
+ dn = None
+
+ if dn is not None:
+ try:
+ ldbs.secrets.delete(dn)
+ except Exception:
+ logger.info("Failed to delete %s from secrets.ldb" % dn)
+
+ try:
+ msg = ldbs.sam.search(base=domaindn, scope=ldb.SCOPE_DEFAULT,
+ expression='(sAMAccountName=dns-%s)' % (hostname),
+ attrs=[])
+ dn = msg[0].dn
+ except IndexError:
+ dn = None
+
+ if dn is not None:
+ try:
+ ldbs.sam.delete(dn)
+ except Exception:
+ logger.info("Failed to delete %s from sam.ldb" % dn)
+
+ logger.info("Finished upgrading DNS")
+
+ services = lp.get("server services")
+ for service in services:
+ if service == "dns":
+ if opts.dns_backend.startswith("BIND"):
+ logger.info("You have switched to using %s as your dns backend,"
+ " but still have the internal dns starting. Please"
+ " make sure you add '-dns' to your server services"
+ " line in your smb.conf." % opts.dns_backend)
+ break
+ else:
+ if opts.dns_backend == "SAMBA_INTERNAL":
+ logger.info("You have switched to using %s as your dns backend,"
+ " but you still have samba starting looking for a"
+ " BIND backend. Please remove the -dns from your"
+ " server services line." % opts.dns_backend)
diff --git a/source4/scripting/bin/samba_upgradeprovision b/source4/scripting/bin/samba_upgradeprovision
new file mode 100755
index 0000000..3d072bc
--- /dev/null
+++ b/source4/scripting/bin/samba_upgradeprovision
@@ -0,0 +1,1848 @@
+#!/usr/bin/env python3
+# vim: expandtab
+#
+# Copyright (C) Matthieu Patou <mat@matws.net> 2009 - 2010
+#
+# Based on provision a Samba4 server by
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2008
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2008
+#
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+import logging
+import optparse
+import os
+import shutil
+import sys
+import tempfile
+import re
+import traceback
+# Allow running from the s4 source directory (without installing samba)
+sys.path.insert(0, "bin/python")
+
+import ldb
+import samba
+import samba.getopt as options
+from samba.samdb import get_default_backend_store
+
+from base64 import b64encode
+from samba.credentials import DONT_USE_KERBEROS
+from samba.auth import system_session, admin_session
+from samba import tdb_util
+from samba import mdb_util
+from ldb import (SCOPE_SUBTREE, SCOPE_BASE,
+ FLAG_MOD_REPLACE, FLAG_MOD_ADD, FLAG_MOD_DELETE,
+ MessageElement, Message, Dn, LdbError)
+from samba import param, dsdb, Ldb
+from samba.common import confirm
+from samba.descriptor import get_wellknown_sds, get_empty_descriptor, get_diff_sds
+from samba.provision import (find_provision_key_parameters,
+ ProvisioningError, get_last_provision_usn,
+ get_max_usn, update_provision_usn, setup_path)
+from samba.schema import get_linked_attributes, Schema, get_schema_descriptor
+from samba.dcerpc import security, drsblobs
+from samba.dcerpc.security import (
+ SECINFO_OWNER, SECINFO_GROUP, SECINFO_DACL, SECINFO_SACL)
+from samba.ndr import ndr_unpack
+from samba.upgradehelpers import (dn_sort, get_paths, newprovision,
+ get_ldbs, findprovisionrange,
+ usn_in_range, identic_rename,
+ update_secrets, CHANGE, ERROR, SIMPLE,
+ CHANGEALL, GUESS, CHANGESD, PROVISION,
+ updateOEMInfo, getOEMInfo, update_gpo,
+ delta_update_basesamdb, update_policyids,
+ update_machine_account_password,
+ search_constructed_attrs_stored,
+ int64range2str, update_dns_account_password,
+ increment_calculated_keyversion_number,
+ print_provision_ranges)
+from samba.xattr import copytree_with_xattrs
+from functools import cmp_to_key
+
+# make sure the script dies immediately when hitting control-C,
+# rather than raising KeyboardInterrupt. As we do all database
+# operations using transactions, this is safe.
+import signal
+signal.signal(signal.SIGINT, signal.SIG_DFL)
+
+replace=2**FLAG_MOD_REPLACE
+add=2**FLAG_MOD_ADD
+delete=2**FLAG_MOD_DELETE
+never=0
+
+
+# Will be modified during provision to record whether the default security
+# descriptor (SD) has been modified in some way.
+
+# Errors are always logged
+
+__docformat__ = "restructuredText"
+
+# Attributes that are never copied from the reference provision (even if they
+# do not exist in the destination object).
+# This is most probably because they are populated automatically when the
+# object is created.
+# This also applies to objects imported from the reference provision.
+replAttrNotCopied = [ "dn", "whenCreated", "whenChanged", "objectGUID",
+ "parentGUID", "distinguishedName",
+ "instanceType", "cn",
+ "lmPwdHistory", "pwdLastSet", "ntPwdHistory",
+ "unicodePwd", "dBCSPwd", "supplementalCredentials",
+ "gPCUserExtensionNames", "gPCMachineExtensionNames",
+ "maxPwdAge", "secret", "possibleInferiors", "privilege",
+ "sAMAccountType", "oEMInformation", "creationTime" ]
+
+nonreplAttrNotCopied = ["uSNCreated", "replPropertyMetaData", "uSNChanged",
+ "nextRid" ,"rIDNextRID", "rIDPreviousAllocationPool"]
+
+nonDSDBAttrNotCopied = ["msDS-KeyVersionNumber", "priorSecret", "priorWhenChanged"]
+
+
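+# Note: attrNotCopied below is a reference to (not a copy of) replAttrNotCopied,
+# so the extend() calls also grow replAttrNotCopied itself.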
+attrNotCopied = replAttrNotCopied
+attrNotCopied.extend(nonreplAttrNotCopied)
+attrNotCopied.extend(nonDSDBAttrNotCopied)
+# Usually, for an object that already exists we do not overwrite attributes, as
+# they might have been changed for good reasons. For a few of them, however, it
+# is mandatory to replace them, otherwise the provision will be broken somehow.
+# Attributes that are merely missing do not have to be listed here, as the
+# default behavior is to add missing attributes.
+hashOverwrittenAtt = { "prefixMap": replace, "systemMayContain": replace,
+ "systemOnly":replace, "searchFlags":replace,
+ "mayContain":replace, "systemFlags":replace+add,
+ "description":replace, "operatingSystemVersion":replace,
+ "adminPropertyPages":replace, "groupType":replace,
+ "wellKnownObjects":replace, "privilege":never,
+ "rIDAvailablePool": never,
+ "rIDNextRID": add, "rIDUsedPool": never,
+ "defaultSecurityDescriptor": replace + add,
+ "isMemberOfPartialAttributeSet": delete,
+ "attributeDisplayNames": replace + add,
+ "versionNumber": add}
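+# In the table above, "systemFlags": replace + add means that both
+# FLAG_MOD_REPLACE and FLAG_MOD_ADD deltas are acceptable for systemFlags,
+# while "privilege": never means an existing privilege value is never
+# overwritten.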
+
+dnNotToRecalculateFound = False
+dnToRecalculate = []
+backlinked = []
+forwardlinked = set()
+dn_syntax_att = []
+not_replicated = []
+def define_what_to_log(opts):
+ what = 0
+ if opts.debugchange:
+ what = what | CHANGE
+ if opts.debugchangesd:
+ what = what | CHANGESD
+ if opts.debugguess:
+ what = what | GUESS
+ if opts.debugprovision:
+ what = what | PROVISION
+ if opts.debugall:
+ what = what | CHANGEALL
+ return what
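+# For example, running with --debugchange sets the CHANGE bit in whatToLog, so
+# message(CHANGE, ...) calls below get printed; categories whose value is <= 0
+# are always printed by message().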
+
+
+parser = optparse.OptionParser("samba_upgradeprovision [options]")
+sambaopts = options.SambaOptions(parser)
+parser.add_option_group(sambaopts)
+parser.add_option_group(options.VersionOptions(parser))
+credopts = options.CredentialsOptions(parser)
+parser.add_option_group(credopts)
+parser.add_option("--setupdir", type="string", metavar="DIR",
+ help="directory with setup files")
+parser.add_option("--debugprovision", help="Debug provision", action="store_true")
+parser.add_option("--debugguess", action="store_true",
+ help="Print information on which values are guessed")
+parser.add_option("--debugchange", action="store_true",
+ help="Print information on what is different but won't be changed")
+parser.add_option("--debugchangesd", action="store_true",
+ help="Print security descriptor differences")
+parser.add_option("--debugall", action="store_true",
+ help="Print all available information (very verbose)")
+parser.add_option("--db_backup_only", action="store_true",
+ help="Do the backup of the database in the provision, skip the sysvol / netlogon shares")
+parser.add_option("--full", action="store_true",
+                  help="Perform full upgrade of the samdb (schema, configuration, new objects, ...)")
+parser.add_option("--very-old-pre-alpha9", action="store_true",
+ help="Perform additional forced SD resets required for a database from before Samba 4.0.0alpha9.")
+
+opts = parser.parse_args()[0]
+
+handler = logging.StreamHandler(sys.stdout)
+upgrade_logger = logging.getLogger("upgradeprovision")
+upgrade_logger.setLevel(logging.INFO)
+
+upgrade_logger.addHandler(handler)
+
+provision_logger = logging.getLogger("provision")
+provision_logger.addHandler(handler)
+
+whatToLog = define_what_to_log(opts)
+
+def message(what, text):
+ """Print a message if this message type has been selected to be printed
+
+ :param what: Category of the message
+ :param text: Message to print """
+ if (whatToLog & what) or what <= 0:
+ upgrade_logger.info("%s", text)
+
+if len(sys.argv) == 1:
+ opts.interactive = True
+lp = sambaopts.get_loadparm()
+smbconf = lp.configfile
+
+creds = credopts.get_credentials(lp)
+creds.set_kerberos_state(DONT_USE_KERBEROS)
+
+
+
+def check_for_DNS(refprivate, private, refbinddns_dir, binddns_dir, dns_backend):
+ """Check if the provision has already the requirement for dynamic dns
+
+ :param refprivate: The path to the private directory of the reference
+ provision
+ :param private: The path to the private directory of the upgraded
+ provision"""
+
+ spnfile = "%s/spn_update_list" % private
+ dnsfile = "%s/dns_update_list" % private
+
+ if not os.path.exists(spnfile):
+ shutil.copy("%s/spn_update_list" % refprivate, "%s" % spnfile)
+
+ if not os.path.exists(dnsfile):
+ shutil.copy("%s/dns_update_list" % refprivate, "%s" % dnsfile)
+
+ if not os.path.exists(binddns_dir):
+ os.mkdir(binddns_dir)
+
+ if dns_backend not in ['BIND9_DLZ', 'BIND9_FLATFILE']:
+ return
+
+ namedfile = lp.get("dnsupdate:path")
+ if not namedfile:
+ namedfile = "%s/named.conf.update" % binddns_dir
+ if not os.path.exists(namedfile):
+ destdir = "%s/new_dns" % binddns_dir
+ dnsdir = "%s/dns" % binddns_dir
+
+ if not os.path.exists(destdir):
+ os.mkdir(destdir)
+ if not os.path.exists(dnsdir):
+ os.mkdir(dnsdir)
+ shutil.copy("%s/named.conf" % refbinddns_dir, "%s/named.conf" % destdir)
+ shutil.copy("%s/named.txt" % refbinddns_dir, "%s/named.txt" % destdir)
+ message(SIMPLE, "It seems that your provision did not integrate "
+ "new rules for dynamic dns update of domain related entries")
+ message(SIMPLE, "A copy of the new bind configuration files and "
+ "template has been put in %s, you should read them and "
+ "configure dynamic dns updates" % destdir)
+
+
+def populate_links(samdb, schemadn):
+ """Populate an array with all the back linked attributes
+
+ This attributes that are modified automatically when
+ front attibutes are changed
+
+ :param samdb: A LDB object for sam.ldb file
+ :param schemadn: DN of the schema for the partition"""
+ linkedAttHash = get_linked_attributes(Dn(samdb, str(schemadn)), samdb)
+ backlinked.extend(linkedAttHash.values())
+ for t in linkedAttHash.keys():
+ forwardlinked.add(t)
+
+def isReplicated(att):
+ """ Indicate if the attribute is replicated or not
+
+ :param att: Name of the attribute to be tested
+    :return: True if the attribute is replicated, False otherwise
+ """
+
+ return (att not in not_replicated)
+
+def populateNotReplicated(samdb, schemadn):
+ """Populate an array with all the attributes that are not replicated
+
+ :param samdb: A LDB object for sam.ldb file
+ :param schemadn: DN of the schema for the partition"""
+ res = samdb.search(expression="(&(objectclass=attributeSchema)(systemflags:1.2.840.113556.1.4.803:=1))", base=Dn(samdb,
+ str(schemadn)), scope=SCOPE_SUBTREE,
+ attrs=["lDAPDisplayName"])
+ for elem in res:
+ not_replicated.append(str(elem["lDAPDisplayName"]))
+
+
+def populate_dnsyntax(samdb, schemadn):
+ """Populate an array with all the attributes that have DN synthax
+ (oid 2.5.5.1)
+
+ :param samdb: A LDB object for sam.ldb file
+ :param schemadn: DN of the schema for the partition"""
+ res = samdb.search(expression="(attributeSyntax=2.5.5.1)", base=Dn(samdb,
+ str(schemadn)), scope=SCOPE_SUBTREE,
+ attrs=["lDAPDisplayName"])
+ for elem in res:
+ dn_syntax_att.append(elem["lDAPDisplayName"])
+
+
+def sanitychecks(samdb, names):
+ """Make some checks before trying to update
+
+ :param samdb: An LDB object opened on sam.ldb
+ :param names: list of key provision parameters
+    :return: Status of the check (True if Ok, False otherwise) """
+ res = samdb.search(expression="objectClass=ntdsdsa", base=str(names.configdn),
+ scope=SCOPE_SUBTREE, attrs=["dn"],
+ controls=["search_options:1:2"])
+ if len(res) == 0:
+ print("No DC found. Your provision is most probably broken!")
+ return False
+ elif len(res) != 1:
+ print("Found %d domain controllers. For the moment " \
+ "upgradeprovision is not able to handle an upgrade on a " \
+ "domain with more than one DC. Please demote the other " \
+ "DC(s) before upgrading") % len(res)
+ return False
+ else:
+ return True
+
+
+def print_provision_key_parameters(names):
+ """Do a a pretty print of provision parameters
+
+ :param names: list of key provision parameters """
+ message(GUESS, "rootdn :" + str(names.rootdn))
+ message(GUESS, "configdn :" + str(names.configdn))
+ message(GUESS, "schemadn :" + str(names.schemadn))
+ message(GUESS, "serverdn :" + str(names.serverdn))
+ message(GUESS, "netbiosname :" + names.netbiosname)
+ message(GUESS, "defaultsite :" + names.sitename)
+ message(GUESS, "dnsdomain :" + names.dnsdomain)
+ message(GUESS, "hostname :" + names.hostname)
+ message(GUESS, "domain :" + names.domain)
+ message(GUESS, "realm :" + names.realm)
+ message(GUESS, "invocationid:" + names.invocation)
+ message(GUESS, "policyguid :" + names.policyid)
+ message(GUESS, "policyguiddc:" + str(names.policyid_dc))
+ message(GUESS, "domainsid :" + str(names.domainsid))
+ message(GUESS, "domainguid :" + names.domainguid)
+ message(GUESS, "ntdsguid :" + names.ntdsguid)
+ message(GUESS, "domainlevel :" + str(names.domainlevel))
+
+
+def handle_special_case(att, delta, new, old, useReplMetadata, basedn, aldb):
+ """Define more complicate update rules for some attributes
+
+ :param att: The attribute to be updated
+ :param delta: A messageElement object that correspond to the difference
+ between the updated object and the reference one
+ :param new: The reference object
+ :param old: The Updated object
+ :param useReplMetadata: A boolean that indicate if the update process
+ use replPropertyMetaData to decide what has to be updated.
+ :param basedn: The base DN of the provision
+ :param aldb: An ldb object used to build DN
+ :return: True to indicate that the attribute should be kept, False for
+ discarding it"""
+
+    # We do most of the special case handling only when we do not have
+    # usn information, as otherwise replPropertyMetaData will guide us more
+    # accurately
+ if not useReplMetadata:
+ flag = delta.get(att).flags()
+ if (att == "sPNMappings" and flag == FLAG_MOD_REPLACE and
+ ldb.Dn(aldb, "CN=Directory Service,CN=Windows NT,"
+ "CN=Services,CN=Configuration,%s" % basedn)
+ == old[0].dn):
+ return True
+ if (att == "userAccountControl" and flag == FLAG_MOD_REPLACE and
+ ldb.Dn(aldb, "CN=Administrator,CN=Users,%s" % basedn)
+ == old[0].dn):
+ message(SIMPLE, "We suggest that you change the userAccountControl"
+ " for user Administrator from value %d to %d" %
+ (int(str(old[0][att])), int(str(new[0][att]))))
+ return False
+ if (att == "minPwdAge" and flag == FLAG_MOD_REPLACE):
+ if (int(str(old[0][att])) == 0):
+ delta[att] = MessageElement(new[0][att], FLAG_MOD_REPLACE, att)
+ return True
+
+ if (att == "member" and flag == FLAG_MOD_REPLACE):
+ hash = {}
+ newval = []
+ changeDelta=0
+ for elem in old[0][att]:
+ hash[str(elem).lower()]=1
+ newval.append(str(elem))
+
+ for elem in new[0][att]:
+ if not str(elem).lower() in hash:
+ changeDelta=1
+ newval.append(str(elem))
+ if changeDelta == 1:
+ delta[att] = MessageElement(newval, FLAG_MOD_REPLACE, att)
+ else:
+ delta.remove(att)
+ return True
+
+ if (att in ("gPLink", "gPCFileSysPath") and
+ flag == FLAG_MOD_REPLACE and
+ str(new[0].dn).lower() == str(old[0].dn).lower()):
+ delta.remove(att)
+ return True
+
+ if att == "forceLogoff":
+ ref=0x8000000000000000
+ oldval=int(old[0][att][0])
+ newval=int(new[0][att][0])
+ ref == old and ref == abs(new)
+ return True
+
+ if att in ("adminDisplayName", "adminDescription"):
+ return True
+
+ if (str(old[0].dn) == "CN=Samba4-Local-Domain, %s" % (names.schemadn)
+ and att == "defaultObjectCategory" and flag == FLAG_MOD_REPLACE):
+ return True
+
+ if (str(old[0].dn) == "CN=Title, %s" % (str(names.schemadn)) and
+ att == "rangeUpper" and flag == FLAG_MOD_REPLACE):
+ return True
+
+ if (str(old[0].dn) == "%s" % (str(names.rootdn))
+ and att == "subRefs" and flag == FLAG_MOD_REPLACE):
+ return True
+    # Allow changing the revision of DomainUpdates and ForestUpdates objects
+    if (att == "revision" or att == "objectVersion"):
+        dnstr = str(delta.dn).lower()
+        if "domainupdates" in dnstr or "forestupdates" in dnstr:
+ return True
+ if str(delta.dn).endswith("CN=DisplaySpecifiers, %s" % names.configdn):
+ return True
+
+    # This one is a bit of a special animal, as we might already have added
+    # SPN entries to the list that has to be modified.
+    # So we go into detail to try to find out what has to be added ...
+ if (att == "servicePrincipalName" and delta.get(att).flags() == FLAG_MOD_REPLACE):
+ hash = {}
+ newval = []
+ changeDelta = 0
+ for elem in old[0][att]:
+ hash[str(elem)]=1
+ newval.append(str(elem))
+
+ for elem in new[0][att]:
+ if not str(elem) in hash:
+ changeDelta = 1
+ newval.append(str(elem))
+ if changeDelta == 1:
+ delta[att] = MessageElement(newval, FLAG_MOD_REPLACE, att)
+ else:
+ delta.remove(att)
+ return True
+
+ return False
+
+def dump_denied_change(dn, att, flagtxt, current, reference):
+ """Print detailed information about why a change is denied
+
+    :param dn: DN of the object whose attribute change is denied
+ :param att: Attribute that was supposed to be upgraded
+ :param flagtxt: Type of the update that should be performed
+ (add, change, remove, ...)
+ :param current: Value(s) of the current attribute
+ :param reference: Value(s) of the reference attribute"""
+
+ message(CHANGE, "dn= " + str(dn)+" " + att+" with flag " + flagtxt
+ + " must not be changed/removed. Discarding the change")
+ if att == "objectSid" :
+ message(CHANGE, "old : %s" % ndr_unpack(security.dom_sid, current[0]))
+ message(CHANGE, "new : %s" % ndr_unpack(security.dom_sid, reference[0]))
+ elif att == "rIDPreviousAllocationPool" or att == "rIDAllocationPool":
+ message(CHANGE, "old : %s" % int64range2str(current[0]))
+ message(CHANGE, "new : %s" % int64range2str(reference[0]))
+ else:
+        for i, val in enumerate(current):
+            message(CHANGE, "old %d : %s" % (i, str(val)))
+        if reference is not None:
+            for i, val in enumerate(reference):
+                message(CHANGE, "new %d : %s" % (i, str(val)))
+
+def handle_special_add(samdb, dn, names):
+ """Handle special operation (like remove) on some object needed during
+ upgrade
+
+ This is mostly due to wrong creation of the object in previous provision.
+ :param samdb: An Ldb object representing the SAM database
+ :param dn: DN of the object to inspect
+ :param names: list of key provision parameters
+ """
+
+ dntoremove = None
+ objDn = Dn(samdb, "CN=IIS_IUSRS, CN=Builtin, %s" % names.rootdn)
+ if dn == objDn :
+        # This entry was misplaced; let's remove it if it exists
+ dntoremove = "CN=IIS_IUSRS, CN=Users, %s" % names.rootdn
+
+ objDn = Dn(samdb,
+ "CN=Certificate Service DCOM Access, CN=Builtin, %s" % names.rootdn)
+ if dn == objDn:
+        # This entry was misplaced; let's remove it if it exists
+ dntoremove = "CN=Certificate Service DCOM Access,"\
+ "CN=Users, %s" % names.rootdn
+
+ objDn = Dn(samdb, "CN=Cryptographic Operators, CN=Builtin, %s" % names.rootdn)
+ if dn == objDn:
+        # This entry was misplaced; let's remove it if it exists
+ dntoremove = "CN=Cryptographic Operators, CN=Users, %s" % names.rootdn
+
+ objDn = Dn(samdb, "CN=Event Log Readers, CN=Builtin, %s" % names.rootdn)
+ if dn == objDn:
+        # This entry was misplaced; let's remove it if it exists
+ dntoremove = "CN=Event Log Readers, CN=Users, %s" % names.rootdn
+
+ objDn = Dn(samdb,"CN=System,CN=WellKnown Security Principals,"
+ "CN=Configuration,%s" % names.rootdn)
+ if dn == objDn:
+ oldDn = Dn(samdb,"CN=Well-Known-Security-Id-System,"
+ "CN=WellKnown Security Principals,"
+ "CN=Configuration,%s" % names.rootdn)
+
+ res = samdb.search(expression="(distinguishedName=%s)" % oldDn,
+ base=str(names.rootdn),
+ scope=SCOPE_SUBTREE, attrs=["dn"],
+ controls=["search_options:1:2"])
+
+ res2 = samdb.search(expression="(distinguishedName=%s)" % dn,
+ base=str(names.rootdn),
+ scope=SCOPE_SUBTREE, attrs=["dn"],
+ controls=["search_options:1:2"])
+
+ if len(res) > 0 and len(res2) == 0:
+ message(CHANGE, "Existing object %s must be replaced by %s. "
+ "Renaming old object" % (str(oldDn), str(dn)))
+ samdb.rename(oldDn, objDn, ["relax:0", "provision:0"])
+
+ return 0
+
+ if dntoremove is not None:
+ res = samdb.search(expression="(cn=RID Set)",
+ base=str(names.rootdn),
+ scope=SCOPE_SUBTREE, attrs=["dn"],
+ controls=["search_options:1:2"])
+
+ if len(res) == 0:
+ return 2
+ res = samdb.search(expression="(distinguishedName=%s)" % dntoremove,
+ base=str(names.rootdn),
+ scope=SCOPE_SUBTREE, attrs=["dn"],
+ controls=["search_options:1:2"])
+ if len(res) > 0:
+ message(CHANGE, "Existing object %s must be replaced by %s. "
+ "Removing old object" % (dntoremove, str(dn)))
+ samdb.delete(res[0]["dn"])
+ return 0
+
+ return 1
+
+
+def check_dn_nottobecreated(hash, index, listdn):
+ """Check if one of the DN present in the list has a creation order
+ greater than the current.
+
+ Hash is indexed by dn to be created, with each key
+ is associated the creation order.
+
+ First dn to be created has the creation order 0, second has 1, ...
+ Index contain the current creation order
+
+ :param hash: Hash holding the different DN of the object to be
+ created as key
+ :param index: Current creation order
+ :param listdn: List of DNs on which the current DN depends on
+ :return: None if the current object do not depend on other
+ object or if all object have been created before."""
+ if listdn is None:
+ return None
+ for dn in listdn:
+ key = str(dn).lower()
+ if key in hash and hash[key] > index:
+ return str(dn)
+ return None
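+# Illustration (hypothetical DNs): with hash == {"cn=a,dc=x": 0,
+# "cn=b,cn=a,dc=x": 1} and index == 0, a listdn containing "CN=B,CN=A,DC=X"
+# makes check_dn_nottobecreated() return that DN, because it is scheduled to
+# be created later than the current object.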
+
+
+
+def add_missing_object(ref_samdb, samdb, dn, names, basedn, hash, index):
+ """Add a new object if the dependencies are satisfied
+
+    The function adds the object if the objects on which it depends have
+    already been created.
+
+ :param ref_samdb: Ldb object representing the SAM db of the reference
+ provision
+ :param samdb: Ldb object representing the SAM db of the upgraded
+ provision
+ :param dn: DN of the object to be added
+ :param names: List of key provision parameters
+ :param basedn: DN of the partition to be updated
+ :param hash: Hash holding the different DN of the object to be
+ created as key
+ :param index: Current creation order
+    :return: True if the object was created, False otherwise"""
+
+ ret = handle_special_add(samdb, dn, names)
+
+ if ret == 2:
+ return False
+
+ if ret == 0:
+ return True
+
+
+ reference = ref_samdb.search(expression="(distinguishedName=%s)" % (str(dn)),
+ base=basedn, scope=SCOPE_SUBTREE,
+ controls=["search_options:1:2"])
+ empty = Message()
+ delta = samdb.msg_diff(empty, reference[0])
+ skip = False
+ try:
+ if str(reference[0].get("cn")) == "RID Set":
+ for klass in reference[0].get("objectClass"):
+ if str(klass).lower() == "ridset":
+ skip = True
+ finally:
+ if delta.get("objectSid"):
+ sid = str(ndr_unpack(security.dom_sid, reference[0]["objectSid"][0]))
+ m = re.match(r".*-(\d+)$", sid)
+ if m and int(m.group(1))>999:
+ delta.remove("objectSid")
+ for att in attrNotCopied:
+ delta.remove(att)
+ for att in backlinked:
+ delta.remove(att)
+ for att in dn_syntax_att:
+ depend_on_yet_tobecreated = check_dn_nottobecreated(hash, index,
+ delta.get(str(att)))
+ if depend_on_yet_tobecreated is not None:
+ message(CHANGE, "Object %s depends on %s in attribute %s. "
+ "Delaying the creation" % (dn,
+ depend_on_yet_tobecreated, att))
+ return False
+
+ delta.dn = dn
+ if not skip:
+ message(CHANGE,"Object %s will be added" % dn)
+ samdb.add(delta, ["relax:0", "provision:0"])
+ else:
+ message(CHANGE,"Object %s was skipped" % dn)
+
+ return True
+
+def gen_dn_index_hash(listMissing):
+ """Generate a hash associating the DN to its creation order
+
+ :param listMissing: List of DN
+ :return: Hash with DN as keys and creation order as values"""
+ hash = {}
+ for i in range(0, len(listMissing)):
+ hash[str(listMissing[i]).lower()] = i
+ return hash
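+# Illustration (hypothetical DNs): gen_dn_index_hash(["CN=A,DC=X",
+# "CN=B,CN=A,DC=X"]) returns {"cn=a,dc=x": 0, "cn=b,cn=a,dc=x": 1}.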
+
+def add_deletedobj_containers(ref_samdb, samdb, names):
+ """Add the object container: CN=Deleted Objects
+
+    This function creates the container for each partition that needs one and
+    then references the object in the wellKnownObjects attribute of the
+    partition root.
+
+ :param ref_samdb: Ldb object representing the SAM db of the reference
+ provision
+ :param samdb: Ldb object representing the SAM db of the upgraded provision
+ :param names: List of key provision parameters"""
+
+
+ wkoPrefix = "B:32:18E2EA80684F11D2B9AA00C04F79F805"
+ partitions = [str(names.rootdn), str(names.configdn)]
+ for part in partitions:
+ ref_delObjCnt = ref_samdb.search(expression="(cn=Deleted Objects)",
+ base=part, scope=SCOPE_SUBTREE,
+ attrs=["dn"],
+ controls=["show_deleted:0",
+ "show_recycled:0"])
+ delObjCnt = samdb.search(expression="(cn=Deleted Objects)",
+ base=part, scope=SCOPE_SUBTREE,
+ attrs=["dn"],
+ controls=["show_deleted:0",
+ "show_recycled:0"])
+ if len(ref_delObjCnt) > len(delObjCnt):
+ reference = ref_samdb.search(expression="cn=Deleted Objects",
+ base=part, scope=SCOPE_SUBTREE,
+ controls=["show_deleted:0",
+ "show_recycled:0"])
+ empty = Message()
+ delta = samdb.msg_diff(empty, reference[0])
+
+ delta.dn = Dn(samdb, str(reference[0]["dn"]))
+ for att in attrNotCopied:
+ delta.remove(att)
+
+ modcontrols = ["relax:0", "provision:0"]
+ samdb.add(delta, modcontrols)
+
+ listwko = []
+ res = samdb.search(expression="(objectClass=*)", base=part,
+ scope=SCOPE_BASE,
+ attrs=["dn", "wellKnownObjects"])
+
+ targetWKO = "%s:%s" % (wkoPrefix, str(reference[0]["dn"]))
+ found = False
+
+ if len(res[0]) > 0:
+ wko = res[0]["wellKnownObjects"]
+
+            # Check whether the wellKnownObject that we want to add is already there.
+ for o in wko:
+ if str(o) == targetWKO:
+ found = True
+ listwko.append(str(o))
+
+ if not found:
+ listwko.append(targetWKO)
+
+ delta = Message()
+ delta.dn = Dn(samdb, str(res[0]["dn"]))
+ delta["wellKnownObjects"] = MessageElement(listwko,
+ FLAG_MOD_REPLACE,
+ "wellKnownObjects" )
+ samdb.modify(delta)
+
+def add_missing_entries(ref_samdb, samdb, names, basedn, list):
+ """Add the missing object whose DN is the list
+
+ The function add the object if the objects on which it depends are
+ already created.
+
+ :param ref_samdb: Ldb object representing the SAM db of the reference
+ provision
+ :param samdb: Ldb object representing the SAM db of the upgraded
+ provision
+    :param names: List of key provision parameters
+    :param basedn: DN of the partition to be updated
+    :param list: List of DNs to be added in the upgraded provision"""
+
+ listMissing = []
+ listDefered = list
+
+ while(len(listDefered) != len(listMissing) and len(listDefered) > 0):
+ index = 0
+ listMissing = listDefered
+ listDefered = []
+ hashMissing = gen_dn_index_hash(listMissing)
+ for dn in listMissing:
+ ret = add_missing_object(ref_samdb, samdb, dn, names, basedn,
+ hashMissing, index)
+ index = index + 1
+ if ret == 0:
+ # DN can't be created because it depends on some
+ # other DN in the list
+ listDefered.append(dn)
+
+ if len(listDefered) != 0:
+ raise ProvisioningError("Unable to insert missing elements: "
+ "circular references")
+
+def handle_links(samdb, att, basedn, dn, value, ref_value, delta):
+ """This function handle updates on links
+
+ :param samdb: An LDB object pointing to the updated provision
+ :param att: Attribute to update
+ :param basedn: The root DN of the provision
+ :param dn: The DN of the inspected object
+ :param value: The value of the attribute
+ :param ref_value: The value of this attribute in the reference provision
+    :param delta: The message diff object that will be applied to
+                  transform the current provision"""
+
+ res = samdb.search(base=dn, controls=["search_options:1:2", "reveal:1"],
+ attrs=[att])
+
+ blacklist = {}
+ hash = {}
+ newlinklist = []
+ changed = False
+
+ for v in value:
+ newlinklist.append(str(v))
+
+ for e in value:
+ hash[e] = 1
+    # for the w2k domain level the reveal control won't reveal anything ...
+    # it means that we could re-add links that were removed on purpose ...
+    # Also this function in fact only accepts additions, not removals
+
+    for e in res[0][att]:
+        if e not in hash:
+            # We put in the blacklist all the elements that are in the "revealed"
+            # result and not in the "standard" result.
+            # These elements are links that were removed before and that
+            # we don't want to re-add
+            blacklist[e] = 1
+
+ for e in ref_value:
+        if e not in blacklist and e not in hash:
+ newlinklist.append(str(e))
+ changed = True
+ if changed:
+ delta[att] = MessageElement(newlinklist, FLAG_MOD_REPLACE, att)
+ else:
+ delta.remove(att)
+
+ return delta
+
+
+def checkKeepAttributeWithMetadata(delta, att, message, reference, current,
+ hash_attr_usn, basedn, usns, samdb):
+ """ Check if we should keep the attribute modification or not
+
+ :param delta: A message diff object
+ :param att: An attribute
+ :param message: A function to print messages
+    :param reference: A message object for the current entry coming from
+                      the reference provision.
+    :param current: A message object for the current entry coming from
+                    the current provision.
+ :param hash_attr_usn: A dictionary with attribute name as keys,
+ USN and invocation id as values.
+ :param basedn: The DN of the partition
+ :param usns: A dictionary with invocation ID as keys and USN ranges
+ as values.
+ :param samdb: A ldb object pointing to the sam DB
+
+ :return: The modified message diff.
+ """
+ global defSDmodified
+ isFirst = True
+ txt = ""
+ dn = current[0].dn
+
+ for att in list(delta):
+ if att in ["dn", "objectSid"]:
+ delta.remove(att)
+ continue
+
+        # We have the USN information stored by provision, so let's exploit
+        # replPropertyMetaData
+ if att in forwardlinked:
+ curval = current[0].get(att, ())
+ refval = reference[0].get(att, ())
+ delta = handle_links(samdb, att, basedn, current[0]["dn"],
+ curval, refval, delta)
+ continue
+
+
+ if isFirst and len(list(delta)) > 1:
+ isFirst = False
+ txt = "%s\n" % (str(dn))
+
+ if handle_special_case(att, delta, reference, current, True, None, None):
+ # This attribute is "complicated" to handle and handling
+ # was done in handle_special_case
+ continue
+
+ attrUSN = None
+ if hash_attr_usn.get(att):
+ [attrUSN, attInvId] = hash_attr_usn.get(att)
+
+ if attrUSN is None:
+            # If it's a replicated attribute and we don't have any USN
+            # information about it, it means that we never saw it before,
+            # so let's add it!
+            # If it is a replicated attribute but we are not master on it
+            # (ie. not initially added in the provision we masterize),
+            # attrUSN will be -1
+ if isReplicated(att):
+ continue
+ else:
+ message(CHANGE, "Non replicated attribute %s changed" % att)
+ continue
+
+ if att == "nTSecurityDescriptor":
+ cursd = ndr_unpack(security.descriptor,
+ current[0]["nTSecurityDescriptor"][0])
+ refsd = ndr_unpack(security.descriptor,
+ reference[0]["nTSecurityDescriptor"][0])
+
+ diff = get_diff_sds(refsd, cursd, names.domainsid)
+ if diff == "":
+ # FIXME find a way to have it only with huge huge verbose mode
+ # message(CHANGE, "%ssd are identical" % txt)
+ # txt = ""
+ delta.remove(att)
+ continue
+ else:
+ delta.remove(att)
+ message(CHANGESD, "%ssd are not identical:\n%s" % (txt, diff))
+ txt = ""
+ if attrUSN == -1:
+ message(CHANGESD, "But the SD has been changed by someonelse "
+ "so it's impossible to know if the difference"
+ " cames from the modification or from a previous bug")
+ global dnNotToRecalculateFound
+ dnNotToRecalculateFound = True
+ else:
+ dnToRecalculate.append(dn)
+ continue
+
+ if attrUSN == -1:
+ # This attribute was last modified by another DC forget
+ # about it
+ message(CHANGE, "%sAttribute: %s has been "
+ "created/modified/deleted by another DC. "
+ "Doing nothing" % (txt, att))
+ txt = ""
+ delta.remove(att)
+ continue
+ elif not usn_in_range(int(attrUSN), usns.get(attInvId)):
+ message(CHANGE, "%sAttribute: %s was not "
+ "created/modified/deleted during a "
+ "provision or upgradeprovision. Current "
+ "usn: %d. Doing nothing" % (txt, att,
+ attrUSN))
+ txt = ""
+ delta.remove(att)
+ continue
+ else:
+ if att == "defaultSecurityDescriptor":
+ defSDmodified = True
+ if attrUSN:
+ message(CHANGE, "%sAttribute: %s will be modified"
+ "/deleted it was last modified "
+ "during a provision. Current usn: "
+ "%d" % (txt, att, attrUSN))
+ txt = ""
+ else:
+ message(CHANGE, "%sAttribute: %s will be added because "
+ "it did not exist before" % (txt, att))
+ txt = ""
+ continue
+
+ return delta
+
+def update_present(ref_samdb, samdb, basedn, listPresent, usns):
+ """ This function updates the object that are already present in the
+ provision
+
+ :param ref_samdb: An LDB object pointing to the reference provision
+ :param samdb: An LDB object pointing to the updated provision
+ :param basedn: A string with the value of the base DN for the provision
+ (ie. DC=foo, DC=bar)
+    :param listPresent: A list of objects that are present in the provision
+    :param usns: A dictionary of USN ranges modified by previous provisions
+                 and upgradeprovision runs, grouped by invocation ID
+ """
+
+ # This hash is meant to speedup lookup of attribute name from an oid,
+ # it's for the replPropertyMetaData handling
+ hash_oid_name = {}
+ res = samdb.search(expression="objectClass=attributeSchema", base=basedn,
+ controls=["search_options:1:2"], attrs=["attributeID",
+ "lDAPDisplayName"])
+ if len(res) > 0:
+ for e in res:
+ strDisplay = str(e.get("lDAPDisplayName"))
+ hash_oid_name[str(e.get("attributeID"))] = strDisplay
+ else:
+ msg = "Unable to insert missing elements: circular references"
+ raise ProvisioningError(msg)
+
+ changed = 0
+ sd_flags = SECINFO_OWNER | SECINFO_GROUP | SECINFO_DACL | SECINFO_SACL
+ controls = ["search_options:1:2", "sd_flags:1:%d" % sd_flags]
+ message(CHANGE, "Using replPropertyMetadata for change selection")
+ for dn in listPresent:
+ reference = ref_samdb.search(expression="(distinguishedName=%s)" % (str(dn)), base=basedn,
+ scope=SCOPE_SUBTREE,
+ controls=controls)
+ current = samdb.search(expression="(distinguishedName=%s)" % (str(dn)), base=basedn,
+ scope=SCOPE_SUBTREE, controls=controls)
+
+ if (
+ (str(current[0].dn) != str(reference[0].dn)) and
+ (str(current[0].dn).upper() == str(reference[0].dn).upper())
+ ):
+ message(CHANGE, "Names are the same except for the case. "
+ "Renaming %s to %s" % (str(current[0].dn),
+ str(reference[0].dn)))
+ identic_rename(samdb, reference[0].dn)
+ current = samdb.search(expression="(distinguishedName=%s)" % (str(dn)), base=basedn,
+ scope=SCOPE_SUBTREE,
+ controls=controls)
+
+ delta = samdb.msg_diff(current[0], reference[0])
+
+ for att in backlinked:
+ delta.remove(att)
+
+ for att in attrNotCopied:
+ delta.remove(att)
+
+ delta.remove("name")
+
+ nb_items = len(list(delta))
+
+ if nb_items == 1:
+ continue
+
+ if nb_items > 1:
+ # Fetch the replPropertyMetaData
+ res = samdb.search(expression="(distinguishedName=%s)" % (str(dn)), base=basedn,
+ scope=SCOPE_SUBTREE, controls=controls,
+ attrs=["replPropertyMetaData"])
+ ctr = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
+ res[0]["replPropertyMetaData"][0]).ctr
+
+ hash_attr_usn = {}
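+            # hash_attr_usn maps an attribute name to
+            # [originating USN, originating invocation id], or to [-1, None]
+            # when the last change did not originate from one of the
+            # invocation ids found in the provision USN ranges.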
+ for o in ctr.array:
+ # We put in this hash only modification
+ # made on the current host
+ att = hash_oid_name[samdb.get_oid_from_attid(o.attid)]
+ if str(o.originating_invocation_id) in usns.keys():
+ hash_attr_usn[att] = [o.originating_usn, str(o.originating_invocation_id)]
+ else:
+ hash_attr_usn[att] = [-1, None]
+
+ delta = checkKeepAttributeWithMetadata(delta, att, message, reference,
+ current, hash_attr_usn,
+ basedn, usns, samdb)
+
+ delta.dn = dn
+
+
+        if len(delta) > 1:
+            # Skip dn as the value is not really changed ...
+            attributes = ", ".join(list(delta.keys())[1:])
+ modcontrols = []
+ relaxedatt = ['iscriticalsystemobject', 'grouptype']
+ # Let's try to reduce as much as possible the use of relax control
+ for attr in delta.keys():
+ if attr.lower() in relaxedatt:
+ modcontrols = ["relax:0", "provision:0"]
+ message(CHANGE, "%s is different from the reference one, changed"
+ " attributes: %s\n" % (dn, attributes))
+ changed += 1
+ samdb.modify(delta, modcontrols)
+ return changed
+
+def reload_full_schema(samdb, names):
+ """Load the updated schema with all the new and existing classes
+ and attributes.
+
+    :param samdb: An LDB object connected to the sam.ldb of the upgraded
+                  provision
+ :param names: List of key provision parameters
+ """
+
+ schemadn = str(names.schemadn)
+ current = samdb.search(expression="objectClass=*", base=schemadn,
+ scope=SCOPE_SUBTREE)
+
+ schema_ldif = "".join(samdb.write_ldif(ent, ldb.CHANGETYPE_NONE) for ent in current)
+
+ prefixmap_data = b64encode(open(setup_path("prefixMap.txt"), 'rb').read()).decode('utf8')
+
+ # We don't actually add this ldif, just parse it
+ prefixmap_ldif = "dn: %s\nprefixMap:: %s\n\n" % (schemadn, prefixmap_data)
+
+ dsdb._dsdb_set_schema_from_ldif(samdb, prefixmap_ldif, schema_ldif, schemadn)
+
+
+def update_partition(ref_samdb, samdb, basedn, names, schema, provisionUSNs, prereloadfunc):
+ """Check differences between the reference provision and the upgraded one.
+
+    It looks for all objects whose base DN is basedn.
+
+ This function will also add the missing object and update existing object
+ to add or remove attributes that were missing.
+
+    :param ref_samdb: An LDB object connected to the sam.ldb of the
+                       reference provision
+    :param samdb: An LDB object connected to the sam.ldb of the upgraded
+                  provision
+ :param basedn: String value of the DN of the partition
+ :param names: List of key provision parameters
+ :param schema: A Schema object
+ :param provisionUSNs: A dictionary with range of USN modified during provision
+ or upgradeprovision. Ranges are grouped by invocationID.
+ :param prereloadfunc: A function that must be executed just before the reload
+ of the schema
+ """
+
+ hash_new = {}
+ hash = {}
+ listMissing = []
+ listPresent = []
+ reference = []
+ current = []
+
+ # Connect to the reference provision and get all the attribute in the
+ # partition referred by name
+ reference = ref_samdb.search(expression="objectClass=*", base=basedn,
+ scope=SCOPE_SUBTREE, attrs=["dn"],
+ controls=["search_options:1:2"])
+
+ current = samdb.search(expression="objectClass=*", base=basedn,
+ scope=SCOPE_SUBTREE, attrs=["dn"],
+ controls=["search_options:1:2"])
+ # Create a hash for speeding the search of new object
+ for i in range(0, len(reference)):
+ hash_new[str(reference[i]["dn"]).lower()] = reference[i]["dn"]
+
+ # Create a hash for speeding the search of existing object in the
+ # current provision
+ for i in range(0, len(current)):
+ hash[str(current[i]["dn"]).lower()] = current[i]["dn"]
+
+
+ for k in hash_new.keys():
+ if not k in hash:
+ if not str(hash_new[k]) == "CN=Deleted Objects, %s" % names.rootdn:
+ listMissing.append(hash_new[k])
+ else:
+ listPresent.append(hash_new[k])
+
+    # Sort the missing objects in order to have the objects of the lowest level
+    # first (which can be containers for higher level objects)
+ listMissing.sort(key=cmp_to_key(dn_sort))
+ listPresent.sort(key=cmp_to_key(dn_sort))
+
+    # The following lines load the up-to-date schema into our current LDB.
+    # A complete schema is needed, as the insertion of attributes
+    # and classes is done against it and the schema is self validated
+ samdb.set_schema(schema)
+ try:
+ message(SIMPLE, "There are %d missing objects" % (len(listMissing)))
+ add_deletedobj_containers(ref_samdb, samdb, names)
+
+ add_missing_entries(ref_samdb, samdb, names, basedn, listMissing)
+
+ prereloadfunc()
+ message(SIMPLE, "Reloading a merged schema, which might trigger "
+ "reindexing so please be patient")
+ reload_full_schema(samdb, names)
+ message(SIMPLE, "Schema reloaded!")
+
+ changed = update_present(ref_samdb, samdb, basedn, listPresent,
+ provisionUSNs)
+ message(SIMPLE, "There are %d changed objects" % (changed))
+ return 1
+
+ except Exception as err:
+ message(ERROR, "Exception during upgrade of samdb:")
+ (typ, val, tb) = sys.exc_info()
+ traceback.print_exception(typ, val, tb)
+ return 0
+
+
+def check_updated_sd(ref_sam, cur_sam, names):
+ """Check if the security descriptor in the upgraded provision are the same
+ as the reference
+
+ :param ref_sam: A LDB object connected to the sam.ldb file used as
+ the reference provision
+ :param cur_sam: A LDB object connected to the sam.ldb file used as
+ upgraded provision
+ :param names: List of key provision parameters"""
+ reference = ref_sam.search(expression="objectClass=*", base=str(names.rootdn),
+ scope=SCOPE_SUBTREE,
+ attrs=["dn", "nTSecurityDescriptor"],
+ controls=["search_options:1:2"])
+ current = cur_sam.search(expression="objectClass=*", base=str(names.rootdn),
+ scope=SCOPE_SUBTREE,
+ attrs=["dn", "nTSecurityDescriptor"],
+ controls=["search_options:1:2"])
+ hash = {}
+ for i in range(0, len(reference)):
+ refsd_blob = reference[i]["nTSecurityDescriptor"][0]
+ hash[str(reference[i]["dn"]).lower()] = refsd_blob
+
+
+ for i in range(0, len(current)):
+ key = str(current[i]["dn"]).lower()
+ if key in hash:
+ cursd_blob = current[i]["nTSecurityDescriptor"][0]
+ cursd = ndr_unpack(security.descriptor,
+ cursd_blob)
+ if cursd_blob != hash[key]:
+ refsd = ndr_unpack(security.descriptor,
+ hash[key])
+ txt = get_diff_sds(refsd, cursd, names.domainsid, False)
+ if txt != "":
+ message(CHANGESD, "On object %s ACL is different"
+ " \n%s" % (current[i]["dn"], txt))
+
+
+
+def fix_wellknown_sd(samdb, names):
+ """This function fix the SD for partition/wellknown containers (basedn, configdn, ...)
+ This is needed because some provision use to have broken SD on containers
+
+ :param samdb: An LDB object pointing to the sam of the current provision
+ :param names: A list of key provision parameters
+ """
+
+ list_wellknown_dns = []
+
+ subcontainers = get_wellknown_sds(samdb)
+
+ for [dn, descriptor_fn] in subcontainers:
+ list_wellknown_dns.append(dn)
+ if dn in dnToRecalculate:
+ delta = Message()
+ delta.dn = dn
+ descr = descriptor_fn(names.domainsid, name_map=names.name_map)
+ delta["nTSecurityDescriptor"] = MessageElement(descr, FLAG_MOD_REPLACE,
+ "nTSecurityDescriptor" )
+ samdb.modify(delta)
+ message(CHANGESD, "nTSecurityDescriptor updated on wellknown DN: %s" % delta.dn)
+
+ return list_wellknown_dns
+
+def rebuild_sd(samdb, names):
+ """Rebuild security descriptor of the current provision from scratch
+
+    During the different pre-releases of Samba 4, security descriptors
+    (SDs) were notably broken (up to and including alpha11).
+
+    This function allows one to get them back in order; it works
+    only after the database comparison that --full mode uses and which
+    populates the dnToRecalculate list and the dnNotToRecalculateFound flag.
+
+ The idea is that the SD can be safely recalculated from scratch to get it right.
+
+ :param names: List of key provision parameters"""
+
+ listWellknown = fix_wellknown_sd(samdb, names)
+
+ if len(dnToRecalculate) != 0:
+ message(CHANGESD, "%d DNs have been marked as needed to be recalculated"
+ % (len(dnToRecalculate)))
+
+ for dn in dnToRecalculate:
+ # well known SDs have already been reset
+ if dn in listWellknown:
+ continue
+ delta = Message()
+ delta.dn = dn
+ sd_flags = SECINFO_OWNER | SECINFO_GROUP | SECINFO_DACL | SECINFO_SACL
+ try:
+ descr = get_empty_descriptor(names.domainsid)
+ delta["nTSecurityDescriptor"] = MessageElement(descr, FLAG_MOD_REPLACE,
+ "nTSecurityDescriptor")
+ samdb.modify(delta, ["sd_flags:1:%d" % sd_flags,"relax:0","local_oid:%s:0" % dsdb.DSDB_CONTROL_DBCHECK])
+ except LdbError as e:
+ samdb.transaction_cancel()
+ res = samdb.search(expression="objectClass=*", base=str(delta.dn),
+ scope=SCOPE_BASE,
+ attrs=["nTSecurityDescriptor"],
+ controls=["sd_flags:1:%d" % sd_flags])
+ badsd = ndr_unpack(security.descriptor,
+ res[0]["nTSecurityDescriptor"][0])
+ message(ERROR, "On %s bad stuff %s" % (str(delta.dn),badsd.as_sddl(names.domainsid)))
+ return
+
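+# @PROVISION is a special top-level LDB record used to store provision
+# metadata such as the last provision USN ranges (see removeProvisionUSN
+# below).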
+def hasATProvision(samdb):
+ entry = samdb.search(expression="(distinguishedName=@PROVISION)", base = "",
+ scope=SCOPE_BASE,
+ attrs=["dn"])
+
+ if entry is not None and len(entry) == 1:
+ return True
+ else:
+ return False
+
+def removeProvisionUSN(samdb):
+ attrs = [samba.provision.LAST_PROVISION_USN_ATTRIBUTE, "dn"]
+ entry = samdb.search(expression="(distinguishedName=@PROVISION)", base = "",
+ scope=SCOPE_BASE,
+ attrs=attrs)
+ empty = Message()
+ empty.dn = entry[0].dn
+ delta = samdb.msg_diff(entry[0], empty)
+ delta.remove("dn")
+ delta.dn = entry[0].dn
+ samdb.modify(delta)
+
+def remove_stored_generated_attrs(paths, creds, session, lp):
+ """Remove previously stored constructed attributes
+
+ :param paths: List of paths for different provision objects
+ from the upgraded provision
+ :param creds: A credential object
+ :param session: A session object
+    :param lp: A loadparm object
+ :return: An associative array whose key are the different constructed
+ attributes and the value the dn where this attributes were found.
+ """
+
+
+def simple_update_basesamdb(newpaths, paths, names):
+ """Update the provision container db: sam.ldb
+    This function is aimed at very old provisions (before alpha9)
+
+ :param newpaths: List of paths for different provision objects
+ from the reference provision
+ :param paths: List of paths for different provision objects
+ from the upgraded provision
+ :param names: List of key provision parameters"""
+
+ message(SIMPLE, "Copy samdb")
+ tdb_util.tdb_copy(newpaths.samdb, paths.samdb)
+
+ message(SIMPLE, "Update partitions filename if needed")
+ schemaldb = os.path.join(paths.private_dir, "schema.ldb")
+ configldb = os.path.join(paths.private_dir, "configuration.ldb")
+ usersldb = os.path.join(paths.private_dir, "users.ldb")
+ samldbdir = os.path.join(paths.private_dir, "sam.ldb.d")
+
+ if not os.path.isdir(samldbdir):
+ os.mkdir(samldbdir)
+ os.chmod(samldbdir, 0o700)
+ if os.path.isfile(schemaldb):
+ tdb_util.tdb_copy(schemaldb, os.path.join(samldbdir,
+ "%s.ldb"%str(names.schemadn).upper()))
+ os.remove(schemaldb)
+ if os.path.isfile(usersldb):
+ tdb_util.tdb_copy(usersldb, os.path.join(samldbdir,
+ "%s.ldb"%str(names.rootdn).upper()))
+ os.remove(usersldb)
+ if os.path.isfile(configldb):
+ tdb_util.tdb_copy(configldb, os.path.join(samldbdir,
+ "%s.ldb"%str(names.configdn).upper()))
+ os.remove(configldb)
+
+
+def update_samdb(ref_samdb, samdb, names, provisionUSNs, schema, prereloadfunc):
+ """Upgrade the SAM DB contents for all the provision partitions
+
+    :param ref_samdb: An LDB object connected to the sam.ldb of the reference
+                       provision
+    :param samdb: An LDB object connected to the sam.ldb of the upgraded
+                  provision
+ :param names: List of key provision parameters
+ :param provisionUSNs: A dictionary with range of USN modified during provision
+ or upgradeprovision. Ranges are grouped by invocationID.
+ :param schema: A Schema object that represent the schema of the provision
+ :param prereloadfunc: A function that must be executed just before the reload
+ of the schema
+ """
+
+ message(SIMPLE, "Starting update of samdb")
+ ret = update_partition(ref_samdb, samdb, str(names.rootdn), names,
+ schema, provisionUSNs, prereloadfunc)
+ if ret:
+ message(SIMPLE, "Update of samdb finished")
+ return 1
+ else:
+ message(SIMPLE, "Update failed")
+ return 0
+
+
+def backup_provision(samdb, paths, dir, only_db):
+ """This function backup the provision files so that a rollback
+ is possible
+
+ :param paths: Paths to different objects
+ :param dir: Directory where to store the backup
+ :param only_db: Skip sysvol for users with big sysvol
+ """
+
+ # Currently we default to tdb for the backend store type
+ #
+ backend_store = "tdb"
+ res = samdb.search(base="@PARTITION",
+ scope=ldb.SCOPE_BASE,
+ attrs=["backendStore"])
+ if "backendStore" in res[0]:
+ backend_store = str(res[0]["backendStore"][0])
+
+
+ if paths.sysvol and not only_db:
+ copytree_with_xattrs(paths.sysvol, os.path.join(dir, "sysvol"))
+
+ tdb_util.tdb_copy(paths.samdb, os.path.join(dir, os.path.basename(paths.samdb)))
+ tdb_util.tdb_copy(paths.secrets, os.path.join(dir, os.path.basename(paths.secrets)))
+ tdb_util.tdb_copy(paths.idmapdb, os.path.join(dir, os.path.basename(paths.idmapdb)))
+ tdb_util.tdb_copy(paths.privilege, os.path.join(dir, os.path.basename(paths.privilege)))
+ if os.path.isfile(os.path.join(paths.private_dir,"eadb.tdb")):
+ tdb_util.tdb_copy(os.path.join(paths.private_dir,"eadb.tdb"), os.path.join(dir, "eadb.tdb"))
+ shutil.copy2(paths.smbconf, dir)
+ shutil.copy2(os.path.join(paths.private_dir,"secrets.keytab"), dir)
+
+ samldbdir = os.path.join(paths.private_dir, "sam.ldb.d")
+ if not os.path.isdir(samldbdir):
+ samldbdir = paths.private_dir
+ schemaldb = os.path.join(paths.private_dir, "schema.ldb")
+ configldb = os.path.join(paths.private_dir, "configuration.ldb")
+ usersldb = os.path.join(paths.private_dir, "users.ldb")
+ tdb_util.tdb_copy(schemaldb, os.path.join(dir, "schema.ldb"))
+        tdb_util.tdb_copy(usersldb, os.path.join(dir, "users.ldb"))
+        tdb_util.tdb_copy(configldb, os.path.join(dir, "configuration.ldb"))
+ else:
+ os.mkdir(os.path.join(dir, "sam.ldb.d"), 0o700)
+
+ for ldb_name in os.listdir(samldbdir):
+ if not ldb_name.endswith("-lock"):
+ if backend_store == "mdb" and ldb_name != "metadata.tdb":
+ mdb_util.mdb_copy(os.path.join(samldbdir, ldb_name),
+ os.path.join(dir, "sam.ldb.d", ldb_name))
+ else:
+ tdb_util.tdb_copy(os.path.join(samldbdir, ldb_name),
+ os.path.join(dir, "sam.ldb.d", ldb_name))
+
+
+def sync_calculated_attributes(samdb, names):
+ """Synchronize attributes used for constructed ones, with the
+ old constructed that were stored in the database.
+
+ This apply for instance to msds-keyversionnumber that was
+ stored and that is now constructed from replpropertymetadata.
+
+ :param samdb: An LDB object attached to the currently upgraded samdb
+ :param names: Various key parameter about current provision.
+ """
+ listAttrs = ["msDs-KeyVersionNumber"]
+ hash = search_constructed_attrs_stored(samdb, names.rootdn, listAttrs)
+ if "msDs-KeyVersionNumber" in hash:
+ increment_calculated_keyversion_number(samdb, names.rootdn,
+ hash["msDs-KeyVersionNumber"])
+
+# Synopsis for upgradeprovision
+# 1) get the paths related to the provision to be updated (called current)
+# 2) open the current provision ldbs
+# 3) fetch the key provision parameters (domain sid, domain guid, invocationid
+#    of the DC ....)
+# 4) search for lastProvisionUSN in order to get the ranges of USNs modified
+#    by either upgradeprovision or provision
+# 5) creation of a new provision with the latest version of the provision
+#    script (called reference)
+# 6) get the reference provision paths
+# 7) open the reference provision ldbs
+# 8) set up helper data that will help the update process
+# 9) (SKIPPED) we no longer update the privilege ldb by copying the one of the reference provision to
+#    the current provision, because a shutil.copy would break the transaction locks both databases are under
+#    and this database has not changed between 2009 and Samba 4.0.3 in Feb 2013 (at least)
+# 10)get the oemInfo field; this field contains information about the different
+#    provisions that have been done
+# 11)Depending on whether the --very-old-pre-alpha9 flag is set, the following things are done
+#    A) When the flag is not specified (default)
+#       The base sam.ldb file is updated by looking at the difference between
+#       the reference one and the current one. Everything is copied with the
+#       exception of lastProvisionUSN attributes.
+#    B) Other case (it reflects that the provision was done before alpha9)
+#       The base sam.ldb of the reference provision is copied over
+#       the current one; if necessary the ldbs related to partitions are moved
+#       and renamed
+#       The highest used USN is fetched so that USNs changed by
+#       upgradeprovision can be tracked
+# 12)A Schema object is created; it will be used to provide a complete
+#    schema to the current provision during the update (as the schema of the
+#    current provision might not be complete and so won't allow some
+#    objects to be created)
+# 13)Proceed to the full update of the sam DB (see the separate paragraph about it)
+# 14)The secrets db is updated by pulling all the differences from the reference
+#    provision into the current provision
+# 15)As the previous step has most probably modified the password stored in
+#    secrets for the current DC, a new password is generated,
+#    the kvno is bumped and the entry in samdb is also updated
+# 16)For provisions older than alpha9, we must fix the SDs a little bit,
+#    because SDs used to be generated with the system account before alpha9.
+# 17)The highest usn modified so far is searched for in the database; it will be
+#    the upper limit for usns modified during provision.
+#    This is done before a potential SD recalculation because we do not want
+#    SDs modified during recalculation to be marked as modified during provision
+#    (and so possibly replaced at the next upgradeprovision)
+# 18)Rebuild the SDs if the flag indicates to do so
+# 19)Check the differences between the SDs of the reference provision and those
+#    of the current provision. The check is done by getting the sddl representation
+#    of the SD. Each sddl is chunked into parts (user, group, dacl, sacl).
+#    Each part is verified separately; for dacl and sacl the ACL is split into
+#    ACEs and each ACE is verified separately (so that a permutation in ACE
+#    order doesn't get reported as an error).
+# 20)The oemInfo field is updated to add information about the fact that the
+#    provision has been updated by the upgradeprovision version xxx
+#    (the version is the one obtained when starting samba with the --version
+#    parameter)
+# 21)Check if the current provision has all the settings needed for dynamic
+#    DNS updates to work (that is to say the provision is newer than
+#    January 2010). If not, DNS configuration files from the reference provision
+#    are copied into a sub folder and the administrator is invited to
+#    do what is needed.
+# 22)If the lastProvisionUSN attribute was present it is updated to add
+#    the range of usns modified by the current upgradeprovision
+
+
+# About updating the sam DB
+# The update takes place in the update_partition function.
+# This function reads both the current and the reference provision and lists all
+# the available DNs of objects.
+# If the string representation of a DN in the reference provision is
+# equal to the string representation of a DN in the current provision
+# (without taking care of case) then the object is flagged as being
+# present. If the object is not present in the current provision the object
+# is flagged as missing in the current provision. Objects present in the current
+# provision but not in the reference provision are ignored.
+# Once the list of objects present and missing is done, the deleted object
+# containers are created in the different partitions (if missing)
+#
+# Then the function add_missing_entries is called.
+# This function will go through the list of missing entries by calling
+# add_missing_object for the given object. If this function returns 0
+# it means that the object needs some other object in order to be created.
+# The object is reappended at the end of the list to be created later
+# (and preferably after all the needed objects have been created).
+# The function keeps on looping on the list of objects to be created until
+# it's empty or the number of deferred creations is equal to the number
+# of objects that still need to be created.
+
+# The function add_missing_object will first check if the object can be created,
+# that is to say that it doesn't depend on other not yet created objects.
+# If this prerequisite can't be fulfilled it exits with 0.
+# Then it will try to create the missing entry by doing
+# an ldb_message_diff between the object in the reference provision and
+# an empty object.
+# The resulting object is filtered to remove all the backlink attributes
+# (ie. memberOf) as they will be created by the other linked object (ie.
+# the one with the member attribute).
+# All attributes specified in the attrNotCopied array are
+# also removed; most of the time these are generated attributes.
+
+# After missing entries have been added the update_partition function will
+# take care of objects that exist but that need some update.
+# In order to do so the function update_present is called with the list
+# of objects that are present in both provisions and that might need an update.
+
+# This function first handles case mismatches so that the DNs in the current
+# provision have the same case as in the reference provision.
+
+# It will then construct an associative array consisting of attributes as
+# keys and the originating [usn, invocationid] as values (if the originating
+# invocation id is different from the invocation id of the current DC the usn
+# value is -1 instead).
+
+# If the range of provision-modified attributes is present, the function will
+# use the replPropertyMetaData update method, which is the following:
+# Remove attributes that should not be updated: rIDAvailablePool, objectSid,
+# creationTime, msDs-KeyVersionNumber, oEMInformation.
+# Check for each attribute if its usn is within one of the ranges modified by
+# provision and if its originating id is the invocation id of the
+# current DC, then validate the update from reference to current.
+# If not, or if there is no replPropertyMetaData for this attribute, then we
+# do not update it.
+# Otherwise (in case the range of provision-modified attributes is not present)
+# it uses the following process:
+# All attributes that need to be added are accepted, with the exception of those
+# listed in hashOverwrittenAtt; in this case the attribute needs to have the
+# correct flags specified.
+# For attributes that need to be modified or removed, a check is performed
+# in hashOverwrittenAtt: if the attribute is present and the modification flag
+# (remove, delete) is one of those listed for this attribute then the modification
+# is accepted. For complicated handling of attribute updates, control is passed
+# to handle_special_case
+
+
+
+if __name__ == '__main__':
+ defSDmodified = False
+
+ # From here start the big steps of the program
+ # 1) First get files paths
+ paths = get_paths(param, smbconf=smbconf)
+ # Get ldbs with the system session, it is needed for searching
+ # provision parameters
+ session = system_session()
+
+    # This variable will hold the last provision USN if it exists.
+ minUSN = 0
+ # 2)
+ ldbs = get_ldbs(paths, creds, session, lp)
+ backupdir = tempfile.mkdtemp(dir=paths.private_dir,
+ prefix="backupprovision")
+ backup_provision(ldbs.sam, paths, backupdir, opts.db_backup_only)
+ try:
+ ldbs.startTransactions()
+
+ # 3) Guess all the needed names (variables in fact) from the current
+ # provision.
+ names = find_provision_key_parameters(ldbs.sam, ldbs.secrets, ldbs.idmap,
+ paths, smbconf, lp)
+ # 4)
+ lastProvisionUSNs = get_last_provision_usn(ldbs.sam)
+ if lastProvisionUSNs is not None:
+ v = 0
+ for k in lastProvisionUSNs.keys():
+ for r in lastProvisionUSNs[k]:
+ v = v + 1
+
+            message(CHANGE,
+                "Found last provision USNs: %d invocation(s) for a total of %d range(s)" %
+                (len(lastProvisionUSNs.keys()), v // 2))
+
+ if lastProvisionUSNs.get("default") is not None:
+ message(CHANGE, "Old style for usn ranges used")
+ lastProvisionUSNs[str(names.invocation)] = lastProvisionUSNs["default"]
+ del lastProvisionUSNs["default"]
+ else:
+ message(SIMPLE, "Your provision lacks provision range information")
+ if confirm("Do you want to run findprovisionusnranges to try to find them ?", False):
+ ldbs.groupedRollback()
+ minobj = 5
+ (hash_id, nb_obj) = findprovisionrange(ldbs.sam, ldb.Dn(ldbs.sam, str(names.rootdn)))
+ message(SIMPLE, "Here is a list of changes that modified more than %d objects in 1 minute." % minobj)
+ message(SIMPLE, "Usually changes made by provision and upgradeprovision are those who affect a couple"
+ " of hundred of objects or more")
+ message(SIMPLE, "Total number of objects: %d" % nb_obj)
+ message(SIMPLE, "")
+
+ print_provision_ranges(hash_id, minobj, None, str(paths.samdb), str(names.invocation))
+
+ message(SIMPLE, "Once you applied/adapted the change(s) please restart the upgradeprovision script")
+ sys.exit(0)
+
+ # Objects will be created with the admin session
+            # (no longer the system session)
+ adm_session = admin_session(lp, str(names.domainsid))
+ # So we reget handle on objects
+ # ldbs = get_ldbs(paths, creds, adm_session, lp)
+
+ if not sanitychecks(ldbs.sam, names):
+ message(SIMPLE, "Sanity checks for the upgrade have failed. "
+ "Check the messages and correct the errors "
+ "before rerunning upgradeprovision")
+ ldbs.groupedRollback()
+ sys.exit(1)
+
+ # Let's see provision parameters
+ print_provision_key_parameters(names)
+
+ # 5) With all this information let's create a fresh new provision used as
+ # reference
+ message(SIMPLE, "Creating a reference provision")
+ provisiondir = tempfile.mkdtemp(dir=paths.private_dir,
+ prefix="referenceprovision")
+ result = newprovision(names, session, smbconf, provisiondir,
+ provision_logger, base_schema="2008_R2")
+ result.report_logger(provision_logger)
+
+ # TODO
+ # 6) and 7)
+ # We need to get a list of objects whose SD is directly computed from
+ # defaultSecurityDescriptor.
+ # This will allow us to know for which objects we can rebuild the SD in
+ # case the parent's SD or the defaultSD changes.
+ # Get file paths of this new provision
+ newpaths = get_paths(param, targetdir=provisiondir)
+ new_ldbs = get_ldbs(newpaths, creds, session, lp)
+ new_ldbs.startTransactions()
+
+ populateNotReplicated(new_ldbs.sam, names.schemadn)
+ # 8) Populate some associative arrays to ease the update process
+ # List of attributes which are links and backlinks
+ populate_links(new_ldbs.sam, names.schemadn)
+ # List of attributes with ASN DN syntax
+ populate_dnsyntax(new_ldbs.sam, names.schemadn)
+ # 9) (now skipped, was copy of privileges.ldb)
+ # 10)
+ oem = getOEMInfo(ldbs.sam, str(names.rootdn))
+ # Do some modification on sam.ldb
+ ldbs.groupedCommit()
+ new_ldbs.groupedCommit()
+ deltaattr = None
+ # 11)
+ message(GUESS, oem)
+ if oem is None or hasATProvision(ldbs.sam) or not opts.very_old_pre_alpha9:
+ # 11) A
+ # Starting from alpha9 we can consider that the structure is quite OK
+ # and that we should only do a delta
+ deltaattr = delta_update_basesamdb(newpaths.samdb,
+ paths.samdb,
+ creds,
+ session,
+ lp,
+ message)
+ else:
+ # 11) B
+ simple_update_basesamdb(newpaths, paths, names)
+ ldbs = get_ldbs(paths, creds, session, lp)
+ removeProvisionUSN(ldbs.sam)
+
+ ldbs.startTransactions()
+ minUSN = int(str(get_max_usn(ldbs.sam, str(names.rootdn)))) + 1
+ new_ldbs.startTransactions()
+
+ # 12)
+ schema = Schema(names.domainsid, schemadn=str(names.schemadn))
+ # We create a closure that will be invoked just before schema reload
+ def schemareloadclosure():
+ basesam = Ldb(paths.samdb, session_info=session, credentials=creds, lp=lp,
+ options=["modules:"])
+ doit = False
+ if deltaattr is not None and len(deltaattr) > 1:
+ doit = True
+ if doit:
+ deltaattr.remove("dn")
+ for att in deltaattr:
+ if att.lower() == "dn":
+ continue
+ if (deltaattr.get(att) is not None
+ and deltaattr.get(att).flags() != FLAG_MOD_ADD):
+ doit = False
+ elif deltaattr.get(att) is None:
+ doit = False
+ if doit:
+ message(CHANGE, "Applying delta to @ATTRIBUTES")
+ deltaattr.dn = ldb.Dn(basesam, "@ATTRIBUTES")
+ basesam.modify(deltaattr)
+ else:
+ message(CHANGE, "Not applying delta to @ATTRIBUTES because "
+ "there is not only add")
+ # 13)
+ if opts.full:
+ if not update_samdb(new_ldbs.sam, ldbs.sam, names, lastProvisionUSNs,
+ schema, schemareloadclosure):
+ message(SIMPLE, "Rolling back all changes. Check the cause"
+ " of the problem")
+ message(SIMPLE, "Your system is as it was before the upgrade")
+ ldbs.groupedRollback()
+ new_ldbs.groupedRollback()
+ shutil.rmtree(provisiondir)
+ sys.exit(1)
+ else:
+ # Try to reapply the change even when we do not change the sam,
+ # as in the delta_upgrade case
+ schemareloadclosure()
+ sync_calculated_attributes(ldbs.sam, names)
+ res = ldbs.sam.search(expression="(samaccountname=dns)",
+ scope=SCOPE_SUBTREE, attrs=["dn"],
+ controls=["search_options:1:2"])
+ if len(res) > 0:
+ message(SIMPLE, "You still have the old DNS object for managing "
+ "dynamic DNS, but you didn't supply --full so "
+ "a correct update can't be done")
+ ldbs.groupedRollback()
+ new_ldbs.groupedRollback()
+ shutil.rmtree(provisiondir)
+ sys.exit(1)
+ # 14)
+ update_secrets(new_ldbs.secrets, ldbs.secrets, message)
+ # 14bis)
+ res = ldbs.sam.search(expression="(samaccountname=dns)",
+ scope=SCOPE_SUBTREE, attrs=["dn"],
+ controls=["search_options:1:2"])
+
+ if (len(res) == 1):
+ ldbs.sam.delete(res[0]["dn"])
+ res2 = ldbs.secrets.search(expression="(samaccountname=dns)",
+ scope=SCOPE_SUBTREE, attrs=["dn"])
+ update_dns_account_password(ldbs.sam, ldbs.secrets, names)
+ message(SIMPLE, "IMPORTANT!!! "
+ "If you were using Dynamic DNS before you need "
+ "to update your configuration, so that the "
+ "tkey-gssapi-credential has the following value: "
+ "DNS/%s.%s" % (names.netbiosname.lower(),
+ names.realm.lower()))
+ # 15)
+ message(SIMPLE, "Update machine account")
+ update_machine_account_password(ldbs.sam, ldbs.secrets, names)
+
+ # 16) SDs should be created with the admin account, but as some previous ACLs
+ # were so wrong that admin can't modify them, we first have to recreate them in
+ # the correct form with the system account and then give ownership to admin ...
+ if opts.very_old_pre_alpha9:
+ message(SIMPLE, "Fixing very old provision SD")
+ rebuild_sd(ldbs.sam, names)
+
+ # We calculate the max USN before recalculating the SDs because we might
+ # touch objects that have been modified after a provision, and we do not
+ # want the next upgradeprovision to think that it has a green light
+ # to modify them
+
+ # 17)
+ maxUSN = get_max_usn(ldbs.sam, str(names.rootdn))
+
+ # 18) We rebuild SDs if we have a list of DNs to recalculate or if
+ # defSDmodified is set.
+ if opts.full and (defSDmodified or len(dnToRecalculate) >0):
+ message(SIMPLE, "Some (default) security descriptors (SDs) have "
+ "changed, recalculating them")
+ ldbs.sam.set_session_info(adm_session)
+ rebuild_sd(ldbs.sam, names)
+
+ # 19)
+ # Now that we are quite confident in the SD recalculation process, we make
+ # this check optional. We also skip it if there are DNs that we must touch,
+ # as we are assured that those DNs will show differences.
+ # The check must also be done in a cleverer way, as for the moment we just
+ # compare SDDL
+ if dnNotToRecalculateFound == False and (opts.debugchangesd or opts.debugall):
+ message(CHANGESD, "Checking recalculated SDs")
+ check_updated_sd(new_ldbs.sam, ldbs.sam, names)
+
+ # 20)
+ updateOEMInfo(ldbs.sam, str(names.rootdn))
+ # 21)
+ check_for_DNS(newpaths.private_dir, paths.private_dir,
+ newpaths.binddns_dir, paths.binddns_dir,
+ names.dns_backend)
+ # 22)
+ update_provision_usn(ldbs.sam, minUSN, maxUSN, names.invocation)
+ if opts.full and (names.policyid is None or names.policyid_dc is None):
+ update_policyids(names, ldbs.sam)
+
+ if opts.full:
+ try:
+ update_gpo(paths, ldbs.sam, names, lp, message)
+ except ProvisioningError as e:
+ message(ERROR, "The policy for domain controller is missing. "
+ "You should restart upgradeprovision with --full")
+
+ ldbs.groupedCommit()
+ new_ldbs.groupedCommit()
+ message(SIMPLE, "Upgrade finished!")
+ # Remove the reference provision now that everything is done!
+ # We have already reindexed first, if needed, when the merged schema was
+ # reloaded (as new attributes could have kicked in).
+ # But the second part of the update (when we update existing objects)
+ # can also have an influence on indexing, as some attributes might have
+ # their searchFlags modified
+ message(SIMPLE, "Reopening samdb to trigger reindexing if needed "
+ "after modification")
+ samdb = Ldb(paths.samdb, session_info=session, credentials=creds, lp=lp)
+ message(SIMPLE, "Reindexing finished")
+
+ shutil.rmtree(provisiondir)
+ except Exception as err:
+ message(ERROR, "A problem occurred while trying to upgrade your "
+ "provision. A full backup is located at %s" % backupdir)
+ if opts.debugall or opts.debugchange:
+ (typ, val, tb) = sys.exc_info()
+ traceback.print_exception(typ, val, tb)
+ sys.exit(1)
diff --git a/source4/scripting/bin/setup_dns.sh b/source4/scripting/bin/setup_dns.sh
new file mode 100755
index 0000000..143f2c2
--- /dev/null
+++ b/source4/scripting/bin/setup_dns.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# example script to set up DNS for a vampired domain
+
+[ $# = 3 ] || {
+ echo "Usage: setup_dns.sh HOSTNAME DOMAIN IP"
+ exit 1
+}
+
+HOSTNAME="$(echo $1 | tr '[a-z]' '[A-Z]')"
+DOMAIN="$(echo $2 | tr '[a-z]' '[A-Z]')"
+IP="$3"
+
+RSUFFIX=$(echo $DOMAIN | sed s/[\.]/,DC=/g)
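+# e.g. DOMAIN=SAMBA.EXAMPLE.COM gives RSUFFIX=SAMBA,DC=EXAMPLE,DC=COM,
+# which is used below as "...,DC=$RSUFFIX"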
+
+[ -z "$PRIVATEDIR" ] && {
+ PRIVATEDIR=$(bin/samba-tool testparm --section-name=global --parameter-name='private dir' --suppress-prompt 2>/dev/null)
+}
+
+OBJECTGUID=$(bin/ldbsearch --scope=base -H "$PRIVATEDIR/sam.ldb" -b "CN=NTDS Settings,CN=$HOSTNAME,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=$RSUFFIX" objectguid | grep ^objectGUID | cut -d: -f2)
+
+samba4kinit=kinit
+if test -x $BINDIR/samba4kinit; then
+ samba4kinit=bin/samba4kinit
+fi
+
+echo "Found objectGUID $OBJECTGUID"
+
+echo "Running kinit for $HOSTNAME\$@$DOMAIN"
+$samba4kinit -e arcfour-hmac-md5 -k -t "$PRIVATEDIR/secrets.keytab" $HOSTNAME\$@$DOMAIN || exit 1
+echo "Adding $HOSTNAME.$DOMAIN"
+scripting/bin/nsupdate-gss --noverify $HOSTNAME $DOMAIN $IP 300 || {
+ echo "Failed to add A record"
+ exit 1
+}
+echo "Adding $OBJECTGUID._msdcs.$DOMAIN => $HOSTNAME.$DOMAIN"
+scripting/bin/nsupdate-gss --realm=$DOMAIN --noverify --ntype="CNAME" $OBJECTGUID _msdcs.$DOMAIN $HOSTNAME.$DOMAIN 300 || {
+ echo "Failed to add CNAME"
+ exit 1
+}
+echo "Checking"
+rndc flush
+host $HOSTNAME.$DOMAIN
+host $OBJECTGUID._msdcs.$DOMAIN
diff --git a/source4/scripting/bin/subunitrun b/source4/scripting/bin/subunitrun
new file mode 100755
index 0000000..7bfa851
--- /dev/null
+++ b/source4/scripting/bin/subunitrun
@@ -0,0 +1,87 @@
+#!/usr/bin/env python3
+
+# Simple subunit testrunner for python
+
+# NOTE: This is deprecated - Using the standard subunit runner is
+# preferred - e.g. "python -m samba.subunit.run YOURMODULE".
+#
+# This wrapper will be removed once all tests can be run
+# without it. At the moment there are various tests which still
+# get e.g. credentials passed via command-line options to this
+# script.
+
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2014
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+
+# make sure the script dies immediately when hitting control-C,
+# rather than raising KeyboardInterrupt. As we do all database
+# operations using transactions, this is safe.
+import signal
+signal.signal(signal.SIGINT, signal.SIG_DFL)
+
+# Find right directory when running from source tree
+sys.path.insert(0, "bin/python")
+
+import optparse
+import samba
+from samba.tests.subunitrun import TestProgram, SubunitOptions
+
+import samba.getopt as options
+import samba.tests
+
+
+usage = 'subunitrun [options] <tests>'
+description = '''
+This runs a Samba python test suite. The tests are typically located in
+python/samba/tests/*.py
+
+To run the tests from one of those modules, specify the test as
+samba.tests.MODULE. For example, to run the tests in common.py:
+
+ subunitrun samba.tests.common
+
+To list the tests in that module, use:
+
+ subunitrun -l samba.tests.common
+
+NOTE: This script is deprecated in favor of "python -m subunit.run". Don't use
+it if it can be avoided.
+'''
+
+def format_description(formatter):
+ '''hack to prevent textwrap of the description'''
+ return description
+
+parser = optparse.OptionParser(usage=usage, description=description)
+parser.format_description = format_description
+credopts = options.CredentialsOptions(parser)
+sambaopts = options.SambaOptions(parser)
+subunitopts = SubunitOptions(parser)
+parser.add_option_group(credopts)
+parser.add_option_group(sambaopts)
+parser.add_option_group(subunitopts)
+
+opts, args = parser.parse_args()
+
+if not getattr(opts, "listtests", False):
+ lp = sambaopts.get_loadparm()
+ samba.tests.cmdline_credentials = credopts.get_credentials(lp)
+if getattr(opts, 'load_list', None):
+ args.insert(0, "--load-list=%s" % opts.load_list)
+
+TestProgram(module=None, args=args, opts=subunitopts)
diff --git a/source4/scripting/bin/wscript_build b/source4/scripting/bin/wscript_build
new file mode 100644
index 0000000..d31afb2
--- /dev/null
+++ b/source4/scripting/bin/wscript_build
@@ -0,0 +1,14 @@
+#!/usr/bin/env python3
+
+if bld.CONFIG_SET('AD_DC_BUILD_IS_ENABLED'):
+ for script in ['samba_dnsupdate',
+ 'samba_spnupdate',
+ 'samba_kcc',
+ 'samba_upgradeprovision',
+ 'samba_upgradedns',
+ 'gen_output.py',
+ 'samba_downgrade_db']:
+ bld.SAMBA_SCRIPT(script, pattern=script, installdir='.')
+if bld.CONFIG_SET('WITH_ADS'):
+ bld.SAMBA_SCRIPT('samba-tool', pattern='samba-tool', installdir='.')
+bld.SAMBA_SCRIPT('samba-gpupdate', pattern='samba-gpupdate', installdir='.')
diff --git a/source4/scripting/devel/addlotscontacts b/source4/scripting/devel/addlotscontacts
new file mode 100644
index 0000000..9ecd16b
--- /dev/null
+++ b/source4/scripting/devel/addlotscontacts
@@ -0,0 +1,95 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) Matthieu Patou <mat@matws.net> 2010
+#
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+__docformat__ = "restructuredText"
+
+
+import optparse
+import sys
+# Allow running from the s4 source directory (without installing samba)
+sys.path.insert(0, "bin/python")
+
+import samba.getopt as options
+from samba.credentials import DONT_USE_KERBEROS
+from samba.auth import system_session
+from samba import param
+from samba.provision import find_provision_key_parameters
+from samba.upgradehelpers import (get_paths, get_ldbs)
+from ldb import SCOPE_BASE, Message, MessageElement, Dn, FLAG_MOD_ADD
+
+parser = optparse.OptionParser("addlotscontacts [options]")
+sambaopts = options.SambaOptions(parser)
+parser.add_option_group(sambaopts)
+parser.add_option_group(options.VersionOptions(parser))
+credopts = options.CredentialsOptions(parser)
+parser.add_option_group(credopts)
+
+(opts, args) = parser.parse_args()
+
+lp = sambaopts.get_loadparm()
+smbconf = lp.configfile
+creds = credopts.get_credentials(lp)
+creds.set_kerberos_state(DONT_USE_KERBEROS)
+
+if len(args) > 0:
+ num_contacts = int(args[0])
+else:
+ num_contacts = 10000
+
+if __name__ == '__main__':
+ paths = get_paths(param, smbconf=smbconf)
+ session = system_session()
+
+ ldbs = get_ldbs(paths, creds, session, lp)
+ ldbs.startTransactions()
+
+ names = find_provision_key_parameters(ldbs.sam, ldbs.secrets, ldbs.idmap,
+ paths, smbconf, lp)
+
+ contactdn = "OU=Contacts,%s" % str(names.domaindn)
+ res = ldbs.sam.search(expression="(distinguishedName=%s)" % contactdn,
+ base=str(names.domaindn),
+ scope=SCOPE_BASE)
+
+ if (len(res) == 0):
+ msg = Message()
+ msg.dn = Dn(ldbs.sam, contactdn)
+ msg["objectClass"] = MessageElement("organizationalUnit", FLAG_MOD_ADD,
+ "objectClass")
+
+ ldbs.sam.add(msg)
+
+ print("Creating %d contacts" % num_contacts)
+ count = 0
+ increment = num_contacts / 10
+ if increment > 5000:
+ increment = 5000
+
+ while (count < num_contacts):
+ msg = Message()
+ msg.dn = Dn(ldbs.sam, "CN=contact%d,%s" % (count + 1, contactdn))
+ msg["objectClass"] = MessageElement("contact", FLAG_MOD_ADD,
+ "objectClass")
+
+ if count !=0 and (count % increment) == 0:
+ print("Added contacts: %d" % count)
+
+ ldbs.sam.add(msg)
+ count += 1
+
+ ldbs.groupedCommit()
diff --git a/source4/scripting/devel/chgkrbtgtpass b/source4/scripting/devel/chgkrbtgtpass
new file mode 100644
index 0000000..2beb2e7
--- /dev/null
+++ b/source4/scripting/devel/chgkrbtgtpass
@@ -0,0 +1,60 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) Matthieu Patou <mat@matws.net> 2010
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2015
+#
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+__docformat__ = "restructuredText"
+
+
+import optparse
+import sys
+# Allow running from the s4 source directory (without installing samba)
+sys.path.insert(0, "bin/python")
+
+import samba.getopt as options
+from samba.credentials import DONT_USE_KERBEROS
+from samba.auth import system_session
+from samba import param
+from samba.provision import find_provision_key_parameters
+from samba.upgradehelpers import (get_paths,
+ get_ldbs,
+ update_krbtgt_account_password)
+
+parser = optparse.OptionParser("chgkrbtgtpass [options]")
+sambaopts = options.SambaOptions(parser)
+parser.add_option_group(sambaopts)
+parser.add_option_group(options.VersionOptions(parser))
+credopts = options.CredentialsOptions(parser)
+parser.add_option_group(credopts)
+
+opts = parser.parse_args()[0]
+
+lp = sambaopts.get_loadparm()
+smbconf = lp.configfile
+creds = credopts.get_credentials(lp)
+creds.set_kerberos_state(DONT_USE_KERBEROS)
+
+
+paths = get_paths(param, smbconf=smbconf)
+session = system_session()
+
+ldbs = get_ldbs(paths, creds, session, lp)
+ldbs.startTransactions()
+
+update_krbtgt_account_password(ldbs.sam)
+ldbs.groupedCommit()
diff --git a/source4/scripting/devel/chgtdcpass b/source4/scripting/devel/chgtdcpass
new file mode 100755
index 0000000..8f2415c
--- /dev/null
+++ b/source4/scripting/devel/chgtdcpass
@@ -0,0 +1,63 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) Matthieu Patou <mat@matws.net> 2010
+#
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+__docformat__ = "restructuredText"
+
+
+import optparse
+import sys
+# Allow running from the s4 source directory (without installing samba)
+sys.path.insert(0, "bin/python")
+
+import samba.getopt as options
+from samba.credentials import DONT_USE_KERBEROS
+from samba.auth import system_session
+from samba import param
+from samba.provision import find_provision_key_parameters
+from samba.upgradehelpers import (get_paths,
+ get_ldbs,
+ update_machine_account_password)
+
+parser = optparse.OptionParser("chgtdcpass [options]")
+sambaopts = options.SambaOptions(parser)
+parser.add_option_group(sambaopts)
+parser.add_option_group(options.VersionOptions(parser))
+credopts = options.CredentialsOptions(parser)
+parser.add_option_group(credopts)
+
+opts = parser.parse_args()[0]
+
+lp = sambaopts.get_loadparm()
+smbconf = lp.configfile
+creds = credopts.get_credentials(lp)
+creds.set_kerberos_state(DONT_USE_KERBEROS)
+
+
+if __name__ == '__main__':
+ paths = get_paths(param, smbconf=smbconf)
+ session = system_session()
+
+ ldbs = get_ldbs(paths, creds, session, lp)
+ ldbs.startTransactions()
+
+ names = find_provision_key_parameters(ldbs.sam, ldbs.secrets, ldbs.idmap,
+ paths, smbconf, lp)
+
+ update_machine_account_password(ldbs.sam, ldbs.secrets, names)
+ ldbs.groupedCommit()
diff --git a/source4/scripting/devel/config_base b/source4/scripting/devel/config_base
new file mode 100755
index 0000000..f593f2f
--- /dev/null
+++ b/source4/scripting/devel/config_base
@@ -0,0 +1,39 @@
+#!/usr/bin/env python3
+
+# this is useful for running samba tools with a different prefix
+
+# for example:
+# samba-tool $(scripting/devel/config_base /tmp/testprefix) join .....
+
+import sys, os
+
+vars = {
+ "ncalrpc dir" : "${PREFIX}/var/ncalrpc",
+ "private dir" : "${PREFIX}/private",
+ "lock dir" : "${PREFIX}/var/locks",
+ "pid directory" : "${PREFIX}/var/run",
+ "winbindd socket directory" : "${PREFIX}/var/run/winbindd",
+ "ntp signd socket directory" : "${PREFIX}/var/run/ntp_signd"
+}
+
+if len(sys.argv) != 2:
+ print("Usage: config_base BASEDIRECTORY")
+ sys.exit(1)
+
+prefix = sys.argv[1]
+
+config_dir = prefix + "/etc"
+config_file = config_dir + "/smb.conf"
+
+if not os.path.isdir(config_dir):
+ os.makedirs(config_dir, mode=0o755)
+if not os.path.isfile(config_file):
+ open(config_file, mode='w').close()
+
+options = (
+ " --configfile=${PREFIX}/etc/smb.conf" +
+ "".join(" --option=%s=%s" % (v.replace(" ", ""), vars[v]) for v in vars)
+ ).replace("${PREFIX}", prefix)
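+# With BASEDIRECTORY=/tmp/testprefix this prints a single line roughly like
+# (option order may vary):
+#   --configfile=/tmp/testprefix/etc/smb.conf --option=ncalrpcdir=/tmp/testprefix/var/ncalrpc ...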
+
+
+print(options)
diff --git a/source4/scripting/devel/crackname b/source4/scripting/devel/crackname
new file mode 100755
index 0000000..021adfa
--- /dev/null
+++ b/source4/scripting/devel/crackname
@@ -0,0 +1,77 @@
+#!/usr/bin/env python3
+
+# Copyright Matthieu Patou <mat@matws.net> 2011
+# script to call a DRSUAPI crackname
+# this is useful for plugfest testing and replication debugging
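+# By default it cracks an objectGUID (DRSUAPI_DS_NAME_FORMAT_GUID) into a
+# fully qualified 1779 DN (DRSUAPI_DS_NAME_FORMAT_FQDN_1779); see the
+# --informat/--outformat options below.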
+import sys
+from optparse import OptionParser
+
+sys.path.insert(0, "bin/python")
+
+import samba.getopt as options
+from samba.dcerpc import drsuapi, misc
+
+def do_DsBind(drs):
+ '''make a DsBind call, returning the binding handle'''
+ bind_info = drsuapi.DsBindInfoCtr()
+ bind_info.length = 28
+ bind_info.info = drsuapi.DsBindInfo28()
+ bind_info.info.supported_extensions = 0
+ (info, handle) = drs.DsBind(misc.GUID(drsuapi.DRSUAPI_DS_BIND_GUID), bind_info)
+ return handle
+
+
+########### main code ###########
+if __name__ == "__main__":
+ parser = OptionParser("crackname server [options]")
+ sambaopts = options.SambaOptions(parser)
+ parser.add_option_group(sambaopts)
+ credopts = options.CredentialsOptionsDouble(parser)
+ parser.add_option_group(credopts)
+
+ parser.add_option("", "--name", type='str',
+ default='{ED9F5546-9729-4B04-9385-3FCFE2B17BA1}', help="name to crack")
+ parser.add_option("", "--outformat", type='int',
+ default=drsuapi.DRSUAPI_DS_NAME_FORMAT_FQDN_1779,
+ help='format desired')
+ parser.add_option("", "--informat", type='int',
+ default=drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID,
+ help='format offered')
+
+ (opts, args) = parser.parse_args()
+
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp)
+
+ if len(args) != 1:
+ parser.error("You must supply a server")
+
+ if creds.is_anonymous():
+ parser.error("You must supply credentials")
+
+ server = args[0]
+
+ binding_str = "ncacn_ip_tcp:%s[seal,print]" % server
+
+ drs = drsuapi.drsuapi(binding_str, lp, creds)
+ drs_handle = do_DsBind(drs)
+ print("DRS Handle: %s" % drs_handle)
+
+ req = drsuapi.DsNameRequest1()
+ names = drsuapi.DsNameString()
+ names.str = opts.name
+
+ req.codepage = 1252
+ req.language = 1033
+ req.format_flags = 0
+ req.format_offered = opts.informat
+ req.format_desired = opts.outformat
+ req.count = 1
+ req.names = [names]
+
+ (result, ctr) = drs.DsCrackNames(drs_handle, 1, req)
+ print("# of result = %d" %ctr.count)
+ if ctr.count:
+ print("status = %d" % ctr.array[0].status)
+ print("result name = %s" % ctr.array[0].result_name)
+ print("domain = %s" % ctr.array[0].dns_domain_name)
diff --git a/source4/scripting/devel/demodirsync.py b/source4/scripting/devel/demodirsync.py
new file mode 100755
index 0000000..e21dbbc
--- /dev/null
+++ b/source4/scripting/devel/demodirsync.py
@@ -0,0 +1,159 @@
+#!/usr/bin/python
+
+import optparse
+import sys
+import base64
+
+sys.path.insert(0, "bin/python")
+
+import samba.getopt as options
+from samba.dcerpc import drsblobs, misc
+from samba.ndr import ndr_pack, ndr_unpack
+from samba import Ldb
+
+parser = optparse.OptionParser("demodirsync [options]")
+sambaopts = options.SambaOptions(parser)
+credopts = options.CredentialsOptions(parser)
+parser.add_option_group(credopts)
+
+parser.add_option("-b", type="string", metavar="BASE",
+ help="set base DN for the search")
+parser.add_option("--host", type="string", metavar="HOST",
+ help="Ip of the host")
+
+lp = sambaopts.get_loadparm()
+creds = credopts.get_credentials(lp)
+
+opts = parser.parse_args()[0]
+
+if opts.host is None:
+ print("Usage: demodirsync.py --host HOST [-b BASE]")
+ sys.exit(1)
+
+def printdirsync(ctl):
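+ # The dirsync reply control is a ':'-separated string; this script relies
+ # on field 1 being the "need to continue" flag and field 3 being the
+ # base64-encoded dirsync cookie blob.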
+ arr = ctl.split(':')
+ if arr[0] == 'dirsync':
+ print("Need to continue: %s" % arr[1])
+ cookie = ndr_unpack(drsblobs.ldapControlDirSyncCookie, base64.b64decode(arr[3]))
+ print("DC's NTDS guid: %s " % cookie.blob.guid1)
+ print("highest usn %s" % cookie.blob.highwatermark.highest_usn)
+ print("tmp higest usn %s" % cookie.blob.highwatermark.tmp_highest_usn)
+ print("reserved usn %s" % cookie.blob.highwatermark.reserved_usn)
+ if cookie.blob.extra_length > 0:
+ print("highest usn in extra %s" % cookie.blob.extra.ctr.cursors[0].highest_usn)
+ return cookie
+
+
+remote_ldb = Ldb("ldap://" + opts.host + ":389", credentials=creds, lp=lp)
+tab = []
+if opts.b:
+ base = opts.b
+else:
+ base = None
+
+guid = None
+(msgs, ctrls) = remote_ldb.search(expression="(samaccountname=administrator)", base=base, attrs=["objectClass"], controls=["dirsync:1:1:50"])
+if (len(ctrls)):
+ for ctl in ctrls:
+ arr = ctl.split(':')
+ if arr[0] == 'dirsync':
+ cookie = ndr_unpack(drsblobs.ldapControlDirSyncCookie, base64.b64decode(arr[3]))
+ guid = cookie.blob.guid1
+if not guid:
+ print("No dirsync control ... strange")
+ sys.exit(1)
+
+print("")
+print("Getting first guest without any cookie")
+(msgs, ctrls) = remote_ldb.searchex(expression="(samaccountname=guest)", base=base, attrs=["objectClass"], controls=["dirsync:1:1:50"])
+cookie = None
+if (len(ctrls)):
+ for ctl in ctrls:
+ cookie = printdirsync(ctl)
+ print("Returned %d entries" % len(msgs))
+
+savedcookie = cookie
+
+print("")
+print("Getting allusers with cookie")
+controls = ["dirsync:1:1:50:%s" % base64.b64encode(ndr_pack(cookie)).decode('utf8')]
+(msgs, ctrls) = remote_ldb.searchex(expression="(samaccountname=*)", base=base, attrs=["objectClass"], controls=controls)
+if (len(ctrls)):
+ for ctl in ctrls:
+ printdirsync(ctl)
+ print("Returned %d entries" % len(msgs))
+
+cookie = savedcookie
+cookie.blob.guid1 = misc.GUID("128a99bf-e2df-4832-ac0a-1fb625e530db")
+if cookie.blob.extra_length > 0:
+ cookie.blob.extra.ctr.cursors[0].source_dsa_invocation_id = misc.GUID("128a99bf-e2df-4832-ac0a-1fb625e530db")
+
+print("")
+print("Getting all the entries")
+controls = ["dirsync:1:1:50:%s" % base64.b64encode(ndr_pack(cookie)).decode('utf8')]
+(msgs, ctrls) = remote_ldb.searchex(expression="(objectclass=*)", base=base, controls=controls)
+cont = 0
+if (len(ctrls)):
+ for ctl in ctrls:
+ cookie = printdirsync(ctl)
+ if cookie is not None:
+ cont = (ctl.split(':'))[1]
+ print("Returned %d entries" % len(msgs))
+
+usn = cookie.blob.highwatermark.tmp_highest_usn
+if cookie.blob.extra_length > 0:
+ bigusn = cookie.blob.extra.ctr.cursors[0].highest_usn
+else:
+ bigusn = usn + 1000
+while (cont == "1"):
+ print("")
+ controls = ["dirsync:1:1:50:%s" % base64.b64encode(ndr_pack(cookie)).decode('utf8')]
+ (msgs, ctrls) = remote_ldb.searchex(expression="(objectclass=*)", base=base, controls=controls)
+ if (len(ctrls)):
+ for ctl in ctrls:
+ cookie = printdirsync(ctl)
+ if cookie is not None:
+ cont = (ctl.split(':'))[1]
+ print("Returned %d entries" % len(msgs))
+
+print("")
+print("Getting with cookie but usn changed to %d we should use the one in extra" % (bigusn - 1))
+cookie.blob.highwatermark.highest_usn = 0
+cookie.blob.highwatermark.tmp_highest_usn = usn - 2
+if cookie.blob.extra_length > 0:
+ print("here")
+ cookie.blob.extra.ctr.cursors[0].highest_usn = bigusn - 1
+controls = ["dirsync:1:1:50:%s" % base64.b64encode(ndr_pack(cookie)).decode('utf8')]
+(msgs, ctrls) = remote_ldb.searchex(expression="(objectclass=*)", base=base, controls=controls)
+if (len(ctrls)):
+ for ctl in ctrls:
+ cookie = printdirsync(ctl)
+ print("Returned %d entries" % len(msgs))
+
+print("")
+print("Getting with cookie but usn %d changed and extra/cursor GUID too" % (usn - 2))
+print(" so that it's (tmp)highest_usn that drives the limit")
+cookie.blob.highwatermark.highest_usn = 0
+cookie.blob.highwatermark.tmp_highest_usn = usn - 2
+if cookie.blob.extra_length > 0:
+ cookie.blob.extra.ctr.cursors[0].source_dsa_invocation_id = misc.GUID("128a99bf-e2df-4832-ac0a-1fb625e530db")
+ cookie.blob.extra.ctr.cursors[0].highest_usn = bigusn - 1
+controls = ["dirsync:1:1:50:%s" % base64.b64encode(ndr_pack(cookie)).decode('utf8')]
+(msgs, ctrls) = remote_ldb.searchex(expression="(objectclass=*)", base=base, controls=controls)
+if (len(ctrls)):
+ for ctl in ctrls:
+ cookie = printdirsync(ctl)
+ print("Returned %d entries" % len(msgs))
+
+print("")
+print("Getting with cookie but usn changed to %d" % (usn - 2))
+cookie.blob.highwatermark.highest_usn = 0
+cookie.blob.highwatermark.tmp_highest_usn = (usn - 2)
+if cookie.blob.extra_length > 0:
+ cookie.blob.extra.ctr.cursors[0].highest_usn = (usn - 2)
+controls = ["dirsync:1:1:50:%s" % base64.b64encode(ndr_pack(cookie)).decode('utf8')]
+(msgs, ctrls) = remote_ldb.searchex(expression="(objectclass=*)", base=base, controls=controls)
+if (len(ctrls)):
+ for ctl in ctrls:
+ cookie = printdirsync(ctl)
+ print("Returned %d entries" % len(msgs))
diff --git a/source4/scripting/devel/drs/fsmo.ldif.template b/source4/scripting/devel/drs/fsmo.ldif.template
new file mode 100644
index 0000000..d5b373a
--- /dev/null
+++ b/source4/scripting/devel/drs/fsmo.ldif.template
@@ -0,0 +1,75 @@
+dn: CN=RID Manager$,CN=System,BASEDN
+changetype: modify
+replace: fSMORoleOwner
+fSMORoleOwner: CN=NTDS Settings,CN=MACHINE,CN=Servers,CN=Default-First-Site-Name,C
+ N=Sites,CN=Configuration,BASEDN
+-
+
+dn: BASEDN
+changetype: modify
+replace: fSMORoleOwner
+fSMORoleOwner: CN=NTDS Settings,CN=MACHINE,CN=Servers,CN=Default-First-Site-Name,C
+ N=Sites,CN=Configuration,BASEDN
+-
+
+dn: CN=Infrastructure,BASEDN
+changetype: modify
+replace: fSMORoleOwner
+fSMORoleOwner: CN=NTDS Settings,CN=MACHINE,CN=Servers,CN=Default-First-Site-Name,C
+ N=Sites,CN=Configuration,BASEDN
+-
+
+dn: CN=Partitions,CN=Configuration,BASEDN
+changetype: modify
+replace: fSMORoleOwner
+fSMORoleOwner: CN=NTDS Settings,CN=MACHINE,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,BASEDN
+-
+
+dn: CN=Schema,CN=Configuration,BASEDN
+changetype: modify
+replace: fSMORoleOwner
+fSMORoleOwner: CN=NTDS Settings,CN=MACHINE,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,BASEDN
+-
+
+dn: CN=NTDS Settings,CN=MACHINE,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,BASEDN
+changetype: modify
+replace: options
+options: 1
+-
+
+dn: CN=MACHINE,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,BASEDN
+changetype: modify
+replace: dNSHostName
+dNSHostName: MACHINE.DNSDOMAIN
+-
+
+dn: CN=NTDS Site Settings,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,BASEDN
+changetype: modify
+replace: interSiteTopologyGenerator
+interSiteTopologyGenerator: CN=NTDS Settings,CN=MACHINE,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,BASEDN
+-
+
+dn: CN=MACHINE,OU=Domain Controllers,BASEDN
+changetype: modify
+replace: servicePrincipalName
+servicePrincipalName: GC/MACHINE.DNSDOMAIN/DNSDOMAIN
+servicePrincipalName: HOST/MACHINE/NETBIOSDOMAIN
+servicePrincipalName: ldap/MACHINE/NETBIOSDOMAIN
+servicePrincipalName: ldap/MACHINE.DNSDOMAIN/ForestDnsZones.DNSDOMAIN
+servicePrincipalName: ldap/MACHINE.DNSDOMAIN/DomainDnsZones.DNSDOMAIN
+servicePrincipalName: DNS/MACHINE.DNSDOMAIN
+servicePrincipalName: RestrictedKrbHost/MACHINE.DNSDOMAIN
+servicePrincipalName: RestrictedKrbHost/MACHINE
+servicePrincipalName: HOST/MACHINE.DNSDOMAIN/NETBIOSDOMAIN
+servicePrincipalName: HOST/MACHINE
+servicePrincipalName: HOST/MACHINE.DNSDOMAIN
+servicePrincipalName: HOST/MACHINE.DNSDOMAIN/DNSDOMAIN
+servicePrincipalName: ldap/MACHINE.DNSDOMAIN/NETBIOSDOMAIN
+servicePrincipalName: ldap/MACHINE
+servicePrincipalName: ldap/MACHINE.DNSDOMAIN
+servicePrincipalName: ldap/MACHINE.DNSDOMAIN/DNSDOMAIN
+servicePrincipalName: E3514235-4B06-11D1-AB04-00C04FC2DCD2/NTDSGUID/DNSDOMAIN
+servicePrincipalName: ldap/NTDSGUID._msdcs.DNSDOMAIN
+servicePrincipalName: Dfsr-12F9A27C-BF97-4787-9364-D31B6C55EB04/MACHINE.DNSDOMAIN
+servicePrincipalName: NtFrs-88f5d2bd-b646-11d2-a6d3-00c04fc9b232/MACHINE.DNSDOMAIN
+-
diff --git a/source4/scripting/devel/drs/named.conf.ad.template b/source4/scripting/devel/drs/named.conf.ad.template
new file mode 100644
index 0000000..071c98c
--- /dev/null
+++ b/source4/scripting/devel/drs/named.conf.ad.template
@@ -0,0 +1,6 @@
+zone "DNSDOMAIN" IN {
+ type forward;
+ forwarders {
+ SERVERIP;
+ };
+};
diff --git a/source4/scripting/devel/drs/revampire_ad.sh b/source4/scripting/devel/drs/revampire_ad.sh
new file mode 100755
index 0000000..cd3164c
--- /dev/null
+++ b/source4/scripting/devel/drs/revampire_ad.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+set -x
+
+. $(dirname $0)/vars
+
+$(dirname $0)/vampire_ad.sh || exit 1
+
+ntds_guid=$(sudo bin/ldbsearch -H $PREFIX/private/sam.ldb -b "CN=NTDS Settings,CN=$machine,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,$dn" objectGUID | grep ^objectGUID | awk '{print $2}')
+
+cp $PREFIX/private/$DNSDOMAIN.zone{.template,}
+sed -i "s/NTDSGUID/$ntds_guid/g" $PREFIX/private/$DNSDOMAIN.zone
+cp $PREFIX/private/named.conf{.local,}
+sudo rndc reconfig
+fsmotmp=$(mktemp fsmo.ldif.XXXXXXXXX)
+cp $(dirname $0)/fsmo.ldif.template $fsmotmp
+sed -i "s/NTDSGUID/$ntds_guid/g" $fsmotmp
+sed -i "s/MACHINE/$machine/g" $fsmotmp
+sed -i "s/DNSDOMAIN/$DNSDOMAIN/g" $fsmotmp
+sed -i "s/BASEDN/$dn/g" $fsmotmp
+sed -i "s/NETBIOSDOMAIN/$workgroup/g" $fsmotmp
+sudo bin/ldbmodify -H $PREFIX/private/sam.ldb $fsmotmp
+rm $fsmotmp
diff --git a/source4/scripting/devel/drs/unvampire_ad.sh b/source4/scripting/devel/drs/unvampire_ad.sh
new file mode 100755
index 0000000..c005374
--- /dev/null
+++ b/source4/scripting/devel/drs/unvampire_ad.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+set -x
+
+. $(dirname $0)/vars
+
+if [ -z "$site" ]; then
+ site="Default-First-Site-Name"
+fi
+
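+# Delete this DC's computer account, domain-controller account and server
+# object from the remote AD over LDAP, then wipe the local *.ldb databases.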
+bin/ldbdel -r -H ldap://$server.$DNSDOMAIN -U$workgroup/administrator%$pass "CN=$machine,CN=Computers,$dn"
+bin/ldbdel -r -H ldap://$server.$DNSDOMAIN -U$workgroup/administrator%$pass "CN=$machine,OU=Domain Controllers,$dn"
+bin/ldbdel -r -H ldap://$server.$DNSDOMAIN -U$workgroup/administrator%$pass "CN=$machine,CN=Servers,CN=$site,CN=Sites,CN=Configuration,$dn"
+rm -f $PREFIX/private/*.ldb
diff --git a/source4/scripting/devel/drs/vampire_ad.sh b/source4/scripting/devel/drs/vampire_ad.sh
new file mode 100755
index 0000000..f3cdc3c
--- /dev/null
+++ b/source4/scripting/devel/drs/vampire_ad.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+set -x
+
+. $(dirname $0)/vars
+
+namedtmp=$(mktemp named.conf.ad.XXXXXXXXX)
+cp $(dirname $0)/named.conf.ad.template $namedtmp
+sed -i "s/DNSDOMAIN/$DNSDOMAIN/g" $namedtmp
+sed -i "s/SERVERIP/$server_ip/g" $namedtmp
+chmod a+r $namedtmp
+mv -f $namedtmp $PREFIX/private/named.conf
+sudo rndc reconfig
+$(dirname $0)/unvampire_ad.sh
+
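+# Remove any stale A record for this machine's IP at the domain apex before
+# re-joining; nsupdate -g authenticates via GSS-TSIG using the administrator
+# kinit below.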
+cat <<EOF >nsupdate.txt
+update delete $DNSDOMAIN A $machine_ip
+show
+send
+EOF
+echo "$pass" | kinit administrator
+nsupdate -g nsupdate.txt
+
+REALM="$(echo $DNSDOMAIN | tr '[a-z]' '[A-Z]')"
+
+sudo $GDB bin/samba-tool domain join $DNSDOMAIN DC -Uadministrator%$pass -s $PREFIX/etc/smb.conf --option=realm=$REALM --option="ads:dc function level=4" --option="ads:min function level=0" -d2 "$@" || exit 1
+# PRIVATEDIR=$PREFIX/private sudo -E scripting/bin/setup_dns.sh $machine $DNSDOMAIN $machine_ip || exit 1
+#sudo rndc flush
diff --git a/source4/scripting/devel/drs/vars b/source4/scripting/devel/drs/vars
new file mode 100644
index 0000000..b69b9f9
--- /dev/null
+++ b/source4/scripting/devel/drs/vars
@@ -0,0 +1,12 @@
+DNSDOMAIN=ad.samba.example.com
+PREFIX="/data/samba/samba4/prefix.ad"
+export PYTHONPATH=$PYTHONPATH:$PREFIX/lib/python2.6/site-packages
+pass="penguin"
+machine="ruth"
+machine_ip="192.168.122.1"
+workgroup=adruth
+dn="DC=ad,DC=samba,DC=example,DC=com"
+server=win2008-1
+server_ip=192.168.122.53
+site="Default-First-Site-Name"
+
diff --git a/source4/scripting/devel/enumprivs b/source4/scripting/devel/enumprivs
new file mode 100755
index 0000000..389f7d0
--- /dev/null
+++ b/source4/scripting/devel/enumprivs
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+
+# script to enumerate LSA privileges on a server
+
+import sys
+from optparse import OptionParser
+
+sys.path.insert(0, "bin/python")
+
+import samba
+import samba.getopt as options
+from samba.dcerpc import lsa, security
+
+def get_display_name(lsaconn, pol_handle, name):
+ '''get the display name for a privilege'''
+ string = lsa.String()
+ string.string = name
+
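+ # 0x409 is the LCID for US English (the requested display-name language);
+ # the returned language id (ret_lang) is not used here.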
+ (disp_names, ret_lang) = lsaconn.LookupPrivDisplayName(pol_handle, string, 0x409, 0)
+ return disp_names.string
+
+
+
+
+########### main code ###########
+if __name__ == "__main__":
+ parser = OptionParser("enumprivs [options] server")
+ sambaopts = options.SambaOptions(parser)
+ credopts = options.CredentialsOptionsDouble(parser)
+ parser.add_option_group(credopts)
+
+ (opts, args) = parser.parse_args()
+
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp)
+
+ if len(args) != 1:
+ parser.error("You must supply a server")
+
+ if not creds.authentication_requested():
+ parser.error("You must supply credentials")
+
+ server = args[0]
+
+ binding_str = "ncacn_np:%s[print]" % server
+
+ lsaconn = lsa.lsarpc(binding_str, lp, creds)
+
+ objectAttr = lsa.ObjectAttribute()
+ objectAttr.sec_qos = lsa.QosInfo()
+
+ pol_handle = lsaconn.OpenPolicy2('',
+ objectAttr, security.SEC_FLAG_MAXIMUM_ALLOWED)
+
+ (handle, privs) = lsaconn.EnumPrivs(pol_handle, 0, 100)
+ for p in privs.privs:
+ disp_name = get_display_name(lsaconn, pol_handle, p.name.string)
+ print("0x%08x %31s \"%s\"" % (p.luid.low, p.name.string, disp_name))
diff --git a/source4/scripting/devel/getncchanges b/source4/scripting/devel/getncchanges
new file mode 100755
index 0000000..a1a4d14
--- /dev/null
+++ b/source4/scripting/devel/getncchanges
@@ -0,0 +1,143 @@
+#!/usr/bin/env python3
+
+# script to call a DRS GetNCChanges from the command line
+# this is useful for plugfest testing
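+# Example invocation (illustrative values):
+#   scripting/devel/getncchanges --dn='DC=samba,DC=example,DC=com' -Uadministrator%pass dc1.samba.example.com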
+import sys
+from optparse import OptionParser
+
+sys.path.insert(0, "bin/python")
+
+import samba, ldb
+import samba.getopt as options
+from samba.dcerpc import drsuapi, misc
+from samba.samdb import SamDB
+from samba.auth import system_session
+from samba.ndr import ndr_unpack
+from samba.drs_utils import drs_get_rodc_partial_attribute_set, drs_DsBind
+
+
+########### main code ###########
+if __name__ == "__main__":
+ parser = OptionParser("getncchanges [options] server")
+ sambaopts = options.SambaOptions(parser)
+ parser.add_option_group(sambaopts)
+ credopts = options.CredentialsOptionsDouble(parser)
+ parser.add_option_group(credopts)
+
+ parser.add_option("", "--dn", dest="dn", help="DN to replicate",)
+ parser.add_option("", "--exop", dest="exop", help="extended operation",)
+ parser.add_option("", "--pas", dest="use_pas", action='store_true', default=False,
+ help="send partial attribute set (for RODC)")
+ parser.add_option("", "--nb-iter", type='int', help="Number of getncchange iterations")
+ parser.add_option("", "--dest-dsa", type='str', help="destination DSA GUID")
+ parser.add_option("", "--rodc", action='store_true', default=False,
+ help='use RODC replica flags')
+ parser.add_option("", "--partial-rw", action='store_true', default=False,
+ help='use RW partial replica flags, not to be confused with --pas')
+ parser.add_option("", "--replica-flags", type='int',
+ default=drsuapi.DRSUAPI_DRS_INIT_SYNC |
+ drsuapi.DRSUAPI_DRS_PER_SYNC |
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC |
+ drsuapi.DRSUAPI_DRS_NEVER_SYNCED,
+ help='replica flags')
+
+ (opts, args) = parser.parse_args()
+ if opts.rodc:
+ opts.replica_flags = drsuapi.DRSUAPI_DRS_INIT_SYNC |\
+ drsuapi.DRSUAPI_DRS_PER_SYNC |\
+ drsuapi.DRSUAPI_DRS_GET_ANC |\
+ drsuapi.DRSUAPI_DRS_NEVER_SYNCED |\
+ drsuapi.DRSUAPI_DRS_SPECIAL_SECRET_PROCESSING |\
+ drsuapi.DRSUAPI_DRS_GET_ALL_GROUP_MEMBERSHIP
+
+ if opts.partial_rw:
+ opts.replica_flags = drsuapi.DRSUAPI_DRS_INIT_SYNC |\
+ drsuapi.DRSUAPI_DRS_PER_SYNC |\
+ drsuapi.DRSUAPI_DRS_GET_ANC |\
+ drsuapi.DRSUAPI_DRS_NEVER_SYNCED
+
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp)
+
+ if len(args) != 1:
+ parser.error("You must supply a server")
+
+ if creds.is_anonymous():
+ parser.error("You must supply credentials")
+
+ if opts.partial_rw and opts.rodc:
+ parser.error("Can't specify --partial-rw and --rodc")
+
+ server = args[0]
+
+ binding_str = "ncacn_ip_tcp:%s[seal,print]" % server
+
+ drs = drsuapi.drsuapi(binding_str, lp, creds)
+ drs_handle, supported_extensions = drs_DsBind(drs)
+ print("DRS Handle: %s" % drs_handle)
+
+ req8 = drsuapi.DsGetNCChangesRequest8()
+
+ samdb = SamDB(url="ldap://%s" % server,
+ session_info=system_session(),
+ credentials=creds, lp=lp)
+
+ if opts.use_pas:
+ local_samdb = SamDB(url=None, session_info=system_session(),
+ credentials=creds, lp=lp)
+
+ if opts.dn is None:
+ opts.dn = str(samdb.get_default_basedn())
+
+ if opts.exop is None:
+ exop = drsuapi.DRSUAPI_EXOP_NONE
+ else:
+ exop = int(opts.exop)
+
+ dest_dsa = opts.dest_dsa
+ if not dest_dsa:
+ print("no dest_dsa specified trying to figure out from ldap")
+ msgs = samdb.search(controls=["search_options:1:2"],
+ expression='(objectclass=ntdsdsa)')
+ if len(msgs) == 1:
+ dest_dsa = str(ndr_unpack(misc.GUID, msgs[0]["invocationId"][0]))
+ print("Found this dsa: %s" % dest_dsa)
+ else:
+ # TODO fixme
+ pass
+ if not dest_dsa:
+ print("Unable to find the dest_dsa automatically please specify it")
+ import sys
+ sys.exit(1)
+
+ null_guid = misc.GUID()
+ req8.destination_dsa_guid = misc.GUID(dest_dsa)
+ req8.source_dsa_invocation_id = misc.GUID(samdb.get_invocation_id())
+ req8.naming_context = drsuapi.DsReplicaObjectIdentifier()
+ req8.naming_context.dn = opts.dn
+ req8.highwatermark = drsuapi.DsReplicaHighWaterMark()
+ req8.highwatermark.tmp_highest_usn = 0
+ req8.highwatermark.reserved_usn = 0
+ req8.highwatermark.highest_usn = 0
+ req8.uptodateness_vector = None
+ req8.replica_flags = opts.replica_flags
+ req8.max_object_count = 402
+ req8.max_ndr_size = 402116
+ req8.extended_op = exop
+ req8.fsmo_info = 0
+ if opts.use_pas:
+ req8.partial_attribute_set = drs_get_rodc_partial_attribute_set(local_samdb)
+ else:
+ req8.partial_attribute_set = None
+ req8.partial_attribute_set_ex = None
+ req8.mapping_ctr.num_mappings = 0
+ req8.mapping_ctr.mappings = None
+
+ nb_iter = 0
+ while True:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ nb_iter += 1
+ if ctr.more_data == 0 or opts.nb_iter == nb_iter:
+ break
+ req8.highwatermark = ctr.new_highwatermark
diff --git a/source4/scripting/devel/nmfind b/source4/scripting/devel/nmfind
new file mode 100755
index 0000000..865c0d7
--- /dev/null
+++ b/source4/scripting/devel/nmfind
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+# find object files containing a symbol
+# for example:
+# nmfind foo_function $(find bin/default -name '*.o')
+
+TARGET=$1
+shift
+for f in $*; do
+ if nm $f 2>&1 | grep $TARGET >/dev/null; then
+ echo [$f]
+ nm $f | grep $TARGET
+ echo
+ fi
+done
diff --git a/source4/scripting/devel/pfm_verify.py b/source4/scripting/devel/pfm_verify.py
new file mode 100755
index 0000000..f29c1e5
--- /dev/null
+++ b/source4/scripting/devel/pfm_verify.py
@@ -0,0 +1,192 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# script to verify cached prefixMap on remote
+# server against the prefixMap stored in Schema NC
+#
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2010
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import sys
+from optparse import OptionParser
+
+sys.path.insert(0, "bin/python")
+
+import samba
+import samba.getopt as options
+from ldb import SCOPE_BASE, SCOPE_SUBTREE
+from samba.dcerpc import drsuapi, misc, drsblobs
+from samba.drs_utils import drs_DsBind
+from samba.samdb import SamDB
+from samba.auth import system_session
+from samba.ndr import ndr_pack, ndr_unpack
+
+
+def _samdb_fetch_pfm(samdb):
+ """Fetch prefixMap stored in SamDB using LDB connection"""
+ res = samdb.search(base=samdb.get_schema_basedn(), expression="", scope=SCOPE_BASE, attrs=["*"])
+ assert len(res) == 1
+ pfm = ndr_unpack(drsblobs.prefixMapBlob,
+ str(res[0]['prefixMap']))
+
+ pfm_schi = _samdb_fetch_schi(samdb)
+
+ return (pfm.ctr, pfm_schi)
+
+
+def _samdb_fetch_schi(samdb):
+ """Fetch schemaInfo stored in SamDB using LDB connection"""
+ res = samdb.search(base=samdb.get_schema_basedn(), expression="", scope=SCOPE_BASE, attrs=["*"])
+ assert len(res) == 1
+ if 'schemaInfo' in res[0]:
+ pfm_schi = ndr_unpack(drsblobs.schemaInfoBlob,
+ str(res[0]['schemaInfo']))
+ else:
+ pfm_schi = drsblobs.schemaInfoBlob()
+ pfm_schi.marker = 0xFF
+ return pfm_schi
+
+
+def _drs_fetch_pfm(server, samdb, creds, lp):
+ """Fetch prefixMap using DRS interface"""
+ binding_str = "ncacn_ip_tcp:%s[print,seal]" % server
+
+ drs = drsuapi.drsuapi(binding_str, lp, creds)
+ (drs_handle, supported_extensions) = drs_DsBind(drs)
+ print("DRS Handle: %s" % drs_handle)
+
+ req8 = drsuapi.DsGetNCChangesRequest8()
+
+ dest_dsa = misc.GUID("9c637462-5b8c-4467-aef2-bdb1f57bc4ef")
+ replica_flags = 0
+
+ req8.destination_dsa_guid = dest_dsa
+ req8.source_dsa_invocation_id = misc.GUID(samdb.get_invocation_id())
+ req8.naming_context = drsuapi.DsReplicaObjectIdentifier()
+ req8.naming_context.dn = samdb.get_schema_basedn()
+ req8.highwatermark = drsuapi.DsReplicaHighWaterMark()
+ req8.highwatermark.tmp_highest_usn = 0
+ req8.highwatermark.reserved_usn = 0
+ req8.highwatermark.highest_usn = 0
+ req8.uptodateness_vector = None
+ req8.replica_flags = replica_flags
+ req8.max_object_count = 0
+ req8.max_ndr_size = 402116
+ req8.extended_op = 0
+ req8.fsmo_info = 0
+ req8.partial_attribute_set = None
+ req8.partial_attribute_set_ex = None
+ req8.mapping_ctr.num_mappings = 0
+ req8.mapping_ctr.mappings = None
+
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ pfm = ctr.mapping_ctr
+ # check for schemaInfo element
+ pfm_it = pfm.mappings[-1]
+ assert pfm_it.id_prefix == 0
+ assert pfm_it.oid.length == 21
+ s = "".join(chr(x) for x in pfm_it.oid.binary_oid)
+ pfm_schi = ndr_unpack(drsblobs.schemaInfoBlob, s)
+ assert pfm_schi.marker == 0xFF
+ # remove schemaInfo element
+ pfm.num_mappings -= 1
+ return (pfm, pfm_schi)
+
+
+def _pfm_verify(drs_pfm, ldb_pfm):
+ errors = []
+ if drs_pfm.num_mappings != ldb_pfm.num_mappings:
+ errors.append("Different count of prefixes: drs = %d, ldb = %d"
+ % (drs_pfm.num_mappings, ldb_pfm.num_mappings))
+ count = min(drs_pfm.num_mappings, ldb_pfm.num_mappings)
+ for i in range(0, count):
+ it_err = []
+ drs_it = drs_pfm.mappings[i]
+ ldb_it = ldb_pfm.mappings[i]
+ if drs_it.id_prefix != ldb_it.id_prefix:
+ it_err.append("id_prefix")
+ if drs_it.oid.length != ldb_it.oid.length:
+ it_err.append("oid.length")
+ if drs_it.oid.binary_oid != ldb_it.oid.binary_oid:
+ it_err.append("oid.binary_oid")
+ if len(it_err):
+ errors.append("[%2d] differences in (%s)" % (i, it_err))
+ return errors
+
+
+def _pfm_schi_verify(drs_schi, ldb_schi):
+ errors = []
+ print(drs_schi.revision)
+ print(drs_schi.invocation_id)
+ if drs_schi.marker != ldb_schi.marker:
+ errors.append("Different marker in schemaInfo: drs = %d, ldb = %d"
+ % (drs_schi.marker, ldb_schi.marker))
+ if drs_schi.revision != ldb_schi.revision:
+ errors.append("Different revision in schemaInfo: drs = %d, ldb = %d"
+ % (drs_schi.revision, ldb_schi.revision))
+ if drs_schi.invocation_id != ldb_schi.invocation_id:
+ errors.append("Different invocation_id in schemaInfo: drs = %s, ldb = %s"
+ % (drs_schi.invocation_id, ldb_schi.invocation_id))
+ return errors
+
+
+########### main code ###########
+if __name__ == "__main__":
+ # command line parsing
+ parser = OptionParser("pfm_verify.py [options] server")
+ sambaopts = options.SambaOptions(parser)
+ parser.add_option_group(sambaopts)
+ credopts = options.CredentialsOptionsDouble(parser)
+ parser.add_option_group(credopts)
+
+ (opts, args) = parser.parse_args()
+
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp)
+
+ if len(args) != 1:
+ if "DC_SERVER" not in os.environ.keys():
+ parser.error("You must supply a server")
+ args.append(os.environ["DC_SERVER"])
+
+ if creds.is_anonymous():
+ parser.error("You must supply credentials")
+
+ server = args[0]
+
+ samdb = SamDB(url="ldap://%s" % server,
+ session_info=system_session(lp),
+ credentials=creds, lp=lp)
+
+ exit_code = 0
+ (drs_pfm, drs_schi) = _drs_fetch_pfm(server, samdb, creds, lp)
+ (ldb_pfm, ldb_schi) = _samdb_fetch_pfm(samdb)
+ # verify prefixMaps
+ errors = _pfm_verify(drs_pfm, ldb_pfm)
+ if len(errors):
+ print("prefixMap verification errors:")
+ print("%s" % errors)
+ exit_code = 1
+ # verify schemaInfos
+ errors = _pfm_schi_verify(drs_schi, ldb_schi)
+ if len(errors):
+ print("schemaInfo verification errors:")
+ print("%s" % errors)
+ exit_code = 2
+
+ if exit_code != 0:
+ sys.exit(exit_code)
diff --git a/source4/scripting/devel/rebuild_zone.sh b/source4/scripting/devel/rebuild_zone.sh
new file mode 100755
index 0000000..94d1f9e
--- /dev/null
+++ b/source4/scripting/devel/rebuild_zone.sh
@@ -0,0 +1,109 @@
+#!/bin/sh
+# rebuild a zone file, adding all DCs
+
+[ $# -eq 2 ] || {
+ echo "rebuild_zone.sh <sam.ldb> <zonefile>"
+ exit 1
+}
+
+LDB="$1"
+ZFILE="$2"
+
+dnshostname=$(bin/ldbsearch -H $LDB --scope=base -b '' dnsHostname | grep ^dns | cut -d' ' -f2)
+host=$(echo $dnshostname | cut -d. -f1)
+realm=$(echo $dnshostname | cut -d. -f2-)
+GUIDs=$(bin/ldbsearch -H $LDB objectclass=ntdsdsa objectguid --cross-ncs | grep ^objectGUID | cut -d' ' -f2)
+DOMAINGUID=$(bin/ldbsearch -H $LDB --scope=base objectguid | grep ^objectGUID | cut -d' ' -f2)
+
+dcname()
+{
+ GUID=$1
+ echo $(bin/ldbsearch -H $LDB objectguid=$GUID dn --cross-ncs | grep CN=NTDS.Settings | cut -d, -f2 | cut -d= -f2)
+}
+
+getip()
+{
+ NAME=$1
+ ret=$(nmblookup $NAME | egrep '^[0-9]' | head -1 | cut -d' ' -f1)
+ test -n "$ret" || {
+ echo "Unable to find IP for $NAME. Using XX.XX.XX.XX. Please edit" 1>&2
+ echo "XX.XX.XX.XX"
+ }
+ echo $ret
+}
+
+echo "Generating header for host $host in realm $realm"
+cat <<EOF >$ZFILE
+; -*- zone -*-
+; generated by rebuild_zone.sh
+\$ORIGIN $realm.
+\$TTL 1W
+@ IN SOA @ hostmaster (
+ $(date +%Y%m%d%H) ; serial
+ 2D ; refresh
+ 4H ; retry
+ 6W ; expiry
+ 1W ) ; minimum
+ IN NS $host
+
+EOF
+
+for GUID in $GUIDs; do
+ dc=$(dcname $GUID)
+ echo "Generating IP for DC $dc"
+ ip=$(getip $dc)
+ test -n "$ip" || exit 1
+ echo " IN A $ip" >>$ZFILE
+done
+
+echo "; IP Addresses" >>$ZFILE
+for GUID in $GUIDs; do
+ dc=$(dcname $GUID)
+ ip=$(getip $dc)
+ test -n "$ip" || exit 1
+ echo "$dc IN A $ip" >>$ZFILE
+done
+
+for GUID in $GUIDs; do
+ dc=$(dcname $GUID)
+ ip=$(getip $dc)
+ test -n "$ip" || exit 1
+ echo "Generating zone body for DC $dc with IP $ip"
+ cat <<EOF >>$ZFILE
+;
+; Entries for $dc
+gc._msdcs IN A $ip
+$GUID._msdcs IN CNAME $dc
+_gc._tcp IN SRV 0 100 3268 $dc
+_gc._tcp.Default-First-Site-Name._sites IN SRV 0 100 3268 $dc
+_ldap._tcp.gc._msdcs IN SRV 0 100 389 $dc
+_ldap._tcp.Default-First-Site-Name._sites.gc._msdcs IN SRV 0 100 389 $dc
+_ldap._tcp IN SRV 0 100 389 $dc
+_ldap._tcp.dc._msdcs IN SRV 0 100 389 $dc
+_ldap._tcp.pdc._msdcs IN SRV 0 100 389 $dc
+_ldap._tcp.$DOMAINGUID.domains._msdcs IN SRV 0 100 389 $dc
+_ldap._tcp.Default-First-Site-Name._sites IN SRV 0 100 389 $dc
+_ldap._tcp.Default-First-Site-Name._sites.dc._msdcs IN SRV 0 100 389 $dc
+_kerberos._tcp IN SRV 0 100 88 $dc
+_kerberos._tcp.dc._msdcs IN SRV 0 100 88 $dc
+_kerberos._tcp.Default-First-Site-Name._sites IN SRV 0 100 88 $dc
+_kerberos._tcp.Default-First-Site-Name._sites.dc._msdcs IN SRV 0 100 88 $dc
+_kerberos._udp IN SRV 0 100 88 $dc
+_kerberos-master._tcp IN SRV 0 100 88 $dc
+_kerberos-master._udp IN SRV 0 100 88 $dc
+_kpasswd._tcp IN SRV 0 100 464 $dc
+_kpasswd._udp IN SRV 0 100 464 $dc
+EOF
+done
+
+cat <<EOF >>$ZFILE
+
+; kerberos hack
+_kerberos IN TXT $(echo $realm | tr [a-z] [A-Z])
+EOF
+
+echo "Rebuilt zone file $ZFILE OK"
+
+echo "Reloading bind config"
+PATH="/usr/sbin:$PATH" rndc reload
+exit 0
diff --git a/source4/scripting/devel/repl_cleartext_pwd.py b/source4/scripting/devel/repl_cleartext_pwd.py
new file mode 100755
index 0000000..6439291
--- /dev/null
+++ b/source4/scripting/devel/repl_cleartext_pwd.py
@@ -0,0 +1,412 @@
+#!/usr/bin/env python3
+#
+# Copyright Stefan Metzmacher 2011-2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# This is useful to sync passwords from an AD domain.
+#
+# $
+# $ source4/scripting/devel/repl_cleartext_pwd.py \
+# -Uadministrator%A1b2C3d4 \
+# 172.31.9.219 DC=bla,DC=base /tmp/cookie cleartext_utf8 131085 displayName
+# # starting at usn[0]
+# dn: CN=Test User1,CN=Users,DC=bla,DC=base
+# cleartext_utf8: A1b2C3d4
+# displayName:: VABlAHMAdAAgAFUAcwBlAHIAMQA=
+#
+# # up to usn[16449]
+# $
+# $ source4/scripting/devel/repl_cleartext_pwd.py \
+# -Uadministrator%A1b2C3d4 \
+# 172.31.9.219 DC=bla,DC=base cookie_file cleartext_utf8 131085 displayName
+# # starting at usn[16449]
+# # up to usn[16449]
+# $
+#
+
+import sys
+
+# Find right directory when running from source tree
+sys.path.insert(0, "bin/python")
+
+import samba.getopt as options
+from optparse import OptionParser
+
+from samba.dcerpc import drsuapi, drsblobs, misc
+from samba.ndr import ndr_pack, ndr_unpack, ndr_print
+
+import binascii
+import hashlib
+import Crypto.Cipher.ARC4
+import struct
+import os
+
+from ldif import LDIFWriter
+
+
+class globals:
+ def __init__(self):
+ self.global_objs = {}
+ self.ldif = LDIFWriter(sys.stdout)
+
+ def add_attr(self, dn, attname, vals):
+ if dn not in self.global_objs:
+ self.global_objs[dn] = {}
+ self.global_objs[dn][attname] = vals
+
+ def print_all(self):
+ for dn, obj in self.global_objs.items():
+ self.ldif.unparse(dn, obj)
+ continue
+ self.global_objs = {}
+
+
+def attid_equal(a1, a2):
+ return (a1 & 0xffffffff) == (a2 & 0xffffffff)
+
+
+########### main code ###########
+if __name__ == "__main__":
+ parser = OptionParser("repl_cleartext_pwd.py [options] server dn cookie_file clear_utf8_name [attid attname attmode] [clear_utf16_name")
+ sambaopts = options.SambaOptions(parser)
+ credopts = options.CredentialsOptions(parser)
+ parser.add_option_group(credopts)
+
+ (opts, args) = parser.parse_args()
+
+ if len(args) == 4:
+ pass
+ elif len(args) == 7:
+ pass
+ elif len(args) >= 8:
+ pass
+ else:
+ parser.error("more arguments required - given=%d" % (len(args)))
+
+ server = args[0]
+ dn = args[1]
+ cookie_file = args[2]
+ if len(cookie_file) == 0:
+ cookie_file = None
+ clear_utf8_name = args[3]
+ if len(args) >= 7:
+ try:
+ attid = int(args[4], 16)
+ except Exception:
+ attid = int(args[4])
+ attname = args[5]
+ attmode = args[6]
+ if attmode not in ["raw", "utf8"]:
+ parser.error("attmode should be 'raw' or 'utf8'")
+ else:
+ attid = -1
+ attname = None
+ attmode = "raw"
+ if len(args) >= 8:
+ clear_utf16_name = args[7]
+ else:
+ clear_utf16_name = None
+
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp)
+
+ if not creds.authentication_requested():
+ parser.error("You must supply credentials")
+
+ gls = globals()
+ try:
+        f = open(cookie_file, 'rb')
+ store_blob = f.read()
+ f.close()
+
+ store_hdr = store_blob[0:28]
+ (store_version,
+ store_dn_len, store_dn_ofs,
+ store_hwm_len, store_hwm_ofs,
+ store_utdv_len, store_utdv_ofs) = \
+ struct.unpack("<LLLLLLL", store_hdr)
+
+        store_dn = store_blob[store_dn_ofs:store_dn_ofs + store_dn_len].decode('utf-8')
+ store_hwm_blob = store_blob[store_hwm_ofs:store_hwm_ofs + store_hwm_len]
+ store_utdv_blob = store_blob[store_utdv_ofs:store_utdv_ofs + store_utdv_len]
+
+ store_hwm = ndr_unpack(drsuapi.DsReplicaHighWaterMark, store_hwm_blob)
+ store_utdv = ndr_unpack(drsblobs.replUpToDateVectorBlob, store_utdv_blob)
+
+ assert store_dn == dn
+ # print "%s" % ndr_print(store_hwm)
+ # print "%s" % ndr_print(store_utdv)
+ except Exception:
+ store_dn = dn
+ store_hwm = drsuapi.DsReplicaHighWaterMark()
+ store_hwm.tmp_highest_usn = 0
+ store_hwm.reserved_usn = 0
+ store_hwm.highest_usn = 0
+ store_utdv = None
+
+ binding_str = "ncacn_ip_tcp:%s[spnego,seal]" % server
+
+ drs_conn = drsuapi.drsuapi(binding_str, lp, creds)
+
+ bind_info = drsuapi.DsBindInfoCtr()
+ bind_info.length = 28
+ bind_info.info = drsuapi.DsBindInfo28()
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_BASE
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_ASYNC_REPLICATION
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_REMOVEAPI
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_MOVEREQ_V2
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHG_COMPRESS
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V1
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_RESTORE_USN_OPTIMIZATION
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_KCC_EXECUTE
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_ADDENTRY_V2
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_LINKED_VALUE_REPLICATION
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V2
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_INSTANCE_TYPE_NOT_REQ_ON_MOD
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_CRYPTO_BIND
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GET_REPL_INFO
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_STRONG_ENCRYPTION
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V01
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_TRANSITIVE_MEMBERSHIP
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_ADD_SID_HISTORY
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_POST_BETA3
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GET_MEMBERSHIPS2
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V6
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_NONDOMAIN_NCS
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V8
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V5
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V6
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_ADDENTRYREPLY_V3
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V7
+ bind_info.info.supported_extensions |= drsuapi.DRSUAPI_SUPPORTED_EXTENSION_VERIFY_OBJECT
+ (info, drs_handle) = drs_conn.DsBind(misc.GUID(drsuapi.DRSUAPI_DS_BIND_GUID), bind_info)
+
+ null_guid = misc.GUID()
+
+ naming_context = drsuapi.DsReplicaObjectIdentifier()
+ naming_context.dn = dn
+ highwatermark = store_hwm
+ uptodateness_vector = None
+ if store_utdv is not None:
+ uptodateness_vector = drsuapi.DsReplicaCursorCtrEx()
+ if store_utdv.version == 1:
+ uptodateness_vector.cursors = store_utdv.cursors
+ elif store_utdv.version == 2:
+ cursors = []
+ for i in range(0, store_utdv.ctr.count):
+ cursor = drsuapi.DsReplicaCursor()
+ cursor.source_dsa_invocation_id = store_utdv.ctr.cursors[i].source_dsa_invocation_id
+ cursor.highest_usn = store_utdv.ctr.cursors[i].highest_usn
+ cursors.append(cursor)
+ uptodateness_vector.cursors = cursors
+
+ req8 = drsuapi.DsGetNCChangesRequest8()
+
+ req8.destination_dsa_guid = null_guid
+ req8.source_dsa_invocation_id = null_guid
+ req8.naming_context = naming_context
+ req8.highwatermark = highwatermark
+ req8.uptodateness_vector = uptodateness_vector
+ req8.replica_flags = (drsuapi.DRSUAPI_DRS_INIT_SYNC |
+ drsuapi.DRSUAPI_DRS_PER_SYNC |
+ drsuapi.DRSUAPI_DRS_GET_ANC |
+ drsuapi.DRSUAPI_DRS_NEVER_SYNCED |
+ drsuapi.DRSUAPI_DRS_WRIT_REP)
+ req8.max_object_count = 402
+ req8.max_ndr_size = 402116
+ req8.extended_op = 0
+ req8.fsmo_info = 0
+ req8.partial_attribute_set = None
+ req8.partial_attribute_set_ex = None
+ req8.mapping_ctr.num_mappings = 0
+ req8.mapping_ctr.mappings = None
+
+ user_session_key = drs_conn.user_session_key
+
+ print("# starting at usn[%d]" % (highwatermark.highest_usn))
+
+ while True:
+ (level, ctr) = drs_conn.DsGetNCChanges(drs_handle, 8, req8)
+ if ctr.first_object is None and ctr.object_count != 0:
+ raise RuntimeError("DsGetNCChanges: NULL first_object with object_count=%u" % (ctr.object_count))
+
+ obj_item = ctr.first_object
+ while obj_item is not None:
+ obj = obj_item.object
+
+ if obj.identifier is None:
+ obj_item = obj_item.next_object
+ continue
+
+ # print '%s' % obj.identifier.dn
+
+ is_deleted = False
+ for i in range(0, obj.attribute_ctr.num_attributes):
+ attr = obj.attribute_ctr.attributes[i]
+ if attid_equal(attr.attid, drsuapi.DRSUAPI_ATTID_isDeleted):
+ is_deleted = True
+ if is_deleted:
+ obj_item = obj_item.next_object
+ continue
+
+ spl_crypt = None
+ attvals = None
+ for i in range(0, obj.attribute_ctr.num_attributes):
+ attr = obj.attribute_ctr.attributes[i]
+ if attid_equal(attr.attid, attid):
+ attvals = []
+ for j in range(0, attr.value_ctr.num_values):
+ assert attr.value_ctr.values[j].blob is not None
+ val_raw = attr.value_ctr.values[j].blob
+ val = None
+ if attmode == "utf8":
+                            val_unicode = val_raw.decode('utf-16-le')
+                            val = val_unicode.encode('utf-8')
+ elif attmode == "raw":
+ val = val_raw
+ else:
+ assert False, "attmode[%s]" % attmode
+ attvals.append(val)
+ if not attid_equal(attr.attid, drsuapi.DRSUAPI_ATTID_supplementalCredentials):
+ continue
+ assert attr.value_ctr.num_values <= 1
+ if attr.value_ctr.num_values == 0:
+ break
+ assert attr.value_ctr.values[0].blob is not None
+ spl_crypt = attr.value_ctr.values[0].blob
+
+ if spl_crypt is None:
+ obj_item = obj_item.next_object
+ continue
+
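+            # supplementalCredentials is transferred RC4-encrypted: the first
+            # 16 bytes of the blob are a confounder, the RC4 key is
+            # MD5(user_session_key + confounder), and the decrypted buffer
+            # starts with a CRC32 checksum of the remaining payload.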
+ assert len(spl_crypt) >= 20
+ confounder = spl_crypt[0:16]
+ enc_buffer = spl_crypt[16:]
+
+ m5 = hashlib.md5()
+ m5.update(user_session_key)
+ m5.update(confounder)
+ enc_key = m5.digest()
+
+ rc4 = Crypto.Cipher.ARC4.new(enc_key)
+ plain_buffer = rc4.decrypt(enc_buffer)
+
+ (crc32_v) = struct.unpack("<L", plain_buffer[0:4])
+ attr_val = plain_buffer[4:]
+ crc32_c = binascii.crc32(attr_val) & 0xffffffff
+ assert int(crc32_v[0]) == int(crc32_c), "CRC32 0x%08X != 0x%08X" % (crc32_v[0], crc32_c)
+
+ spl = ndr_unpack(drsblobs.supplementalCredentialsBlob, attr_val)
+
+ # print '%s' % ndr_print(spl)
+
+ cleartext_hex = None
+
+ for i in range(0, spl.sub.num_packages):
+ pkg = spl.sub.packages[i]
+ if pkg.name != "Primary:CLEARTEXT":
+ continue
+ cleartext_hex = pkg.data
+
+ if cleartext_hex is not None:
+ cleartext_utf16 = binascii.a2b_hex(cleartext_hex)
+ if clear_utf16_name is not None:
+ gls.add_attr(obj.identifier.dn, clear_utf16_name, [cleartext_utf16])
+ try:
+                    cleartext_unicode = cleartext_utf16.decode('utf-16-le')
+                    cleartext_utf8 = cleartext_unicode.encode('utf-8')
+ gls.add_attr(obj.identifier.dn, clear_utf8_name, [cleartext_utf8])
+ except Exception:
+ pass
+
+ if attvals is not None:
+ gls.add_attr(obj.identifier.dn, attname, attvals)
+
+ krb5_old_hex = None
+
+ for i in range(0, spl.sub.num_packages):
+ pkg = spl.sub.packages[i]
+ if pkg.name != "Primary:Kerberos":
+ continue
+ krb5_old_hex = pkg.data
+
+ if krb5_old_hex is not None:
+ krb5_old_raw = binascii.a2b_hex(krb5_old_hex)
+ krb5_old = ndr_unpack(drsblobs.package_PrimaryKerberosBlob, krb5_old_raw, allow_remaining=True)
+
+ # print '%s' % ndr_print(krb5_old)
+
+ krb5_new_hex = None
+
+ for i in range(0, spl.sub.num_packages):
+ pkg = spl.sub.packages[i]
+ if pkg.name != "Primary:Kerberos-Newer-Keys":
+ continue
+ krb5_new_hex = pkg.data
+
+ if krb5_new_hex is not None:
+ krb5_new_raw = binascii.a2b_hex(krb5_new_hex)
+ krb5_new = ndr_unpack(drsblobs.package_PrimaryKerberosBlob, krb5_new_raw, allow_remaining=True)
+
+ # print '%s' % ndr_print(krb5_new)
+
+ obj_item = obj_item.next_object
+
+ gls.print_all()
+
+ if ctr.more_data == 0:
+ store_hwm = ctr.new_highwatermark
+
+ store_utdv = drsblobs.replUpToDateVectorBlob()
+ store_utdv.version = ctr.uptodateness_vector.version
+ store_utdv_ctr = store_utdv.ctr
+ store_utdv_ctr.count = ctr.uptodateness_vector.count
+ store_utdv_ctr.cursors = ctr.uptodateness_vector.cursors
+ store_utdv.ctr = store_utdv_ctr
+
+ # print "%s" % ndr_print(store_hwm)
+ # print "%s" % ndr_print(store_utdv)
+
+ store_hwm_blob = ndr_pack(store_hwm)
+ store_utdv_blob = ndr_pack(store_utdv)
+
+ #
+ # uint32_t version '1'
+ # uint32_t dn_str_len
+ # uint32_t dn_str_ofs
+ # uint32_t hwm_blob_len
+ # uint32_t hwm_blob_ofs
+ # uint32_t utdv_blob_len
+ # uint32_t utdv_blob_ofs
+            store_hdr_len = 7 * 4
+            dn_blob = dn.encode('utf-8')
+            dn_ofs = store_hdr_len
+            hwm_ofs = dn_ofs + len(dn_blob)
+            utdv_ofs = hwm_ofs + len(store_hwm_blob)
+            store_blob = struct.pack("<LLLLLLL", 1,
+                                     len(dn_blob), dn_ofs,
+                                     len(store_hwm_blob), hwm_ofs,
+                                     len(store_utdv_blob), utdv_ofs) + \
+                dn_blob + store_hwm_blob + store_utdv_blob
+
+            if cookie_file is not None:
+                tmp_file = "%s.tmp" % cookie_file
+                f = open(tmp_file, 'wb')
+                f.write(store_blob)
+                f.close()
+                os.rename(tmp_file, cookie_file)
+
+ print("# up to usn[%d]" % (ctr.new_highwatermark.highest_usn))
+ break
+ print("# up to tmp_usn[%d]" % (ctr.new_highwatermark.highest_usn))
+ req8.highwatermark = ctr.new_highwatermark
diff --git a/source4/scripting/devel/rodcdns b/source4/scripting/devel/rodcdns
new file mode 100755
index 0000000..6830580
--- /dev/null
+++ b/source4/scripting/devel/rodcdns
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+
+# script to call a netlogon RODC DNS update
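+#
+# Illustrative invocation (option values are examples only); the script
+# talks to the local winbind irpc pipe, so run it on the RODC itself:
+#
+#   source4/scripting/devel/rodcdns --site=Default-First-Site-Name --port=389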
+
+import sys
+from optparse import OptionParser
+
+sys.path.insert(0, "bin/python")
+
+import samba
+import samba.getopt as options
+from samba.dcerpc import netlogon, winbind
+
+########### main code ###########
+if __name__ == "__main__":
+ parser = OptionParser("rodcdns [options]")
+ sambaopts = options.SambaOptions(parser)
+
+ parser.add_option("", "--weight", dest="weight", help="record weight", default=0, type='int')
+ parser.add_option("", "--priority", dest="priority", help="record priority", default=100, type='int')
+ parser.add_option("", "--port", dest="port", help="port number", default=389, type='int')
+ parser.add_option("", "--type", dest="type", help="record type", default=netlogon.NlDnsLdapAtSite, type='int')
+ parser.add_option("", "--site", dest="site", help="site name", default="Default-First-Site-Name")
+
+ (opts, args) = parser.parse_args()
+
+ lp = sambaopts.get_loadparm()
+
+ w = winbind.winbind("irpc:winbind_server", lp)
+
+ dns_names = netlogon.NL_DNS_NAME_INFO_ARRAY()
+ dns_names.count = 1
+ name = netlogon.NL_DNS_NAME_INFO()
+ name.type = opts.type
+ name.priority = opts.priority
+ name.weight = opts.weight
+ name.port = opts.port
+ name.dns_register = True
+ dns_names.names = [ name ]
+ site_name = opts.site
+
+ ret_names = w.DsrUpdateReadOnlyServerDnsRecords(site_name, 600, dns_names)
+ print("Status: %u" % ret_names.names[0].status)
diff --git a/source4/scripting/devel/speedtest.py b/source4/scripting/devel/speedtest.py
new file mode 100755
index 0000000..8c044c4
--- /dev/null
+++ b/source4/scripting/devel/speedtest.py
@@ -0,0 +1,235 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Unix SMB/CIFS implementation.
+# This speed test aims to show the difference in execution time for
+# bulk creation of user objects, to help compare Samba4 and
+# MS Active Directory performance.
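+#
+# Illustrative invocation (hostname and credentials are placeholders):
+#   python3 source4/scripting/devel/speedtest.py -Uadministrator%PASSWORD ldap://dc.example.com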
+
+# Copyright (C) Zahari Zahariev <zahari.zahariev@postpath.com> 2010
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import optparse
+import sys
+import time
+import base64
+from decimal import Decimal
+
+sys.path.insert(0, "bin/python")
+import samba
+from samba.tests.subunitrun import TestProgram, SubunitOptions
+
+import samba.getopt as options
+
+from ldb import SCOPE_BASE, SCOPE_SUBTREE
+from samba.ndr import ndr_unpack
+from samba.dcerpc import security
+
+from samba.auth import system_session
+from samba import gensec, sd_utils
+from samba.samdb import SamDB
+from samba.credentials import Credentials
+import samba.tests
+from samba.tests import delete_force
+
+parser = optparse.OptionParser("speedtest.py [options] <host>")
+sambaopts = options.SambaOptions(parser)
+parser.add_option_group(sambaopts)
+parser.add_option_group(options.VersionOptions(parser))
+
+# use command line creds if available
+credopts = options.CredentialsOptions(parser)
+parser.add_option_group(credopts)
+subunitopts = SubunitOptions(parser)
+parser.add_option_group(subunitopts)
+opts, args = parser.parse_args()
+
+if len(args) < 1:
+ parser.print_usage()
+ sys.exit(1)
+
+host = args[0]
+
+lp = sambaopts.get_loadparm()
+creds = credopts.get_credentials(lp)
+creds.set_gensec_features(creds.get_gensec_features() | gensec.FEATURE_SEAL)
+
+#
+# Tests start here
+#
+
+
+class SpeedTest(samba.tests.TestCase):
+
+ def find_domain_sid(self, ldb):
+ res = ldb.search(base=self.base_dn, expression="(objectClass=*)", scope=SCOPE_BASE)
+ return ndr_unpack(security.dom_sid, res[0]["objectSid"][0])
+
+ def setUp(self):
+ super(SpeedTest, self).setUp()
+ self.ldb_admin = ldb
+ self.base_dn = ldb.domain_dn()
+ self.domain_sid = security.dom_sid(ldb.get_domain_sid())
+ self.user_pass = "samba123@"
+ print("baseDN: %s" % self.base_dn)
+
+ def create_user(self, user_dn):
+ ldif = """
+dn: """ + user_dn + """
+sAMAccountName: """ + user_dn.split(",")[0][3:] + """
+objectClass: user
+unicodePwd:: """ + base64.b64encode(("\"%s\"" % self.user_pass).encode('utf-16-le')).decode('utf8') + """
+url: www.example.com
+"""
+ self.ldb_admin.add_ldif(ldif)
+
+ def create_group(self, group_dn, desc=None):
+ ldif = """
+dn: """ + group_dn + """
+objectClass: group
+sAMAccountName: """ + group_dn.split(",")[0][3:] + """
+groupType: 4
+url: www.example.com
+"""
+ self.ldb_admin.add_ldif(ldif)
+
+ def create_bundle(self, count):
+ for i in range(count):
+ self.create_user("cn=speedtestuser%d,cn=Users,%s" % (i + 1, self.base_dn))
+
+ def remove_bundle(self, count):
+ for i in range(count):
+ delete_force(self.ldb_admin, "cn=speedtestuser%d,cn=Users,%s" % (i + 1, self.base_dn))
+
+ def remove_test_users(self):
+ res = ldb.search(base="cn=Users,%s" % self.base_dn, expression="(objectClass=user)", scope=SCOPE_SUBTREE)
+ dn_list = [item.dn for item in res if "speedtestuser" in str(item.dn)]
+ for dn in dn_list:
+ delete_force(self.ldb_admin, dn)
+
+
+class SpeedTestAddDel(SpeedTest):
+
+ def setUp(self):
+ super(SpeedTestAddDel, self).setUp()
+
+ def run_bundle(self, num):
+ print("\n=== Test ADD/DEL %s user objects ===\n" % num)
+ avg_add = Decimal("0.0")
+ avg_del = Decimal("0.0")
+ for x in [1, 2, 3]:
+ start = time.time()
+ self.create_bundle(num)
+ res_add = Decimal(str(time.time() - start))
+ avg_add += res_add
+ print(" Attempt %s ADD: %.3fs" % (x, float(res_add)))
+ #
+ start = time.time()
+ self.remove_bundle(num)
+ res_del = Decimal(str(time.time() - start))
+ avg_del += res_del
+ print(" Attempt %s DEL: %.3fs" % (x, float(res_del)))
+ print("Average ADD: %.3fs" % float(Decimal(avg_add) / Decimal("3.0")))
+ print("Average DEL: %.3fs" % float(Decimal(avg_del) / Decimal("3.0")))
+ print("")
+
+ def test_00000(self):
+ """ Remove possibly undeleted test users from previous test
+ """
+ self.remove_test_users()
+
+ def test_00010(self):
+ self.run_bundle(10)
+
+ def test_00100(self):
+ self.run_bundle(100)
+
+ def test_01000(self):
+ self.run_bundle(1000)
+
+ def _test_10000(self):
+        """ This test should preferably be enabled against MS Active Directory;
+        against Samba4 it takes a very long time (1-2 days), which is why it
+        is disabled by default.
+ """
+ self.run_bundle(10000)
+
+
+class AclSearchSpeedTest(SpeedTest):
+
+ def setUp(self):
+ super(AclSearchSpeedTest, self).setUp()
+ self.ldb_admin.newuser("acltestuser", "samba123@")
+ self.sd_utils = sd_utils.SDUtils(self.ldb_admin)
+ self.ldb_user = self.get_ldb_connection("acltestuser", "samba123@")
+ self.user_sid = self.sd_utils.get_object_sid(self.get_user_dn("acltestuser"))
+
+ def tearDown(self):
+ super(AclSearchSpeedTest, self).tearDown()
+ delete_force(self.ldb_admin, self.get_user_dn("acltestuser"))
+
+ def run_search_bundle(self, num, _ldb):
+ print("\n=== Creating %s user objects ===\n" % num)
+ self.create_bundle(num)
+ mod = "(A;;LC;;;%s)(D;;RP;;;%s)" % (str(self.user_sid), str(self.user_sid))
+ for i in range(num):
+ self.sd_utils.dacl_add_ace("cn=speedtestuser%d,cn=Users,%s" %
+ (i + 1, self.base_dn), mod)
+ print("\n=== %s user objects created ===\n" % num)
+ print("\n=== Test search on %s user objects ===\n" % num)
+ avg_search = Decimal("0.0")
+ for x in [1, 2, 3]:
+ start = time.time()
+ res = _ldb.search(base=self.base_dn, expression="(objectClass=*)", scope=SCOPE_SUBTREE)
+ res_search = Decimal(str(time.time() - start))
+ avg_search += res_search
+ print(" Attempt %s SEARCH: %.3fs" % (x, float(res_search)))
+ print("Average Search: %.3fs" % float(Decimal(avg_search) / Decimal("3.0")))
+ self.remove_bundle(num)
+
+ def get_user_dn(self, name):
+ return "CN=%s,CN=Users,%s" % (name, self.base_dn)
+
+ def get_ldb_connection(self, target_username, target_password):
+ creds_tmp = Credentials()
+ creds_tmp.set_username(target_username)
+ creds_tmp.set_password(target_password)
+ creds_tmp.set_domain(creds.get_domain())
+ creds_tmp.set_realm(creds.get_realm())
+ creds_tmp.set_workstation(creds.get_workstation())
+ creds_tmp.set_gensec_features(creds_tmp.get_gensec_features()
+ | gensec.FEATURE_SEAL)
+ ldb_target = SamDB(url=host, credentials=creds_tmp, lp=lp)
+ return ldb_target
+
+ def test_search_01000(self):
+ self.run_search_bundle(1000, self.ldb_admin)
+
+ def test_search2_01000(self):
+        # allow the user to see objects but not attributes; all attributes will be filtered out
+ mod = "(A;;LC;;;%s)(D;;RP;;;%s)" % (str(self.user_sid), str(self.user_sid))
+ self.sd_utils.dacl_add_ace("CN=Users,%s" % self.base_dn, mod)
+ self.run_search_bundle(1000, self.ldb_user)
+
+# Test runner setup: connect to the target host before running the tests
+
+
+if "://" not in host:
+ host = "ldap://%s" % host
+
+ldb_options = ["modules:paged_searches"]
+ldb = SamDB(host, credentials=creds, session_info=system_session(), lp=lp, options=ldb_options)
+
+TestProgram(module=__name__, opts=subunitopts)
diff --git a/source4/scripting/devel/tmpfs.sh b/source4/scripting/devel/tmpfs.sh
new file mode 100755
index 0000000..e4798ec
--- /dev/null
+++ b/source4/scripting/devel/tmpfs.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+# This sets up bin/ and st/ as tmpfs filesystems, which saves a lot of
+# time waiting on the disk!
+
+sudo echo "About to (re)mount bin and st as tmpfs"
+rm -rf bin st
+sudo umount bin >/dev/null 2>&1
+sudo umount st >/dev/null 2>&1
+mkdir -p bin st || exit 1
+sudo mount -t tmpfs /dev/null bin || exit 1
+sudo chown $USER bin/. || exit 1
+echo "tmpfs setup for bin/"
+sudo mount -t tmpfs /dev/null st || exit 1
+sudo chown $USER st/. || exit 1
+echo "tmpfs setup for st/"
diff --git a/source4/scripting/devel/watch_servers.sh b/source4/scripting/devel/watch_servers.sh
new file mode 100644
index 0000000..88d66a2
--- /dev/null
+++ b/source4/scripting/devel/watch_servers.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+[ $# -ge 4 ] || {
+ echo "Usage: watch_servers.sh DB1 DB2 PASSWORD SEARCH <attrs>"
+ exit 1
+}
+
+host1="$1"
+host2="$2"
+password="$3"
+search="$4"
+shift 4
+
+watch -n1 "echo '$host1:'; bin/ldbsearch -S -H $host1 -Uadministrator%$password '$search' description $* | egrep -v '^ref|Ref|returned|entries|referrals' | uniq; echo; echo '$host2:'; bin/ldbsearch -S -H $host2 -Uadministrator%$password '$search' description $* | egrep -v '^ref|Ref|returned|entries|referrals' | uniq;"
diff --git a/source4/scripting/man/samba-gpupdate.8.xml b/source4/scripting/man/samba-gpupdate.8.xml
new file mode 100644
index 0000000..c7c9963
--- /dev/null
+++ b/source4/scripting/man/samba-gpupdate.8.xml
@@ -0,0 +1,128 @@
+<?xml version="1.0" encoding="ISO-8859-1"?>
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN" "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
+<refentry id="samba-gpupdate.8">
+<refentryinfo><date>2017-07-11</date></refentryinfo>
+
+<refmeta>
+	<refentrytitle>SAMBA-GPUPDATE</refentrytitle>
+ <manvolnum>8</manvolnum>
+ <refmiscinfo class="source">Samba</refmiscinfo>
+ <refmiscinfo class="manual">System Administration tools</refmiscinfo>
+ <refmiscinfo class="version">4.8.0</refmiscinfo>
+</refmeta>
+
+<refnamediv>
+ <refname>samba-gpupdate</refname>
+ <refpurpose>apply group policy</refpurpose>
+</refnamediv>
+
+<refsynopsisdiv>
+ <cmdsynopsis>
+ <command>samba-gpupdate</command>
+ </cmdsynopsis>
+
+ <cmdsynopsis>
+ <command>samba-gpupdate</command>
+ <arg choice="opt">
+ <replaceable>options</replaceable>
+ </arg>
+ </cmdsynopsis>
+
+</refsynopsisdiv>
+
+
+<refsect1>
+ <title>DESCRIPTION</title>
+ <para>This tool is part of the
+ <citerefentry><refentrytitle>samba</refentrytitle>
+ <manvolnum>1</manvolnum></citerefentry> suite.</para>
+
+	<para><command>samba-gpupdate</command> is a script for
+	applying and unapplying Group Policy. It applies
+	password policies (minimum/maximum password age,
+	minimum password length, and password complexity),
+	Kerberos policies (user/service ticket lifetime and
+	renew lifetime), smb.conf policies,
+	hourly/daily/weekly/monthly cron scripts, sudo
+	privileges, Message of the Day and logon prompt
+	messages, and more.</para>
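+
+	<para>For example, <command>samba-gpupdate --target=Computer
+	--force</command> forces a reapplication of Computer policy,
+	and <command>samba-gpupdate -X</command> unapplies it
+	again.</para>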
+
+</refsect1>
+
+<refsect1>
+ <title>OPTIONS</title>
+
+<para><option>-h</option>, <option>--help</option>
+ show this help message and exit</para>
+
+<para><option>-H </option>URL, <option>--url</option>=<emphasis remap="I">URL</emphasis>
+ URL for the samdb</para>
+
+<para><option>-X</option>, <option>--unapply</option>
+ Unapply Group Policy</para>
+
+<para><option>--target</option>
+ {Computer | User}</para>
+
+<para><option>--force</option>
+ Reapplies all policy settings</para>
+
+<para><option>--rsop</option>
+ Print the Resultant Set of Policy</para>
+
+<para>Samba Common Options:</para>
+
+<para><option>-s </option>FILE, <option>--configfile</option>=<emphasis remap="I">FILE</emphasis>
+ Configuration file</para>
+
+<para><option>-d </option>DEBUGLEVEL, <option>--debuglevel</option>=<emphasis remap="I">DEBUGLEVEL</emphasis>
+ debug level</para>
+
+<para><option>--option</option>=<emphasis remap="I">OPTION</emphasis>
+ set smb.conf option from command line</para>
+
+<para><option>--realm</option>=<emphasis remap="I">REALM</emphasis>
+ set the realm name</para>
+
+<para>Version Options:</para>
+
+<para><option>-V</option>, <option>--version</option>
+ Display version number</para>
+
+<para>Credentials Options:</para>
+
+<para><option>--simple-bind-dn</option>=<emphasis remap="I">DN</emphasis>
+ DN to use for a simple bind</para>
+
+<para><option>--password</option>=<emphasis remap="I">PASSWORD</emphasis>
+ Password</para>
+
+<para><option>-U </option>USERNAME, <option>--username</option>=<emphasis remap="I">USERNAME</emphasis>
+ Username</para>
+
+<para><option>-W </option>WORKGROUP, <option>--workgroup</option>=<emphasis remap="I">WORKGROUP</emphasis>
+ Workgroup</para>
+
+<para><option>-N</option>, <option>--no-pass</option>
+ Don't ask for a password</para>
+
+<para><option>-k </option>KERBEROS, <option>--kerberos</option>=<emphasis remap="I">KERBEROS</emphasis>
+ Use Kerberos</para>
+
+<para><option>--ipaddress</option>=<emphasis remap="I">IPADDRESS</emphasis>
+ IP address of server</para>
+
+<para><option>-P</option>, <option>--machine-pass</option>
+ Use stored machine account password</para>
+
+</refsect1>
+
+<refsect1>
+ <title>AUTHOR</title>
+ <para>The original Samba software and related utilities were
+ created by Andrew Tridgell. Samba is now developed by the
+ Samba Team as an Open Source project similar to the way the
+ Linux kernel is developed.</para>
+</refsect1>
+
+</refentry>
diff --git a/source4/scripting/wscript_build b/source4/scripting/wscript_build
new file mode 100644
index 0000000..6728dec
--- /dev/null
+++ b/source4/scripting/wscript_build
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+from samba_utils import MODE_755
+
+sbin_files = ''
+man_files = ''
+if bld.CONFIG_SET('AD_DC_BUILD_IS_ENABLED'):
+ sbin_files = 'bin/samba_downgrade_db bin/samba_dnsupdate bin/samba_spnupdate bin/samba_upgradedns bin/samba_kcc '
+if not bld.env.disable_python:
+ sbin_files += 'bin/samba-gpupdate'
+ man_files = 'man/samba-gpupdate.8'
+
+if sbin_files:
+ bld.INSTALL_FILES('${SBINDIR}',
+ sbin_files,
+ chmod=MODE_755, python_fixup=True, flat=True)
+    if man_files and 'XSLTPROC_MANPAGES' in bld.env and bld.env['XSLTPROC_MANPAGES']:
+ bld.MANPAGES(man_files, True)
+
+if bld.CONFIG_SET('WITH_ADS'):
+ bld.INSTALL_FILES('${BINDIR}',
+ 'bin/samba-tool',
+ chmod=MODE_755, python_fixup=True, flat=True)
+
+bld.RECURSE('bin')