Diffstat (limited to 'source4/torture/drs')
-rw-r--r--  source4/torture/drs/drs_init.c | 80
-rw-r--r--  source4/torture/drs/drs_util.c | 167
-rw-r--r--  source4/torture/drs/python/cracknames.py | 204
-rw-r--r--  source4/torture/drs/python/delete_object.py | 376
-rw-r--r--  source4/torture/drs/python/drs_base.py | 632
-rw-r--r--  source4/torture/drs/python/fsmo.py | 152
-rw-r--r--  source4/torture/drs/python/getnc_exop.py | 1304
-rw-r--r--  source4/torture/drs/python/getnc_schema.py | 304
-rw-r--r--  source4/torture/drs/python/getnc_unpriv.py | 306
-rw-r--r--  source4/torture/drs/python/getncchanges.py | 1427
-rw-r--r--  source4/torture/drs/python/link_conflicts.py | 763
-rw-r--r--  source4/torture/drs/python/linked_attributes_drs.py | 162
-rw-r--r--  source4/torture/drs/python/repl_move.py | 2608
-rw-r--r--  source4/torture/drs/python/repl_rodc.py | 735
-rw-r--r--  source4/torture/drs/python/repl_schema.py | 444
-rw-r--r--  source4/torture/drs/python/repl_secdesc.py | 400
-rw-r--r--  source4/torture/drs/python/replica_sync.py | 747
-rw-r--r--  source4/torture/drs/python/replica_sync_rodc.py | 155
-rw-r--r--  source4/torture/drs/python/ridalloc_exop.py | 802
-rw-r--r--  source4/torture/drs/python/samba_tool_drs.py | 410
-rw-r--r--  source4/torture/drs/python/samba_tool_drs_critical.py | 98
-rw-r--r--  source4/torture/drs/python/samba_tool_drs_no_dns.py | 174
-rw-r--r--  source4/torture/drs/python/samba_tool_drs_showrepl.py | 377
-rw-r--r--  source4/torture/drs/rpc/dssync.c | 1072
-rw-r--r--  source4/torture/drs/rpc/msds_intid.c | 792
-rw-r--r--  source4/torture/drs/unit/prefixmap_tests.c | 900
-rw-r--r--  source4/torture/drs/unit/schemainfo_tests.c | 740
-rw-r--r--  source4/torture/drs/wscript_build | 12
28 files changed, 16343 insertions, 0 deletions
diff --git a/source4/torture/drs/drs_init.c b/source4/torture/drs/drs_init.c
new file mode 100644
index 0000000..bbe246d
--- /dev/null
+++ b/source4/torture/drs/drs_init.c
@@ -0,0 +1,80 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ DRSUAPI utility functions to be used in torture tests
+
+ Copyright (C) Kamen Mazdrashki <kamen.mazdrashki@postpath.com> 2009
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "includes.h"
+#include "torture/smbtorture.h"
+#include "torture/rpc/drsuapi.h"
+#include "dsdb/samdb/samdb.h"
+#include "torture/drs/proto.h"
+
+/**
+ * DRSUAPI tests to be executed remotely
+ */
+static struct torture_suite * torture_drs_rpc_suite(TALLOC_CTX *mem_ctx,
+ const char *suite_name)
+{
+ struct torture_suite *suite = torture_suite_create(mem_ctx, suite_name);
+
+ torture_drs_rpc_dssync_tcase(suite);
+ torture_drs_rpc_dsintid_tcase(suite);
+
+ suite->description = talloc_strdup(suite,
+ "DRSUAPI RPC Tests Suite");
+
+ return suite;
+}
+
+/**
+ * DRSUAPI unit tests to be executed locally
+ */
+static struct torture_suite * torture_drs_unit_suite(TALLOC_CTX *mem_ctx,
+ const char *suite_name)
+{
+ struct torture_suite *suite = torture_suite_create(mem_ctx, suite_name);
+
+ torture_drs_unit_prefixmap(suite);
+ torture_drs_unit_schemainfo(suite);
+
+ suite->description = talloc_strdup(suite,
+ "DRSUAPI Unit Tests Suite");
+
+ return suite;
+}
+
+/**
+ * DRSUAPI torture module initialization
+ */
+NTSTATUS torture_drs_init(TALLOC_CTX *ctx)
+{
+ struct torture_suite *suite;
+
+ /* register RPC related test cases */
+ suite = torture_drs_rpc_suite(ctx, "drs.rpc");
+ if (!suite) return NT_STATUS_NO_MEMORY;
+ torture_register_suite(ctx, suite);
+
+ /* register DRS Unit test cases */
+ suite = torture_drs_unit_suite(ctx, "drs.unit");
+ if (!suite) return NT_STATUS_NO_MEMORY;
+ torture_register_suite(ctx, suite);
+
+ return NT_STATUS_OK;
+}
diff --git a/source4/torture/drs/drs_util.c b/source4/torture/drs/drs_util.c
new file mode 100644
index 0000000..c43836e
--- /dev/null
+++ b/source4/torture/drs/drs_util.c
@@ -0,0 +1,167 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ DRSUAPI utility functions to be used in torture tests
+
+ Copyright (C) Kamen Mazdrashki <kamen.mazdrashki@postpath.com> 2009
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "includes.h"
+#include "torture/torture.h"
+#include "dsdb/samdb/samdb.h"
+#include "torture/rpc/drsuapi.h"
+#include "../lib/util/asn1.h"
+#include "torture/drs/proto.h"
+
+/**
+ * Decode an attribute OID (ATTRTYP) based on the MS documentation;
+ * see [MS-DRSR] section 5.16.4
+ *
+ * On success returns decoded OID and
+ * corresponding prefix_map index (if requested)
+ */
+bool drs_util_oid_from_attid(struct torture_context *tctx,
+ const struct drsuapi_DsReplicaOIDMapping_Ctr *prefix_map,
+ uint32_t attid,
+ const char **_oid,
+ int *map_idx)
+{
+ uint32_t i, hi_word, lo_word;
+ DATA_BLOB bin_oid = {NULL, 0};
+ char *oid;
+ struct drsuapi_DsReplicaOIDMapping *map_entry = NULL;
+ TALLOC_CTX *mem_ctx = talloc_named(tctx, 0, "util_drsuapi_oid_from_attid");
+
+ /* crack attid value */
+ hi_word = attid >> 16;
+ lo_word = attid & 0xFFFF;
+
+ /* check last entry in the prefix map is the special one */
+ map_entry = &prefix_map->mappings[prefix_map->num_mappings-1];
+ torture_assert(tctx,
+ (map_entry->id_prefix == 0)
+ && (*map_entry->oid.binary_oid == 0xFF),
+ "Last entry in Prefix Map is not the special one!");
+
+ /* locate corresponding prefixMap entry */
+ map_entry = NULL;
+ for (i = 0; i < prefix_map->num_mappings - 1; i++) {
+
+ if (hi_word == prefix_map->mappings[i].id_prefix) {
+ map_entry = &prefix_map->mappings[i];
+ if (map_idx) *map_idx = i;
+ break;
+ }
+ }
+
+ torture_assert(tctx, map_entry, "Unable to locate corresponding Prefix Map entry");
+
+ /* copy partial oid making enough room */
+ bin_oid.length = map_entry->oid.length + 2;
+ bin_oid.data = talloc_array(mem_ctx, uint8_t, bin_oid.length);
+ torture_assert(tctx, bin_oid.data, "Not enough memory");
+ memcpy(bin_oid.data, map_entry->oid.binary_oid, map_entry->oid.length);
+
+ if (lo_word < 128) {
+ bin_oid.length = bin_oid.length - 1;
+ bin_oid.data[bin_oid.length-1] = lo_word;
+ }
+ else {
+ if (lo_word >= 32768) {
+ lo_word -= 32768;
+ }
+ bin_oid.data[bin_oid.length-2] = ((lo_word / 128) % 128) + 128; /* (0x80 | ((lo_word>>7) & 0x7f)) */
+ bin_oid.data[bin_oid.length-1] = lo_word % 128; /* lo_word & 0x7f */
+ }
+
+ torture_assert(tctx,
+ ber_read_OID_String(tctx, bin_oid, &oid),
+ "Failed to decode binary OID");
+ talloc_free(mem_ctx);
+
+ *_oid = oid;
+
+ return true;
+}
+
+
+/**
+ * Loads dsdb_schema from ldb connection using remote prefixMap.
+ * The schema is (re)loaded only if:
+ * - the ldb has no schema attached yet, or
+ * - reload_schema is true
+ *
+ * This function is intended for tests that use the GetNCChanges() call
+ */
+bool drs_util_dsdb_schema_load_ldb(struct torture_context *tctx,
+ struct ldb_context *ldb,
+ const struct drsuapi_DsReplicaOIDMapping_Ctr *mapping_ctr,
+ bool reload_schema)
+{
+ int ret;
+ WERROR werr;
+ char *err_msg;
+ struct ldb_result *res;
+ struct ldb_dn *schema_dn;
+ struct dsdb_schema *ldap_schema;
+
+ ldap_schema = dsdb_get_schema(ldb, NULL);
+ if (ldap_schema && !reload_schema) {
+ return true;
+ }
+
+ schema_dn = ldb_get_schema_basedn(ldb);
+ torture_assert(tctx, schema_dn != NULL,
+ talloc_asprintf(tctx, "ldb_get_schema_basedn() failed: %s", ldb_errstring(ldb)));
+
+ ldap_schema = dsdb_new_schema(ldb);
+ torture_assert(tctx, ldap_schema != NULL, "dsdb_new_schema() failed!");
+
+ werr = dsdb_load_prefixmap_from_drsuapi(ldap_schema, mapping_ctr);
+ torture_assert_werr_ok(tctx, werr,
+ "Failed to construct prefixMap from drsuapi data");
+
+ /*
+ * load the attribute and objectClass definitions
+ */
+ ret = ldb_search(ldb, ldap_schema, &res,
+ schema_dn, LDB_SCOPE_ONELEVEL, NULL,
+ "(|(objectClass=attributeSchema)(objectClass=classSchema))");
+ if (ret != LDB_SUCCESS) {
+ err_msg = talloc_asprintf(tctx,
+ "failed to search attributeSchema or classSchema objects: %s",
+ ldb_errstring(ldb));
+ torture_fail(tctx, err_msg);
+ }
+
+ ret = dsdb_load_ldb_results_into_schema(tctx, ldb, ldap_schema, res, &err_msg);
+ if (ret != LDB_SUCCESS) {
+ err_msg = talloc_asprintf(tctx,
+ "dsdb_load_ldb_results_into_schema failed: %s",
+ err_msg);
+ torture_fail(tctx, err_msg);
+ }
+
+ talloc_free(res);
+
+ ret = dsdb_set_schema(ldb, ldap_schema, SCHEMA_WRITE);
+ if (ret != LDB_SUCCESS) {
+ torture_fail(tctx,
+ talloc_asprintf(tctx, "dsdb_set_schema() failed: %s", ldb_strerror(ret)));
+ }
+
+ return true;
+}
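
For readers of drs_util_oid_from_attid() above, here is a minimal Python sketch of the same attid-cracking arithmetic (MS-DRSR 5.16.4). It assumes, purely for illustration, a prefix table keyed by the upper word and holding dotted-string OID prefixes, whereas the C code works on the BER-encoded binary prefixes from the prefixMap:

def crack_attid(attid, prefix_table):
    """Map an ATTRTYP to a dotted OID string using a {hi_word: prefix} table."""
    hi_word = attid >> 16       # selects the prefixMap entry
    lo_word = attid & 0xFFFF    # encodes the last arc of the OID
    if lo_word >= 32768:        # mirrors the folding handled in the C code above
        lo_word -= 32768
    return "%s.%u" % (prefix_table[hi_word], lo_word)

# Illustrative only - the prefix value below is an assumption, not a live prefixMap:
# crack_attid(0x00020001, {0x0002: "1.2.840.113556.1.5"}) == "1.2.840.113556.1.5.1"
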
diff --git a/source4/torture/drs/python/cracknames.py b/source4/torture/drs/python/cracknames.py
new file mode 100644
index 0000000..f244605
--- /dev/null
+++ b/source4/torture/drs/python/cracknames.py
@@ -0,0 +1,204 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) Catalyst .Net Ltd 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.tests
+import ldb
+import drs_base
+
+from samba.dcerpc import drsuapi
+
+
+class DrsCracknamesTestCase(drs_base.DrsBaseTestCase):
+ def setUp(self):
+ super(DrsCracknamesTestCase, self).setUp()
+ (self.drs, self.drs_handle) = self._ds_bind(self.dnsname_dc1)
+
+ self.ou = "ou=Cracknames_ou,%s" % self.ldb_dc1.get_default_basedn()
+ self.username = "Cracknames_user"
+ self.user = "cn=%s,%s" % (self.username, self.ou)
+
+ self.ldb_dc1.add({
+ "dn": self.ou,
+ "objectclass": "organizationalUnit"})
+
+ self.user_record = {
+ "dn": self.user,
+ "objectclass": "user",
+ "sAMAccountName": self.username,
+ "userPrincipalName": "test@test.com",
+ "servicePrincipalName": "test/%s" % self.ldb_dc1.get_default_basedn(),
+ "displayName": "test"}
+
+ self.ldb_dc1.add(self.user_record)
+ self.ldb_dc1.delete(self.user_record["dn"])
+ self.ldb_dc1.add(self.user_record)
+
+ # The formats specified in MS-DRSR 4.1.4.13; DS_NAME_FORMAT
+ # We don't support any of the ones specified in 4.1.4.1.2.
+ self.formats = {
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_FQDN_1779,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_NT4_ACCOUNT,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_DISPLAY,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_CANONICAL,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_USER_PRINCIPAL,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_CANONICAL_EX,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_SERVICE_PRINCIPAL,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_SID_OR_SID_HISTORY,
+ # This format is not supported by Windows (or us)
+ # drsuapi.DRSUAPI_DS_NAME_FORMAT_DNS_DOMAIN,
+ }
+
+ def tearDown(self):
+ self.ldb_dc1.delete(self.user)
+ self.ldb_dc1.delete(self.ou)
+ super(DrsCracknamesTestCase, self).tearDown()
+
+ def test_Cracknames(self):
+ """
+ Verifies that we can crack a name from any of the standard formats
+ (DS_NAME_FORMAT) to a GUID, and that we can crack a
+ GUID to any of the standard formats.
+
+ GUID was chosen just so that we don't have to do an n^2 loop.
+ """
+ (result, ctr) = self._do_cracknames(self.user,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_FQDN_1779,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID)
+
+ self.assertEqual(ctr.count, 1)
+ self.assertEqual(ctr.array[0].status,
+ drsuapi.DRSUAPI_DS_NAME_STATUS_OK)
+
+ user_guid = ctr.array[0].result_name
+
+ for name_format in self.formats:
+ (result, ctr) = self._do_cracknames(user_guid,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID,
+ name_format)
+
+ self.assertEqual(ctr.count, 1)
+ self.assertEqual(ctr.array[0].status,
+ drsuapi.DRSUAPI_DS_NAME_STATUS_OK,
+ "Expected 0, got %s, desired format is %s"
+ % (ctr.array[0].status, name_format))
+
+ (result, ctr) = self._do_cracknames(ctr.array[0].result_name,
+ name_format,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID)
+
+ self.assertEqual(ctr.count, 1)
+ self.assertEqual(ctr.array[0].status,
+ drsuapi.DRSUAPI_DS_NAME_STATUS_OK,
+ "Expected 0, got %s, offered format is %s"
+ % (ctr.array[0].status, name_format))
+
+ def test_MultiValuedAttribute(self):
+ """
+ Verifies that, if we try to crack a name with the desired output
+ being a multi-valued attribute, it returns
+ DRSUAPI_DS_NAME_STATUS_NOT_UNIQUE.
+ """
+ username = "Cracknames_user_MVA"
+ user = "cn=%s,%s" % (username, self.ou)
+
+ user_record = {
+ "dn": user,
+ "objectclass": "user",
+ "sAMAccountName": username,
+ "userPrincipalName": "test2@test.com",
+ "servicePrincipalName": ["test2/%s" % self.ldb_dc1.get_default_basedn(),
+ "test3/%s" % self.ldb_dc1.get_default_basedn()],
+ "displayName": "test2"}
+
+ self.ldb_dc1.add(user_record)
+
+ (result, ctr) = self._do_cracknames(user,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_FQDN_1779,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID)
+
+ self.assertEqual(ctr.count, 1)
+ self.assertEqual(ctr.array[0].status,
+ drsuapi.DRSUAPI_DS_NAME_STATUS_OK)
+
+ user_guid = ctr.array[0].result_name
+
+ (result, ctr) = self._do_cracknames(user_guid,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_SERVICE_PRINCIPAL)
+
+ self.assertEqual(ctr.count, 1)
+ self.assertEqual(ctr.array[0].status,
+ drsuapi.DRSUAPI_DS_NAME_STATUS_NOT_UNIQUE)
+
+ self.ldb_dc1.delete(user)
+
+ def test_NoSPNAttribute(self):
+ """
+ Verifies that, if we try to crack a name with the desired output
+ being an SPN and the account has no servicePrincipalName, it returns
+ DRSUAPI_DS_NAME_STATUS_NOT_FOUND.
+ """
+ username = "Cracknames_no_SPN"
+ user = "cn=%s,%s" % (username, self.ou)
+
+ user_record = {
+ "dn": user,
+ "objectclass": "user",
+ "sAMAccountName" : username,
+ "userPrincipalName" : "test4@test.com",
+ "displayName" : "test4"}
+
+ self.ldb_dc1.add(user_record)
+
+ (result, ctr) = self._do_cracknames(user,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_FQDN_1779,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID)
+
+ self.assertEqual(ctr.count, 1)
+ self.assertEqual(ctr.array[0].status,
+ drsuapi.DRSUAPI_DS_NAME_STATUS_OK)
+
+ user_guid = ctr.array[0].result_name
+
+ (result, ctr) = self._do_cracknames(user_guid,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_SERVICE_PRINCIPAL)
+
+ self.assertEqual(ctr.count, 1)
+ self.assertEqual(ctr.array[0].status,
+ drsuapi.DRSUAPI_DS_NAME_STATUS_NOT_FOUND)
+
+ self.ldb_dc1.delete(user)
+
+ def _do_cracknames(self, name, format_offered, format_desired):
+ req = drsuapi.DsNameRequest1()
+ names = drsuapi.DsNameString()
+ names.str = name
+
+ req.codepage = 1252 # Windows-1252 (Western European); the value doesn't really matter here
+ req.language = 1033
+ req.format_flags = 0
+ req.format_offered = format_offered
+ req.format_desired = format_desired
+ req.count = 1
+ req.names = [names]
+
+ (result, ctr) = self.drs.DsCrackNames(self.drs_handle, 1, req)
+ return (result, ctr)
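
The test above drives DsCrackNames through the drs_base helpers. As a rough standalone sketch, the same call (a sealed bind plus a level-1 request) could be made as below; the binding string and the credential/loadparm plumbing are simplified assumptions rather than what drs_base sets up:

from samba.dcerpc import drsuapi
from samba.drs_utils import drs_DsBind

def crack_one_name(server, lp, creds, name, fmt_offered, fmt_desired):
    # bind to the DRSUAPI endpoint with sealing, as the DRS tests do
    drs = drsuapi.drsuapi("ncacn_ip_tcp:%s[seal]" % server, lp, creds)
    (drs_handle, _) = drs_DsBind(drs)

    ns = drsuapi.DsNameString()
    ns.str = name

    req = drsuapi.DsNameRequest1()
    req.codepage = 1252
    req.language = 1033
    req.format_flags = 0
    req.format_offered = fmt_offered
    req.format_desired = fmt_desired
    req.count = 1
    req.names = [ns]

    # the interesting fields are ctr.array[0].status and ctr.array[0].result_name
    (result, ctr) = drs.DsCrackNames(drs_handle, 1, req)
    return ctr
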
diff --git a/source4/torture/drs/python/delete_object.py b/source4/torture/drs/python/delete_object.py
new file mode 100644
index 0000000..5f2f703
--- /dev/null
+++ b/source4/torture/drs/python/delete_object.py
@@ -0,0 +1,376 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Unix SMB/CIFS implementation.
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2010
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc2_dns_name
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN delete_object -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import time
+
+
+from ldb import (
+ SCOPE_SUBTREE,
+)
+
+import drs_base
+import ldb
+
+
+class DrsDeleteObjectTestCase(drs_base.DrsBaseTestCase):
+
+ def setUp(self):
+ super(DrsDeleteObjectTestCase, self).setUp()
+ # temporarily disable automatic replication
+ self._disable_all_repl(self.dnsname_dc1)
+ self._disable_all_repl(self.dnsname_dc2)
+ # make sure DCs are synchronized before the test
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+
+ def tearDown(self):
+ self._enable_all_repl(self.dnsname_dc1)
+ self._enable_all_repl(self.dnsname_dc2)
+ super(DrsDeleteObjectTestCase, self).tearDown()
+
+ def _make_username(self):
+ return "DrsDelObjUser_" + time.strftime("%s", time.gmtime())
+
+ # now also used to check the group
+ def _check_obj(self, sam_ldb, obj_orig, is_deleted):
+ # search the user by guid as it may be deleted
+ guid_str = self._GUID_string(obj_orig["objectGUID"][0])
+ expression = "(objectGUID=%s)" % guid_str
+ res = sam_ldb.search(base=self.domain_dn,
+ expression=expression,
+ controls=["show_deleted:1"])
+ self.assertEqual(len(res), 1)
+ user_cur = res[0]
+ # Deleted Object base DN
+ dodn = self._deleted_objects_dn(sam_ldb)
+ # now check properties of the user
+ cn_orig = str(obj_orig["cn"][0])
+ cn_cur = str(user_cur["cn"][0])
+ name_orig = str(obj_orig["name"][0])
+ name_cur = str(user_cur["name"][0])
+ if is_deleted:
+ self.assertEqual(str(user_cur["isDeleted"][0]), "TRUE")
+ self.assertFalse("objectCategory" in user_cur)
+ self.assertFalse("sAMAccountType" in user_cur)
+ self.assertFalse("description" in user_cur)
+ self.assertFalse("memberOf" in user_cur)
+ self.assertFalse("member" in user_cur)
+ self.assertTrue(dodn in str(user_cur["dn"]),
+ "User %s is deleted but it is not located under %s (found at %s)!" % (name_orig, dodn, user_cur["dn"]))
+ self.assertEqual(name_cur, name_orig + "\nDEL:" + guid_str)
+ self.assertEqual(name_cur, user_cur.dn.get_rdn_value())
+ self.assertEqual(cn_cur, cn_orig + "\nDEL:" + guid_str)
+ self.assertEqual(name_cur, cn_cur)
+ else:
+ self.assertFalse("isDeleted" in user_cur)
+ self.assertEqual(name_cur, name_orig)
+ self.assertEqual(name_cur, user_cur.dn.get_rdn_value())
+ self.assertEqual(cn_cur, cn_orig)
+ self.assertEqual(name_cur, cn_cur)
+ self.assertEqual(obj_orig["dn"], user_cur["dn"])
+ self.assertTrue(dodn not in str(user_cur["dn"]))
+ return user_cur
+
+ def test_ReplicateDeletedObject1(self):
+ """Verifies how a deleted-object is replicated between two DCs.
+ This test verifies that:
+ - the deleted object is replicated properly
+ - after replication, the object's state conforms to
+ a tombstone-object state
+ - the object modifications are replicated first, to the server
+ on which the user has already been deleted
+
+ TODO: It would also be good to check replPropertyMetaData.
+ TODO: Check for deleted-object state, depending on the DC's features,
+ when the recycle-bin is enabled
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username, password="P@sswOrd!")
+ ldb_res = self.ldb_dc1.search(base=self.domain_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=False)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ # delete user on DC1
+ self.ldb_dc1.delete(user_dn)
+ # check user info on DC1 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=True)
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_orig, is_deleted=False)
+
+ # The user should not have a description or memberOf yet
+ self.assertFalse("description" in user_cur)
+ self.assertFalse("memberOf" in user_cur)
+
+ self.ldb_dc2.newgroup("group_%s" % username)
+
+ self.ldb_dc2.newgroup("group2_%s" % username)
+
+ ldb_res = self.ldb_dc2.search(base=self.domain_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=group_%s)" % username)
+ self.assertTrue(len(ldb_res) == 1)
+ self.assertTrue("sAMAccountName" in ldb_res[0])
+ group_orig = ldb_res[0]
+ group_dn = ldb_res[0]["dn"]
+
+ # modify user on DC2 to have a description and be a member of the group
+ m = ldb.Message()
+ m.dn = user_dn
+ m["description"] = ldb.MessageElement("a description",
+ ldb.FLAG_MOD_ADD, "description")
+ self.ldb_dc2.modify(m)
+ m = ldb.Message()
+ m.dn = group_dn
+ m["member"] = ldb.MessageElement(str(user_dn),
+ ldb.FLAG_MOD_ADD, "member")
+ self.ldb_dc2.modify(m)
+
+ ldb_res = self.ldb_dc2.search(base=self.domain_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=group2_%s)" % username)
+ self.assertTrue(len(ldb_res) == 1)
+ self.assertTrue("sAMAccountName" in ldb_res[0])
+ group2_dn = ldb_res[0]["dn"]
+ group2_orig = ldb_res[0]
+
+ m = ldb.Message()
+ m.dn = group2_dn
+ m["member"] = ldb.MessageElement(str(group_dn),
+ ldb.FLAG_MOD_ADD, "member")
+ self.ldb_dc2.modify(m)
+
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_orig, is_deleted=False)
+
+ # The user should now have a description and be a member of the group
+ self.assertTrue("description" in user_cur)
+ self.assertTrue("memberOf" in user_cur)
+
+ ldb_res = self.ldb_dc2.search(base=self.domain_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=group_%s)" % username)
+ self.assertTrue(len(ldb_res) == 1)
+
+ # This group is a member of another group
+ self.assertTrue("memberOf" in ldb_res[0])
+
+ # The user was deleted on DC1, but check that the modification we just made on DC2 is present
+ self.assertTrue("member" in ldb_res[0])
+
+ # trigger replication from DC2 to DC1
+ # to check if deleted object gets restored
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check user info on DC1 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=True)
+ # check user info on DC2 - should be valid user
+ self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_orig, is_deleted=False)
+
+ ldb_res = self.ldb_dc1.search(base=self.domain_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=group_%s)" % username)
+ self.assertTrue(len(ldb_res) == 1)
+
+ # This group is a member of another group
+ self.assertTrue("memberOf" in ldb_res[0])
+
+ # The user was deleted on DC1; check that the member modification we made on DC2 never replicated in
+ self.assertFalse("member" in ldb_res[0])
+
+ # trigger replication from DC1 to DC2
+ # to check if deleted object is replicated
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC1 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=True)
+ # check user info on DC2 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_orig, is_deleted=True)
+
+ # delete group on DC1
+ self.ldb_dc1.delete(group_dn)
+
+ # trigger replication from DC1 to DC2
+ # to check if deleted object is replicated
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ # check group info on DC1 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=group_orig, is_deleted=True)
+ # check group info on DC2 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=group_orig, is_deleted=True)
+
+ ldb_res = self.ldb_dc2.search(base=self.domain_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=group2_%s)" % username)
+ self.assertTrue(len(ldb_res) == 1)
+ self.assertFalse("member" in ldb_res[0])
+
+ # delete group on DC1
+ self.ldb_dc1.delete(group2_dn)
+
+ # trigger replication from DC1 to DC2
+ # to check if deleted object is replicated
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ # check group info on DC1 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=group2_orig, is_deleted=True)
+ # check group info on DC2 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=group2_orig, is_deleted=True)
+
+ def test_ReplicateDeletedObject2(self):
+ """Verifies how a deleted-object is replicated between two DCs.
+ This test verifies that:
+ - the deleted object is replicated properly
+ - after replication, the object's state conforms to
+ a tombstone-object state
+ - the delete is replicated first, to the server holding
+ the object modifications
+
+ TODO: It would also be good to check replPropertyMetaData.
+ TODO: Check for deleted-object state, depending on the DC's features,
+ when the recycle-bin is enabled
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username, password="P@sswOrd!")
+ ldb_res = self.ldb_dc1.search(base=self.domain_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=False)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ # delete user on DC1
+ self.ldb_dc1.delete(user_dn)
+ # check user info on DC1 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=True)
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_orig, is_deleted=False)
+
+ # The user should not have a description or memberOf yet
+ self.assertFalse("description" in user_cur)
+ self.assertFalse("memberOf" in user_cur)
+
+ self.ldb_dc2.newgroup("group_%s" % username)
+
+ self.ldb_dc2.newgroup("group2_%s" % username)
+
+ ldb_res = self.ldb_dc2.search(base=self.domain_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=group_%s)" % username)
+ self.assertTrue(len(ldb_res) == 1)
+ self.assertTrue("sAMAccountName" in ldb_res[0])
+ group_dn = ldb_res[0]["dn"]
+
+ # modify user on DC2 to have a description and be a member of the group
+ m = ldb.Message()
+ m.dn = user_dn
+ m["description"] = ldb.MessageElement("a description",
+ ldb.FLAG_MOD_ADD, "description")
+ self.ldb_dc2.modify(m)
+ m = ldb.Message()
+ m.dn = group_dn
+ m["member"] = ldb.MessageElement(str(user_dn),
+ ldb.FLAG_MOD_ADD, "member")
+ self.ldb_dc2.modify(m)
+
+ ldb_res = self.ldb_dc2.search(base=self.domain_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=group2_%s)" % username)
+ self.assertTrue(len(ldb_res) == 1)
+ self.assertTrue("sAMAccountName" in ldb_res[0])
+ group2_dn = ldb_res[0]["dn"]
+
+ m = ldb.Message()
+ m.dn = group2_dn
+ m["member"] = ldb.MessageElement(str(group_dn),
+ ldb.FLAG_MOD_ADD, "member")
+ self.ldb_dc2.modify(m)
+
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_orig, is_deleted=False)
+
+ # The user should now have a description and be a member of the group
+ self.assertTrue("description" in user_cur)
+ self.assertTrue("memberOf" in user_cur)
+
+ # trigger replication from DC1 to DC2
+ # to check if deleted object gets restored
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC1 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=True)
+ # check user info on DC2 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_orig, is_deleted=True)
+
+ ldb_res = self.ldb_dc2.search(base=self.domain_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=group_%s)" % username)
+ self.assertTrue(len(ldb_res) == 1)
+ self.assertTrue("memberOf" in ldb_res[0])
+ self.assertFalse("member" in ldb_res[0])
+
+ # trigger replication from DC2 to DC1
+ # to check if deleted object is replicated
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check user info on DC1 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=True)
+ # check user info on DC2 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_orig, is_deleted=True)
+
+ ldb_res = self.ldb_dc1.search(base=self.domain_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=group_%s)" % username)
+ self.assertTrue(len(ldb_res) == 1)
+ self.assertTrue("memberOf" in ldb_res[0])
+ self.assertFalse("member" in ldb_res[0])
+
+ # delete group on DC1
+ self.ldb_dc1.delete(group_dn)
+ self.ldb_dc1.delete(group2_dn)
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
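
A condensed sketch of the tombstone checks that _check_obj above performs: look up the Deleted Objects container via its well-known GUID (the same WKGUID string drs_base.py uses) and compute the renamed RDN value a tombstone is expected to carry. sam_ldb, domain_dn, orig_name and guid_str are assumed to come from the test setup:

from ldb import SCOPE_BASE

def deleted_objects_dn(sam_ldb, domain_dn):
    # well-known GUID of the Deleted Objects container
    wkdn = "<WKGUID=18E2EA80684F11D2B9AA00C04F79F805,%s>" % domain_dn
    res = sam_ldb.search(base=wkdn, scope=SCOPE_BASE,
                         controls=["show_deleted:1"])
    return str(res[0]["dn"])

def expected_tombstone_name(orig_name, guid_str):
    # a tombstone keeps its old name plus "\nDEL:<objectGUID>" as the new RDN value
    return orig_name + "\nDEL:" + guid_str
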
diff --git a/source4/torture/drs/python/drs_base.py b/source4/torture/drs/python/drs_base.py
new file mode 100644
index 0000000..bf98e59
--- /dev/null
+++ b/source4/torture/drs/python/drs_base.py
@@ -0,0 +1,632 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Unix SMB/CIFS implementation.
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2016
+# Copyright (C) Catalyst IT Ltd. 2016
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import time
+import os
+import ldb
+
+sys.path.insert(0, "bin/python")
+import samba.tests
+from samba.tests.samba_tool.base import SambaToolCmdTest
+from samba import dsdb
+from samba.dcerpc import drsuapi, misc, drsblobs, security
+from samba.ndr import ndr_unpack, ndr_pack
+from samba.drs_utils import drs_DsBind
+from samba import gensec
+from ldb import (
+ SCOPE_BASE,
+ Message,
+ FLAG_MOD_REPLACE,
+)
+from samba.common import cmp
+from samba.common import get_string
+
+
+class DrsBaseTestCase(SambaToolCmdTest):
+ """Base class implementation for all DRS python tests.
+ It is intended to provide the common initialization
+ and functionality used by all DRS tests in the drs/python
+ test package. For instance, DC1 and DC2 are always used
+ to pass URLs for DCs to test against"""
+
+ def setUp(self):
+ super(DrsBaseTestCase, self).setUp()
+ creds = self.get_credentials()
+ creds.set_gensec_features(creds.get_gensec_features() | gensec.FEATURE_SEAL)
+
+ # connect to DCs
+ self.url_dc1 = samba.tests.env_get_var_value("DC1")
+ (self.ldb_dc1, self.info_dc1) = samba.tests.connect_samdb_ex(self.url_dc1,
+ ldap_only=True)
+ self.url_dc2 = samba.tests.env_get_var_value("DC2")
+ (self.ldb_dc2, self.info_dc2) = samba.tests.connect_samdb_ex(self.url_dc2,
+ ldap_only=True)
+ self.test_ldb_dc = self.ldb_dc1
+
+ # cache some of RootDSE props
+ self.schema_dn = str(self.info_dc1["schemaNamingContext"][0])
+ self.domain_dn = str(self.info_dc1["defaultNamingContext"][0])
+ self.config_dn = str(self.info_dc1["configurationNamingContext"][0])
+ self.forest_level = int(self.info_dc1["forestFunctionality"][0])
+
+ # we will need DCs DNS names for 'samba-tool drs' command
+ self.dnsname_dc1 = str(self.info_dc1["dnsHostName"][0])
+ self.dnsname_dc2 = str(self.info_dc2["dnsHostName"][0])
+
+ # for debugging the test code
+ self._debug = False
+
+ def tearDown(self):
+ super(DrsBaseTestCase, self).tearDown()
+
+ def set_test_ldb_dc(self, ldb_dc):
+ """Sets which DC's LDB we perform operations on during the test"""
+ self.test_ldb_dc = ldb_dc
+
+ def _GUID_string(self, guid):
+ return get_string(self.test_ldb_dc.schema_format_value("objectGUID", guid))
+
+ def _ldap_schemaUpdateNow(self, sam_db):
+ rec = {"dn": "",
+ "schemaUpdateNow": "1"}
+ m = Message.from_dict(sam_db, rec, FLAG_MOD_REPLACE)
+ sam_db.modify(m)
+
+ def _deleted_objects_dn(self, sam_ldb):
+ wkdn = "<WKGUID=18E2EA80684F11D2B9AA00C04F79F805,%s>" % self.domain_dn
+ res = sam_ldb.search(base=wkdn,
+ scope=SCOPE_BASE,
+ controls=["show_deleted:1"])
+ self.assertEqual(len(res), 1)
+ return str(res[0]["dn"])
+
+ def _lost_and_found_dn(self, sam_ldb, nc):
+ wkdn = "<WKGUID=%s,%s>" % (dsdb.DS_GUID_LOSTANDFOUND_CONTAINER, nc)
+ res = sam_ldb.search(base=wkdn,
+ scope=SCOPE_BASE)
+ self.assertEqual(len(res), 1)
+ return str(res[0]["dn"])
+
+ def _make_obj_name(self, prefix):
+ return prefix + time.strftime("%s", time.gmtime())
+
+ def _samba_tool_cmd_list(self, drs_command):
+ # make command line credentials string
+
+ # If the test runs on Windows then it can provide its own auth string
+ if hasattr(self, 'cmdline_auth'):
+ cmdline_auth = self.cmdline_auth
+ else:
+ ccache_name = self.get_creds_ccache_name()
+
+ # Tunnel the command line credentials down to the
+ # subcommand to avoid a new kinit
+ cmdline_auth = "--use-krb5-ccache=%s" % ccache_name
+
+ # bin/samba-tool drs <drs_command> <cmdline_auth>
+ return ["drs", drs_command, cmdline_auth]
+
+ def _net_drs_replicate(self, DC, fromDC, nc_dn=None, forced=True,
+ local=False, full_sync=False, single=False):
+ if nc_dn is None:
+ nc_dn = self.domain_dn
+ # make base command line
+ samba_tool_cmdline = self._samba_tool_cmd_list("replicate")
+ # bin/samba-tool drs replicate <Dest_DC_NAME> <Src_DC_NAME> <Naming Context>
+ samba_tool_cmdline += [DC, fromDC, nc_dn]
+
+ if forced:
+ samba_tool_cmdline += ["--sync-forced"]
+ if local:
+ samba_tool_cmdline += ["--local"]
+ if full_sync:
+ samba_tool_cmdline += ["--full-sync"]
+ if single:
+ samba_tool_cmdline += ["--single-object"]
+
+ (result, out, err) = self.runsubcmd(*samba_tool_cmdline)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+
+ def _enable_inbound_repl(self, DC):
+ # make base command line
+ samba_tool_cmd = self._samba_tool_cmd_list("options")
+ # enable inbound replication (clear the DISABLE_INBOUND_REPL option)
+ samba_tool_cmd += [DC, "--dsa-option=-DISABLE_INBOUND_REPL"]
+ (result, out, err) = self.runsubcmd(*samba_tool_cmd)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+
+ def _disable_inbound_repl(self, DC):
+ # make base command line
+ samba_tool_cmd = self._samba_tool_cmd_list("options")
+ # disable inbound replication
+ samba_tool_cmd += [DC, "--dsa-option=+DISABLE_INBOUND_REPL"]
+ (result, out, err) = self.runsubcmd(*samba_tool_cmd)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+
+ def _enable_all_repl(self, DC):
+ self._enable_inbound_repl(DC)
+ # make base command line
+ samba_tool_cmd = self._samba_tool_cmd_list("options")
+ # enable outbound replication (clear the DISABLE_OUTBOUND_REPL option)
+ samba_tool_cmd += [DC, "--dsa-option=-DISABLE_OUTBOUND_REPL"]
+ (result, out, err) = self.runsubcmd(*samba_tool_cmd)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+
+ def _disable_all_repl(self, DC):
+ self._disable_inbound_repl(DC)
+ # make base command line
+ samba_tool_cmd = self._samba_tool_cmd_list("options")
+ # disable outbound replication
+ samba_tool_cmd += [DC, "--dsa-option=+DISABLE_OUTBOUND_REPL"]
+ (result, out, err) = self.runsubcmd(*samba_tool_cmd)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+
+ def _get_highest_hwm_utdv(self, ldb_conn):
+ res = ldb_conn.search("", scope=ldb.SCOPE_BASE, attrs=["highestCommittedUSN"])
+ hwm = drsuapi.DsReplicaHighWaterMark()
+ hwm.tmp_highest_usn = int(res[0]["highestCommittedUSN"][0])
+ hwm.reserved_usn = 0
+ hwm.highest_usn = hwm.tmp_highest_usn
+
+ utdv = drsuapi.DsReplicaCursorCtrEx()
+ cursors = []
+ c1 = drsuapi.DsReplicaCursor()
+ c1.source_dsa_invocation_id = misc.GUID(ldb_conn.get_invocation_id())
+ c1.highest_usn = hwm.highest_usn
+ cursors.append(c1)
+ utdv.count = len(cursors)
+ utdv.cursors = cursors
+ return (hwm, utdv)
+
+ def _get_identifier(self, ldb_conn, dn):
+ res = ldb_conn.search(dn, scope=ldb.SCOPE_BASE,
+ attrs=["objectGUID", "objectSid"])
+ id = drsuapi.DsReplicaObjectIdentifier()
+ id.guid = ndr_unpack(misc.GUID, res[0]['objectGUID'][0])
+ if "objectSid" in res[0]:
+ id.sid = ndr_unpack(security.dom_sid, res[0]['objectSid'][0])
+ id.dn = str(res[0].dn)
+ return id
+
+ def _get_ctr6_links(self, ctr6):
+ """
+ Unpacks the linked attributes from a DsGetNCChanges response
+ and returns them as a list.
+ """
+ ctr6_links = []
+ for lidx in range(0, ctr6.linked_attributes_count):
+ l = ctr6.linked_attributes[lidx]
+ try:
+ target = ndr_unpack(drsuapi.DsReplicaObjectIdentifier3,
+ l.value.blob)
+ except:
+ target = ndr_unpack(drsuapi.DsReplicaObjectIdentifier3Binary,
+ l.value.blob)
+ al = AbstractLink(l.attid, l.flags,
+ l.identifier.guid,
+ target.guid, target.dn)
+ ctr6_links.append(al)
+
+ return ctr6_links
+
+ def _get_ctr6_object_guids(self, ctr6):
+ """Returns all the object GUIDs in a GetNCChanges response"""
+ guid_list = []
+
+ obj = ctr6.first_object
+ for i in range(0, ctr6.object_count):
+ guid_list.append(str(obj.object.identifier.guid))
+ obj = obj.next_object
+
+ return guid_list
+
+ def _ctr6_debug(self, ctr6):
+ """
+ Displays basic info contained in a DsGetNCChanges response.
+ Having this debug code makes it easier to see differences in behaviour
+ between Samba and Windows. Turn on the self._debug flag to see it.
+ """
+
+ if self._debug:
+ print("------------ recvd CTR6 -------------")
+
+ next_object = ctr6.first_object
+ for i in range(0, ctr6.object_count):
+ print("Obj %d: %s %s" % (i, next_object.object.identifier.dn[:25],
+ next_object.object.identifier.guid))
+ next_object = next_object.next_object
+
+ print("Linked Attributes: %d" % ctr6.linked_attributes_count)
+ for lidx in range(0, ctr6.linked_attributes_count):
+ l = ctr6.linked_attributes[lidx]
+ try:
+ target = ndr_unpack(drsuapi.DsReplicaObjectIdentifier3,
+ l.value.blob)
+ except:
+ target = ndr_unpack(drsuapi.DsReplicaObjectIdentifier3Binary,
+ l.value.blob)
+
+ print("Link Tgt %s... <-- Src %s"
+ % (target.dn[:25], l.identifier.guid))
+ state = "Del"
+ if l.flags & drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE:
+ state = "Act"
+ print(" v%u %s changed %u" % (l.meta_data.version, state,
+ l.meta_data.originating_change_time))
+
+ print("HWM: %d" % (ctr6.new_highwatermark.highest_usn))
+ print("Tmp HWM: %d" % (ctr6.new_highwatermark.tmp_highest_usn))
+ print("More data: %d" % (ctr6.more_data))
+
+ def _get_replication(self, replica_flags,
+ drs_error=drsuapi.DRSUAPI_EXOP_ERR_NONE, drs=None, drs_handle=None,
+ highwatermark=None, uptodateness_vector=None,
+ more_flags=0, max_objects=133, exop=0,
+ dest_dsa=drsuapi.DRSUAPI_DS_BIND_GUID_W2K3,
+ source_dsa=None, invocation_id=None, nc_dn_str=None):
+ """
+ Builds a DsGetNCChanges request based on the information provided
+ and returns the response received from the DC.
+ """
+ if source_dsa is None:
+ source_dsa = self.test_ldb_dc.get_ntds_GUID()
+ if invocation_id is None:
+ invocation_id = self.test_ldb_dc.get_invocation_id()
+ if nc_dn_str is None:
+ nc_dn_str = self.test_ldb_dc.domain_dn()
+
+ if highwatermark is None:
+ if self.default_hwm is None:
+ (highwatermark, _) = self._get_highest_hwm_utdv(self.test_ldb_dc)
+ else:
+ highwatermark = self.default_hwm
+
+ if drs is None:
+ drs = self.drs
+ if drs_handle is None:
+ drs_handle = self.drs_handle
+
+ req10 = self._getnc_req10(dest_dsa=dest_dsa,
+ invocation_id=invocation_id,
+ nc_dn_str=nc_dn_str,
+ exop=exop,
+ max_objects=max_objects,
+ replica_flags=replica_flags,
+ more_flags=more_flags)
+ req10.highwatermark = highwatermark
+ if uptodateness_vector is not None:
+ uptodateness_vector_v1 = drsuapi.DsReplicaCursorCtrEx()
+ cursors = []
+ for i in range(0, uptodateness_vector.count):
+ c = uptodateness_vector.cursors[i]
+ c1 = drsuapi.DsReplicaCursor()
+ c1.source_dsa_invocation_id = c.source_dsa_invocation_id
+ c1.highest_usn = c.highest_usn
+ cursors.append(c1)
+ uptodateness_vector_v1.count = len(cursors)
+ uptodateness_vector_v1.cursors = cursors
+ req10.uptodateness_vector = uptodateness_vector_v1
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 10, req10)
+ self._ctr6_debug(ctr)
+
+ self.assertEqual(level, 6, "expected level 6 response!")
+ self.assertEqual(ctr.source_dsa_guid, misc.GUID(source_dsa))
+ self.assertEqual(ctr.source_dsa_invocation_id, misc.GUID(invocation_id))
+ self.assertEqual(ctr.extended_ret, drs_error)
+
+ return ctr
+
+ def _check_replication(self, expected_dns, replica_flags, expected_links=None,
+ drs_error=drsuapi.DRSUAPI_EXOP_ERR_NONE, drs=None, drs_handle=None,
+ highwatermark=None, uptodateness_vector=None,
+ more_flags=0, more_data=False,
+ dn_ordered=True, links_ordered=True,
+ max_objects=133, exop=0,
+ dest_dsa=drsuapi.DRSUAPI_DS_BIND_GUID_W2K3,
+ source_dsa=None, invocation_id=None, nc_dn_str=None,
+ nc_object_count=0, nc_linked_attributes_count=0):
+ """
+ Sends a DsGetNCChanges request and checks that the response (objects,
+ links and any expected error) matches what was specified.
+ """
+ if expected_links is None:
+ expected_links = []
+
+ # send a DsGetNCChanges to the DC
+ ctr6 = self._get_replication(replica_flags,
+ drs_error, drs, drs_handle,
+ highwatermark, uptodateness_vector,
+ more_flags, max_objects, exop, dest_dsa,
+ source_dsa, invocation_id, nc_dn_str)
+
+ # check the response is what we expect
+ self._check_ctr6(ctr6, expected_dns, expected_links,
+ nc_object_count=nc_object_count, more_data=more_data,
+ dn_ordered=dn_ordered)
+ return (ctr6.new_highwatermark, ctr6.uptodateness_vector)
+
+ def _get_ctr6_dn_list(self, ctr6):
+ """
+ Returns the DNs contained in a DsGetNCChanges response.
+ """
+ dn_list = []
+ next_object = ctr6.first_object
+ for i in range(0, ctr6.object_count):
+ dn_list.append(next_object.object.identifier.dn)
+ next_object = next_object.next_object
+ self.assertEqual(next_object, None)
+
+ return dn_list
+
+ def _check_ctr6(self, ctr6, expected_dns=None, expected_links=None,
+ dn_ordered=True, links_ordered=True,
+ more_data=False, nc_object_count=0,
+ nc_linked_attributes_count=0, drs_error=0):
+ """
+ Check that a ctr6 matches the specified parameters.
+ """
+ if expected_dns is None:
+ expected_dns = []
+
+ if expected_links is None:
+ expected_links = []
+
+ ctr6_raw_dns = self._get_ctr6_dn_list(ctr6)
+
+ # filter out changes to the RID Set objects, as these can happen
+ # intermittently and mess up the test assertions
+ ctr6_dns = []
+ for dn in ctr6_raw_dns:
+ if "CN=RID Set," in dn or "CN=RID Manager$," in dn:
+ print("Removing {0} from GetNCChanges reply".format(dn))
+ else:
+ ctr6_dns.append(dn)
+
+ self.assertEqual(len(ctr6_dns), len(expected_dns),
+ "Received unexpected objects (%s)" % ctr6_dns)
+ self.assertEqual(ctr6.object_count, len(ctr6_raw_dns))
+ self.assertEqual(ctr6.linked_attributes_count, len(expected_links))
+ self.assertEqual(ctr6.more_data, more_data)
+ self.assertEqual(ctr6.nc_object_count, nc_object_count)
+ self.assertEqual(ctr6.nc_linked_attributes_count, nc_linked_attributes_count)
+ self.assertEqual(ctr6.drs_error[0], drs_error)
+
+ i = 0
+ for dn in expected_dns:
+ # Expect them back in the exact same order as specified.
+ if dn_ordered:
+ self.assertNotEqual(ctr6_dns[i], None)
+ self.assertEqual(ctr6_dns[i], dn)
+ i = i + 1
+ # Don't care what order
+ else:
+ self.assertTrue(dn in ctr6_dns, "Couldn't find DN '%s' anywhere in ctr6 response." % dn)
+
+ # Extract the links from the response
+ ctr6_links = self._get_ctr6_links(ctr6)
+ expected_links.sort()
+
+ lidx = 0
+ for el in expected_links:
+ if links_ordered:
+ self.assertEqual(el, ctr6_links[lidx])
+ lidx += 1
+ else:
+ self.assertTrue(el in ctr6_links, "Couldn't find link '%s' anywhere in ctr6 response." % el)
+
+ def _exop_req8(self, dest_dsa, invocation_id, nc_dn_str, exop,
+ replica_flags=0, max_objects=0, partial_attribute_set=None,
+ partial_attribute_set_ex=None, mapping_ctr=None, nc_guid=None):
+ req8 = drsuapi.DsGetNCChangesRequest8()
+
+ req8.destination_dsa_guid = misc.GUID(dest_dsa) if dest_dsa else misc.GUID()
+ req8.source_dsa_invocation_id = misc.GUID(invocation_id)
+ req8.naming_context = drsuapi.DsReplicaObjectIdentifier()
+ req8.naming_context.dn = str(nc_dn_str)
+ if nc_guid is not None:
+ req8.naming_context.guid = nc_guid
+ req8.highwatermark = drsuapi.DsReplicaHighWaterMark()
+ req8.highwatermark.tmp_highest_usn = 0
+ req8.highwatermark.reserved_usn = 0
+ req8.highwatermark.highest_usn = 0
+ req8.uptodateness_vector = None
+ req8.replica_flags = replica_flags
+ req8.max_object_count = max_objects
+ req8.max_ndr_size = 402116
+ req8.extended_op = exop
+ req8.fsmo_info = 0
+ req8.partial_attribute_set = partial_attribute_set
+ req8.partial_attribute_set_ex = partial_attribute_set_ex
+ if mapping_ctr:
+ req8.mapping_ctr = mapping_ctr
+ else:
+ req8.mapping_ctr.num_mappings = 0
+ req8.mapping_ctr.mappings = None
+
+ return req8
+
+ def _getnc_req10(self, dest_dsa, invocation_id, nc_dn_str, exop,
+ replica_flags=0, max_objects=0, partial_attribute_set=None,
+ partial_attribute_set_ex=None, mapping_ctr=None,
+ more_flags=0, nc_guid=None):
+ req10 = drsuapi.DsGetNCChangesRequest10()
+
+ req10.destination_dsa_guid = misc.GUID(dest_dsa) if dest_dsa else misc.GUID()
+ req10.source_dsa_invocation_id = misc.GUID(invocation_id)
+ req10.naming_context = drsuapi.DsReplicaObjectIdentifier()
+ req10.naming_context.dn = str(nc_dn_str)
+ if nc_guid is not None:
+ req10.naming_context.guid = nc_guid
+ req10.highwatermark = drsuapi.DsReplicaHighWaterMark()
+ req10.highwatermark.tmp_highest_usn = 0
+ req10.highwatermark.reserved_usn = 0
+ req10.highwatermark.highest_usn = 0
+ req10.uptodateness_vector = None
+ req10.replica_flags = replica_flags
+ req10.max_object_count = max_objects
+ req10.max_ndr_size = 402116
+ req10.extended_op = exop
+ req10.fsmo_info = 0
+ req10.partial_attribute_set = partial_attribute_set
+ req10.partial_attribute_set_ex = partial_attribute_set_ex
+ if mapping_ctr:
+ req10.mapping_ctr = mapping_ctr
+ else:
+ req10.mapping_ctr.num_mappings = 0
+ req10.mapping_ctr.mappings = None
+ req10.more_flags = more_flags
+
+ return req10
+
+ def _ds_bind(self, server_name, creds=None, ip=None):
+ if ip is None:
+ binding_str = f"ncacn_ip_tcp:{server_name}[seal]"
+ else:
+ binding_str = f"ncacn_ip_tcp:{ip}[seal,target_hostname={server_name}]"
+
+ if creds is None:
+ creds = self.get_credentials()
+ drs = drsuapi.drsuapi(binding_str, self.get_loadparm(), creds)
+ (drs_handle, supported_extensions) = drs_DsBind(drs)
+ return (drs, drs_handle)
+
+ def get_partial_attribute_set(self, attids=None):
+ if attids is None:
+ attids = [drsuapi.DRSUAPI_ATTID_objectClass]
+ partial_attribute_set = drsuapi.DsPartialAttributeSet()
+ partial_attribute_set.attids = attids
+ partial_attribute_set.num_attids = len(attids)
+ return partial_attribute_set
+
+
+class AbstractLink:
+ def __init__(self, attid, flags, identifier, targetGUID,
+ targetDN=""):
+ self.attid = attid
+ self.flags = flags
+ self.identifier = str(identifier)
+ self.selfGUID_blob = ndr_pack(identifier)
+ self.targetGUID = str(targetGUID)
+ self.targetGUID_blob = ndr_pack(targetGUID)
+ self.targetDN = targetDN
+
+ def __repr__(self):
+ return "AbstractLink(0x%08x, 0x%08x, %s, %s)" % (
+ self.attid, self.flags, self.identifier, self.targetGUID)
+
+ def __internal_cmp__(self, other, verbose=False):
+ """See CompareLinks() in MS-DRSR section 4.1.10.5.17"""
+ if not isinstance(other, AbstractLink):
+ if verbose:
+ print("AbstractLink.__internal_cmp__(%r, %r) => wrong type" % (self, other))
+ return NotImplemented
+
+ c = cmp(self.selfGUID_blob, other.selfGUID_blob)
+ if c != 0:
+ if verbose:
+ print("AbstractLink.__internal_cmp__(%r, %r) => %d different identifier" % (self, other, c))
+ return c
+
+ c = other.attid - self.attid
+ if c != 0:
+ if verbose:
+ print("AbstractLink.__internal_cmp__(%r, %r) => %d different attid" % (self, other, c))
+ return c
+
+ self_active = self.flags & drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE
+ other_active = other.flags & drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE
+
+ c = self_active - other_active
+ if c != 0:
+ if verbose:
+ print("AbstractLink.__internal_cmp__(%r, %r) => %d different FLAG_ACTIVE" % (self, other, c))
+ return c
+
+ c = cmp(self.targetGUID_blob, other.targetGUID_blob)
+ if c != 0:
+ if verbose:
+ print("AbstractLink.__internal_cmp__(%r, %r) => %d different target" % (self, other, c))
+ return c
+
+ c = self.flags - other.flags
+ if c != 0:
+ if verbose:
+ print("AbstractLink.__internal_cmp__(%r, %r) => %d different flags" % (self, other, c))
+ return c
+
+ return 0
+
+ def __lt__(self, other):
+ c = self.__internal_cmp__(other)
+ if c == NotImplemented:
+ return NotImplemented
+ if c < 0:
+ return True
+ return False
+
+ def __le__(self, other):
+ c = self.__internal_cmp__(other)
+ if c == NotImplemented:
+ return NotImplemented
+ if c <= 0:
+ return True
+ return False
+
+ def __eq__(self, other):
+ c = self.__internal_cmp__(other, verbose=True)
+ if c == NotImplemented:
+ return NotImplemented
+ if c == 0:
+ return True
+ return False
+
+ def __ne__(self, other):
+ c = self.__internal_cmp__(other)
+ if c == NotImplemented:
+ return NotImplemented
+ if c != 0:
+ return True
+ return False
+
+ def __gt__(self, other):
+ c = self.__internal_cmp__(other)
+ if c == NotImplemented:
+ return NotImplemented
+ if c > 0:
+ return True
+ return False
+
+ def __ge__(self, other):
+ c = self.__internal_cmp__(other)
+ if c == NotImplemented:
+ return NotImplemented
+ if c >= 0:
+ return True
+ return False
+
+ def __hash__(self):
+ return hash((self.attid, self.flags, self.identifier, self.targetGUID))
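
Putting the helpers above together, a bare-bones DsGetNCChanges exchange (a level-8 request followed by a walk of the returned object list) looks roughly like the sketch below. drs and drs_handle are assumed to come from _ds_bind(), and the destination DSA GUID, invocation id and naming context DN from the test's ldb connections; the field values mirror _exop_req8 above:

from samba.dcerpc import drsuapi, misc

def fetch_nc_object_dns(drs, drs_handle, dest_dsa_guid, invocation_id, nc_dn):
    req8 = drsuapi.DsGetNCChangesRequest8()
    req8.destination_dsa_guid = misc.GUID(dest_dsa_guid)
    req8.source_dsa_invocation_id = misc.GUID(invocation_id)
    req8.naming_context = drsuapi.DsReplicaObjectIdentifier()
    req8.naming_context.dn = str(nc_dn)
    req8.highwatermark = drsuapi.DsReplicaHighWaterMark()
    req8.highwatermark.tmp_highest_usn = 0
    req8.highwatermark.reserved_usn = 0
    req8.highwatermark.highest_usn = 0
    req8.uptodateness_vector = None
    req8.replica_flags = 0
    req8.max_object_count = 133
    req8.max_ndr_size = 402116
    req8.extended_op = 0
    req8.fsmo_info = 0
    req8.partial_attribute_set = None
    req8.partial_attribute_set_ex = None
    req8.mapping_ctr.num_mappings = 0
    req8.mapping_ctr.mappings = None

    (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)

    # the objects come back as a singly linked list of object_count entries
    dns = []
    obj = ctr.first_object
    for i in range(ctr.object_count):
        dns.append(obj.object.identifier.dn)
        obj = obj.next_object
    return dns
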
diff --git a/source4/torture/drs/python/fsmo.py b/source4/torture/drs/python/fsmo.py
new file mode 100644
index 0000000..55805b9
--- /dev/null
+++ b/source4/torture/drs/python/fsmo.py
@@ -0,0 +1,152 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Unix SMB/CIFS implementation.
+# Copyright (C) Anatoliy Atanasov <anatoliy.atanasov@postpath.com> 2010
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc2_dns_name
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN fsmo -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import sys
+import time
+import os
+
+sys.path.insert(0, "bin/python")
+
+from ldb import SCOPE_BASE
+
+import drs_base
+
+
+class DrsFsmoTestCase(drs_base.DrsBaseTestCase):
+
+ def setUp(self):
+ super(DrsFsmoTestCase, self).setUp()
+
+ # we have to wait for the replication before we make the check
+ self.fsmo_wait_max_time = 20
+ self.fsmo_wait_sleep_time = 0.2
+
+ # cache some of RootDSE props
+ self.dsServiceName_dc1 = self.info_dc1["dsServiceName"][0]
+ self.dsServiceName_dc2 = self.info_dc2["dsServiceName"][0]
+ self.infrastructure_dn = "CN=Infrastructure," + self.domain_dn
+ self.naming_dn = "CN=Partitions," + self.config_dn
+ self.rid_dn = "CN=RID Manager$,CN=System," + self.domain_dn
+ self.domain_dns_dn = (
+ "CN=Infrastructure,DC=DomainDnsZones, %s" % self.domain_dn )
+ self.forest_dns_dn = (
+ "CN=Infrastructure,DC=ForestDnsZones, %s" % self.domain_dn )
+
+ def tearDown(self):
+ super(DrsFsmoTestCase, self).tearDown()
+
+ def _net_fsmo_role_transfer(self, DC, role, noop=False):
+ # make command line credentials string
+ ccache_name = self.get_creds_ccache_name()
+ cmd_line_auth = "--use-krb5-ccache=%s" % ccache_name
+ (result, out, err) = self.runsubcmd("fsmo", "transfer",
+ "--role=%s" % role,
+ "-H", "ldap://%s:389" % DC,
+ cmd_line_auth)
+
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ if not noop:
+ self.assertTrue("FSMO transfer of '%s' role successful" % role in out)
+ else:
+ self.assertTrue("This DC already has the '%s' FSMO role" % role in out)
+
+ def _wait_for_role_transfer(self, ldb_dc, role_dn, master):
+ """Wait for role transfer for certain amount of time
+
+ :return: (Result=True|False, CurrentMasterDnsName) tuple
+ """
+ cur_master = ''
+ retries = int(self.fsmo_wait_max_time / self.fsmo_wait_sleep_time) + 1
+ for i in range(0, retries):
+ # check if master has been transferred
+ res = ldb_dc.search(role_dn,
+ scope=SCOPE_BASE, attrs=["fSMORoleOwner"])
+ assert len(res) == 1, "Only one fSMORoleOwner value expected!"
+ cur_master = res[0]["fSMORoleOwner"][0]
+ if master == cur_master:
+ return (True, cur_master)
+ # skip last sleep, if no need to wait anymore
+ if i != (retries - 1):
+ # wait a little bit before next retry
+ time.sleep(self.fsmo_wait_sleep_time)
+ return (False, cur_master)
+
+ def _role_transfer(self, role, role_dn):
+ """Triggers transfer of role from DC1 to DC2
+ and vice versa so the role goes back to the original dc"""
+ # dc2 gets the role from dc1
+ print("Testing for %s role transfer from %s to %s" % (role, self.dnsname_dc1, self.dnsname_dc2))
+
+ self._net_fsmo_role_transfer(DC=self.dnsname_dc2, role=role)
+ # check if the role is transferred
+ (res, master) = self._wait_for_role_transfer(ldb_dc=self.ldb_dc2,
+ role_dn=role_dn,
+ master=self.dsServiceName_dc2)
+ self.assertTrue(res,
+ "Transferring %s role to %s has failed, master is: %s!" % (role, self.dsServiceName_dc2, master))
+
+ # dc1 gets back the role from dc2
+ print("Testing for %s role transfer from %s to %s" % (role, self.dnsname_dc2, self.dnsname_dc1))
+ self._net_fsmo_role_transfer(DC=self.dnsname_dc1, role=role)
+ # check if the role is transferred
+ (res, master) = self._wait_for_role_transfer(ldb_dc=self.ldb_dc1,
+ role_dn=role_dn,
+ master=self.dsServiceName_dc1)
+ self.assertTrue(res,
+ "Transferring %s role to %s has failed, master is: %s!" % (role, self.dsServiceName_dc1, master))
+
+ # dc1 keeps the role
+ print("Testing for no-op %s role transfer from %s to %s" % (role, self.dnsname_dc2, self.dnsname_dc1))
+ self._net_fsmo_role_transfer(DC=self.dnsname_dc1, role=role, noop=True)
+ # check if the role is transferred
+ (res, master) = self._wait_for_role_transfer(ldb_dc=self.ldb_dc1,
+ role_dn=role_dn,
+ master=self.dsServiceName_dc1)
+ self.assertTrue(res,
+ "Transferring %s role to %s has failed, master is: %s!" % (role, self.dsServiceName_dc1, master))
+
+ def test_SchemaMasterTransfer(self):
+ self._role_transfer(role="schema", role_dn=self.schema_dn)
+
+ def test_InfrastructureMasterTransfer(self):
+ self._role_transfer(role="infrastructure", role_dn=self.infrastructure_dn)
+
+ def test_PDCMasterTransfer(self):
+ self._role_transfer(role="pdc", role_dn=self.domain_dn)
+
+ def test_RIDMasterTransfer(self):
+ self._role_transfer(role="rid", role_dn=self.rid_dn)
+
+ def test_NamingMasterTransfer(self):
+ self._role_transfer(role="naming", role_dn=self.naming_dn)
+
+ def test_DomainDnsZonesMasterTransfer(self):
+ self._role_transfer(role="domaindns", role_dn=self.domain_dns_dn)
+
+ def test_ForestDnsZonesMasterTransfer(self):
+ self._role_transfer(role="forestdns", role_dn=self.forest_dns_dn)
diff --git a/source4/torture/drs/python/getnc_exop.py b/source4/torture/drs/python/getnc_exop.py
new file mode 100644
index 0000000..0f12d9b
--- /dev/null
+++ b/source4/torture/drs/python/getnc_exop.py
@@ -0,0 +1,1304 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Tests various DsGetNCChanges extended operation scenarios
+#
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2016
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc2_dns_name
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN getnc_exop -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import random
+
+import drs_base
+from drs_base import AbstractLink
+
+import samba.tests
+from samba import werror, WERRORError
+
+import ldb
+from ldb import SCOPE_BASE
+
+from samba.dcerpc import drsuapi, misc, drsblobs
+from samba.drs_utils import drs_DsBind
+from samba.ndr import ndr_unpack, ndr_pack
+from functools import cmp_to_key
+from samba.common import cmp
+
+
+def _linked_attribute_compare(la1, la2):
+ """See CompareLinks() in MS-DRSR section 4.1.10.5.17"""
+ la1, la1_target = la1
+ la2, la2_target = la2
+
+ # Ascending host object GUID
+ c = cmp(ndr_pack(la1.identifier.guid), ndr_pack(la2.identifier.guid))
+ if c != 0:
+ return c
+
+ # Ascending attribute ID
+ if la1.attid != la2.attid:
+ return -1 if la1.attid < la2.attid else 1
+
+ la1_active = la1.flags & drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE
+ la2_active = la2.flags & drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE
+
+ # Ascending 'is present'
+ if la1_active != la2_active:
+ return 1 if la1_active else -1
+
+ # Ascending target object GUID
+ return cmp(ndr_pack(la1_target), ndr_pack(la2_target))
+
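+# Illustrative sketch (not one of the tests): given a list of
+# (link, target_guid) tuples, the comparator above is meant to be used with
+# functools.cmp_to_key to reproduce the MS-DRSR ordering - by source object
+# GUID, then attid, then the 'is active' flag, then target GUID:
+#
+#   links.sort(key=cmp_to_key(_linked_attribute_compare))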
+
+class DrsReplicaSyncTestCase(drs_base.DrsBaseTestCase):
+ """Intended as a semi-black box test case for DsGetNCChanges
+ implementation for extended operations. It should be testing
+ how DsGetNCChanges handles different input params (mostly invalid).
+ Final goal is to make DsGetNCChanges as binary compatible to
+ Windows implementation as possible"""
+
+ def setUp(self):
+ super(DrsReplicaSyncTestCase, self).setUp()
+ self.base_dn = self.ldb_dc1.get_default_basedn()
+ self.ou = "OU=test_getncchanges%d,%s" % (random.randint(0, 4294967295),
+ self.base_dn)
+ self.ldb_dc1.add({
+ "dn": self.ou,
+ "objectclass": "organizationalUnit"})
+ (self.drs, self.drs_handle) = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+ (self.default_hwm, self.default_utdv) = self._get_highest_hwm_utdv(self.ldb_dc1)
+
+ def tearDown(self):
+ try:
+ self.ldb_dc1.delete(self.ou, ["tree_delete:1"])
+ except ldb.LdbError as e:
+ (enum, string) = e.args
+ if enum == ldb.ERR_NO_SUCH_OBJECT:
+ pass
+ super(DrsReplicaSyncTestCase, self).tearDown()
+
+ def _determine_fSMORoleOwner(self, fsmo_obj_dn):
+ """Returns (owner, not_owner) pair where:
+ owner: dns name for FSMO owner
+ not_owner: dns name for DC not owning the FSMO"""
+ # collect info to return later
+ fsmo_info_1 = {"dns_name": self.dnsname_dc1,
+ "invocation_id": self.ldb_dc1.get_invocation_id(),
+ "ntds_guid": self.ldb_dc1.get_ntds_GUID(),
+ "server_dn": self.ldb_dc1.get_serverName()}
+ fsmo_info_2 = {"dns_name": self.dnsname_dc2,
+ "invocation_id": self.ldb_dc2.get_invocation_id(),
+ "ntds_guid": self.ldb_dc2.get_ntds_GUID(),
+ "server_dn": self.ldb_dc2.get_serverName()}
+
+ msgs = self.ldb_dc1.search(scope=ldb.SCOPE_BASE, base=fsmo_info_1["server_dn"], attrs=["serverReference"])
+ fsmo_info_1["server_acct_dn"] = ldb.Dn(self.ldb_dc1, msgs[0]["serverReference"][0].decode('utf8'))
+ fsmo_info_1["rid_set_dn"] = ldb.Dn(self.ldb_dc1, "CN=RID Set") + fsmo_info_1["server_acct_dn"]
+
+ msgs = self.ldb_dc2.search(scope=ldb.SCOPE_BASE, base=fsmo_info_2["server_dn"], attrs=["serverReference"])
+ fsmo_info_2["server_acct_dn"] = ldb.Dn(self.ldb_dc2, msgs[0]["serverReference"][0].decode('utf8'))
+ fsmo_info_2["rid_set_dn"] = ldb.Dn(self.ldb_dc2, "CN=RID Set") + fsmo_info_2["server_acct_dn"]
+
+ # determine the owner dc
+ res = self.ldb_dc1.search(fsmo_obj_dn,
+ scope=SCOPE_BASE, attrs=["fSMORoleOwner"])
+ assert len(res) == 1, "Only one fSMORoleOwner value expected for %s!" % fsmo_obj_dn
+ fsmo_owner = res[0]["fSMORoleOwner"][0]
+ if fsmo_owner == self.info_dc1["dsServiceName"][0]:
+ return (fsmo_info_1, fsmo_info_2)
+ return (fsmo_info_2, fsmo_info_1)
+
+ def _check_exop_failed(self, ctr6, expected_failure):
+ self.assertEqual(ctr6.extended_ret, expected_failure)
+ #self.assertEqual(ctr6.object_count, 0)
+ #self.assertEqual(ctr6.first_object, None)
+ self.assertEqual(ctr6.more_data, False)
+ self.assertEqual(ctr6.nc_object_count, 0)
+ self.assertEqual(ctr6.nc_linked_attributes_count, 0)
+ self.assertEqual(ctr6.linked_attributes_count, 0)
+ self.assertEqual(ctr6.linked_attributes, [])
+ self.assertEqual(ctr6.drs_error[0], 0)
+
+ def test_do_single_repl(self):
+ """
+ Make sure that DRSUAPI_EXOP_REPL_OBJ never replicates more than
+ one object, even when we use DRS_GET_ANC/GET_TGT.
+ """
+
+ ou1 = "OU=get_anc1,%s" % self.ou
+ self.ldb_dc1.add({
+ "dn": ou1,
+ "objectclass": "organizationalUnit"
+ })
+ ou1_id = self._get_identifier(self.ldb_dc1, ou1)
+ ou2 = "OU=get_anc2,%s" % ou1
+ self.ldb_dc1.add({
+ "dn": ou2,
+ "objectclass": "organizationalUnit"
+ })
+ ou2_id = self._get_identifier(self.ldb_dc1, ou2)
+ dc3 = "CN=test_anc_dc_%u,%s" % (random.randint(0, 4294967295), ou2)
+ self.ldb_dc1.add({
+ "dn": dc3,
+ "objectclass": "computer",
+ "userAccountControl": "%d" % (samba.dsdb.UF_ACCOUNTDISABLE | samba.dsdb.UF_SERVER_TRUST_ACCOUNT)
+ })
+ dc3_id = self._get_identifier(self.ldb_dc1, dc3)
+
+ # Add some linked attributes (for checking GET_TGT behaviour)
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc2, ou1)
+ m["managedBy"] = ldb.MessageElement(ou2, ldb.FLAG_MOD_ADD, "managedBy")
+ self.ldb_dc1.modify(m)
+ ou1_link = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ ou1_id.guid, ou2_id.guid)
+
+ m.dn = ldb.Dn(self.ldb_dc2, dc3)
+ m["managedBy"] = ldb.MessageElement(ou2, ldb.FLAG_MOD_ADD, "managedBy")
+ self.ldb_dc1.modify(m)
+ dc3_link = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ dc3_id.guid, ou2_id.guid)
+
+ req = self._getnc_req10(dest_dsa=None,
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=ou1,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ replica_flags=drsuapi.DRSUAPI_DRS_WRIT_REP,
+ more_flags=drsuapi.DRSUAPI_DRS_GET_TGT)
+ (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 10, req)
+ self._check_ctr6(ctr, [ou1], expected_links=[ou1_link])
+
+ # DRSUAPI_DRS_WRIT_REP means that we should only replicate the dn we give (dc3).
+ # DRSUAPI_DRS_GET_ANC means that we should also replicate its ancestors, but
+ # Windows doesn't do this if we use both.
+ req = self._getnc_req10(dest_dsa=None,
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=dc3,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ replica_flags=drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ more_flags=drsuapi.DRSUAPI_DRS_GET_TGT)
+ (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 10, req)
+ self._check_ctr6(ctr, [dc3], expected_links=[dc3_link])
+
+ # Even though the ancestor of ou2 (ou1) has changed since last hwm, and we're
+ # sending DRSUAPI_DRS_GET_ANC, the expected response is that it will only try
+ # and replicate the single object still.
+ req = self._getnc_req10(dest_dsa=None,
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=ou2,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ replica_flags=drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ more_flags=drsuapi.DRSUAPI_DRS_GET_TGT)
+ (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 10, req)
+ self._check_ctr6(ctr, [ou2])
+
+ def test_do_full_repl_on_ou(self):
+ """
+        Make sure that a full replication on something that is not an NC
+        fails with the right error code
+ """
+
+ non_nc_ou = "OU=not-an-NC,%s" % self.ou
+ self.ldb_dc1.add({
+ "dn": non_nc_ou,
+ "objectclass": "organizationalUnit"
+ })
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=non_nc_ou,
+ exop=drsuapi.DRSUAPI_EXOP_NONE,
+ replica_flags=drsuapi.DRSUAPI_DRS_WRIT_REP)
+
+ try:
+ (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 8, req8)
+ self.fail("Expected DsGetNCChanges to fail with WERR_DS_CANT_FIND_EXPECTED_NC")
+ except WERRORError as e1:
+ (enum, estr) = e1.args
+ self.assertEqual(enum, werror.WERR_DS_CANT_FIND_EXPECTED_NC)
+
+ def test_InvalidNC_DummyDN_InvalidGUID_REPL_OBJ(self):
+ """Test single object replication on a totally invalid GUID fails with the right error code"""
+ fsmo_dn = self.ldb_dc1.get_schema_basedn()
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ req8 = self._exop_req8(dest_dsa="9c637462-5b8c-4467-aef2-bdb1f57bc4ef",
+ invocation_id=fsmo_owner["invocation_id"],
+ nc_dn_str="DummyDN",
+ nc_guid=misc.GUID("c2d2f745-1610-4e93-964b-d4ba73eb32f8"),
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ)
+
+ (drs, drs_handle) = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+ try:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ except WERRORError as e1:
+ (enum, estr) = e1.args
+ self.assertEqual(enum, werror.WERR_DS_DRA_BAD_DN)
+
+    def test_InvalidNC_DummyDN_InvalidGUID_REPL_SECRET(self):
+        """Test secret replication on a totally invalid GUID fails with the right error code"""
+        fsmo_dn = self.ldb_dc1.get_schema_basedn()
+        (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+        req8 = self._exop_req8(dest_dsa="9c637462-5b8c-4467-aef2-bdb1f57bc4ef",
+                               invocation_id=fsmo_owner["invocation_id"],
+                               nc_dn_str="DummyDN",
+                               nc_guid=misc.GUID("c2d2f745-1610-4e93-964b-d4ba73eb32f8"),
+                               exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET)
+
+ (drs, drs_handle) = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+ try:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ except WERRORError as e1:
+ (enum, estr) = e1.args
+ self.assertEqual(enum, werror.WERR_DS_DRA_BAD_DN)
+
+ def test_InvalidNC_DummyDN_InvalidGUID_RID_ALLOC(self):
+ """Test RID Allocation on a totally invalid GUID fails with the right error code"""
+ fsmo_dn = self.ldb_dc1.get_schema_basedn()
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ req8 = self._exop_req8(dest_dsa="9c637462-5b8c-4467-aef2-bdb1f57bc4ef",
+ invocation_id=fsmo_owner["invocation_id"],
+ nc_dn_str="DummyDN",
+ nc_guid=misc.GUID("c2d2f745-1610-4e93-964b-d4ba73eb32f8"),
+ exop=drsuapi.DRSUAPI_EXOP_FSMO_RID_ALLOC)
+
+ (drs, drs_handle) = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+ try:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ except WERRORError as e1:
+ (enum, estr) = e1.args
+ self.assertEqual(enum, werror.WERR_DS_DRA_BAD_NC)
+
+ def test_valid_GUID_only_REPL_OBJ(self):
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ res = self.ldb_dc1.search(base=self.ou, scope=SCOPE_BASE,
+ attrs=["objectGUID"])
+
+ guid = misc.GUID(res[0]["objectGUID"][0])
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str="",
+ nc_guid=guid,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ)
+
+ try:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ except WERRORError as e1:
+ (enum, estr) = e1.args
+ self.fail(f"Failed to call GetNCChanges with EXOP_REPL_OBJ and a GUID: {estr}")
+
+ self.assertEqual(ctr.first_object.object.identifier.guid, guid)
+
+ def test_DummyDN_valid_GUID_REPL_OBJ(self):
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ res = self.ldb_dc1.search(base=self.ou, scope=SCOPE_BASE,
+ attrs=["objectGUID"])
+
+ guid = misc.GUID(res[0]["objectGUID"][0])
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str="DummyDN",
+ nc_guid=guid,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ)
+
+ try:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ except WERRORError as e1:
+ (enum, estr) = e1.args
+ self.fail(f"Failed to call GetNCChanges with EXOP_REPL_OBJ, DummyDN and a GUID: {estr}")
+
+ self.assertEqual(ctr.first_object.object.identifier.guid, guid)
+
+ def test_DummyDN_valid_GUID_REPL_SECRET(self):
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ res = self.ldb_dc1.search(base=self.ou, scope=SCOPE_BASE,
+ attrs=["objectGUID"])
+
+ guid = misc.GUID(res[0]["objectGUID"][0])
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str="DummyDN",
+ nc_guid=guid,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET)
+
+ try:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ except WERRORError as e1:
+ (enum, estr) = e1.args
+
+ # We expect to get as far as failing on the missing dest_dsa
+ self.assertEqual(enum, werror.WERR_DS_DRA_DB_ERROR)
+
+ def test_link_utdv_hwm(self):
+ """Test verify the DRS_GET_ANC behavior."""
+
+ ou1 = "OU=get_anc1,%s" % self.ou
+ self.ldb_dc1.add({
+ "dn": ou1,
+ "objectclass": "organizationalUnit"
+ })
+ ou1_id = self._get_identifier(self.ldb_dc1, ou1)
+ ou2 = "OU=get_anc2,%s" % ou1
+ self.ldb_dc1.add({
+ "dn": ou2,
+ "objectclass": "organizationalUnit"
+ })
+ ou2_id = self._get_identifier(self.ldb_dc1, ou2)
+ dc3 = "CN=test_anc_dc_%u,%s" % (random.randint(0, 4294967295), ou2)
+ self.ldb_dc1.add({
+ "dn": dc3,
+ "objectclass": "computer",
+ "userAccountControl": "%d" % (samba.dsdb.UF_ACCOUNTDISABLE | samba.dsdb.UF_SERVER_TRUST_ACCOUNT)
+ })
+ dc3_id = self._get_identifier(self.ldb_dc1, dc3)
+
+ (hwm1, utdv1) = self._check_replication([ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_WRIT_REP)
+
+ self._check_replication([ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ self._check_replication([dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
+
+ self._check_replication([ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
+ drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, ou1)
+ m["displayName"] = ldb.MessageElement("OU1", ldb.FLAG_MOD_ADD, "displayName")
+ self.ldb_dc1.modify(m)
+
+ (hwm2, utdv2) = self._check_replication([ou2, dc3, ou1],
+ drsuapi.DRSUAPI_DRS_WRIT_REP)
+
+ self._check_replication([ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ self._check_replication([dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
+
+ self._check_replication([ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
+ drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ self._check_replication([ou1],
+ drsuapi.DRSUAPI_DRS_WRIT_REP,
+ highwatermark=hwm1)
+
+ self._check_replication([ou1],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ highwatermark=hwm1)
+
+ self._check_replication([ou1],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ uptodateness_vector=utdv1)
+
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, ou2)
+ m["displayName"] = ldb.MessageElement("OU2", ldb.FLAG_MOD_ADD, "displayName")
+ self.ldb_dc1.modify(m)
+
+ (hwm3, utdv3) = self._check_replication([dc3, ou1, ou2],
+ drsuapi.DRSUAPI_DRS_WRIT_REP)
+
+ self._check_replication([ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ self._check_replication([dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
+
+ self._check_replication([ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
+ drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ self._check_replication([ou1, ou2],
+ drsuapi.DRSUAPI_DRS_WRIT_REP,
+ highwatermark=hwm1)
+
+ self._check_replication([ou1, ou2],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ highwatermark=hwm1)
+
+ self._check_replication([ou1, ou2],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ uptodateness_vector=utdv1)
+
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, self.ou)
+ m["displayName"] = ldb.MessageElement("OU", ldb.FLAG_MOD_ADD, "displayName")
+ self.ldb_dc1.modify(m)
+
+ (hwm4, utdv4) = self._check_replication([dc3, ou1, ou2, self.ou],
+ drsuapi.DRSUAPI_DRS_WRIT_REP)
+
+ self._check_replication([self.ou, ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ self._check_replication([dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
+
+ self._check_replication([self.ou, ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
+ drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ self._check_replication([self.ou, ou2],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ uptodateness_vector=utdv2)
+
+ cn3 = "CN=get_anc3,%s" % ou2
+ self.ldb_dc1.add({
+ "dn": cn3,
+ "objectclass": "container",
+ })
+
+ (hwm5, utdv5) = self._check_replication([dc3, ou1, ou2, self.ou, cn3],
+ drsuapi.DRSUAPI_DRS_WRIT_REP)
+
+ self._check_replication([self.ou, ou1, ou2, dc3, cn3],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ self._check_replication([dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
+
+ self._check_replication([self.ou, ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
+ drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, ou2)
+ m["managedBy"] = ldb.MessageElement(dc3, ldb.FLAG_MOD_ADD, "managedBy")
+ self.ldb_dc1.modify(m)
+ ou2_managedBy_dc3 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ ou2_id.guid, dc3_id.guid)
+
+ (hwm6, utdv6) = self._check_replication([dc3, ou1, self.ou, cn3, ou2],
+ drsuapi.DRSUAPI_DRS_WRIT_REP,
+ expected_links=[ou2_managedBy_dc3])
+
+ # Can fail against Windows due to equal precedence of dc3, cn3
+ self._check_replication([self.ou, ou1, ou2, dc3, cn3],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ expected_links=[ou2_managedBy_dc3])
+
+ self._check_replication([dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
+
+ self._check_replication([self.ou, ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
+ drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_WRIT_REP,
+ uptodateness_vector=utdv5,
+ expected_links=[ou2_managedBy_dc3])
+
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
+ uptodateness_vector=utdv5)
+
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
+ uptodateness_vector=utdv5)
+
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, dc3)
+ m["managedBy"] = ldb.MessageElement(ou1, ldb.FLAG_MOD_ADD, "managedBy")
+ self.ldb_dc1.modify(m)
+ dc3_managedBy_ou1 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ dc3_id.guid, ou1_id.guid)
+
+ (hwm7, utdv7) = self._check_replication([ou1, self.ou, cn3, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_WRIT_REP,
+ expected_links=[ou2_managedBy_dc3, dc3_managedBy_ou1])
+
+ # Can fail against Windows due to equal precedence of dc3, cn3
+ # self._check_replication([self.ou,ou1,ou2,dc3,cn3],
+ # drsuapi.DRSUAPI_DRS_WRIT_REP|
+ # drsuapi.DRSUAPI_DRS_GET_ANC,
+ # expected_links=[ou2_managedBy_dc3,dc3_managedBy_ou1])
+
+ self._check_replication([dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
+ expected_links=[dc3_managedBy_ou1])
+
+ self._check_replication([dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
+ expected_links=[dc3_managedBy_ou1])
+
+ self._check_replication([self.ou, ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ expected_links=[dc3_managedBy_ou1])
+
+ # GET_TGT seems to override DRS_CRITICAL_ONLY and also returns any
+ # object(s) that relate to the linked attributes (similar to GET_ANC)
+ self._check_replication([ou1, dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
+ more_flags=drsuapi.DRSUAPI_DRS_GET_TGT,
+ expected_links=[dc3_managedBy_ou1], dn_ordered=False)
+
+ # Change DC3's managedBy to OU2 instead of OU1
+ # Note that the OU1 managedBy linked attribute will still exist as
+ # a tombstone object (and so will be returned in the replication still)
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, dc3)
+ m["managedBy"] = ldb.MessageElement(ou2, ldb.FLAG_MOD_REPLACE, "managedBy")
+ self.ldb_dc1.modify(m)
+ dc3_managedBy_ou1.flags &= ~drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE
+ dc3_managedBy_ou2 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ dc3_id.guid, ou2_id.guid)
+
+ (hwm8, utdv8) = self._check_replication([ou1, self.ou, cn3, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_WRIT_REP,
+ expected_links=[ou2_managedBy_dc3, dc3_managedBy_ou1, dc3_managedBy_ou2])
+
+ # Can fail against Windows due to equal precedence of dc3, cn3
+ # self._check_replication([self.ou,ou1,ou2,dc3,cn3],
+ # drsuapi.DRSUAPI_DRS_WRIT_REP|
+ # drsuapi.DRSUAPI_DRS_GET_ANC,
+ # expected_links=[ou2_managedBy_dc3,dc3_managedBy_ou1,dc3_managedBy_ou2])
+
+ self._check_replication([dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2])
+
+ self._check_replication([self.ou, ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2])
+
+ # GET_TGT will also return any DNs referenced by the linked attributes
+ # (including the Tombstone attribute)
+ self._check_replication([ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
+ more_flags=drsuapi.DRSUAPI_DRS_GET_TGT,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2], dn_ordered=False)
+
+ # Use the highwater-mark prior to changing ManagedBy - this should
+ # only return the old/Tombstone and new linked attributes (we already
+ # know all the DNs)
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_WRIT_REP,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
+ highwatermark=hwm7)
+
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
+ highwatermark=hwm7)
+
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
+ highwatermark=hwm7)
+
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
+ highwatermark=hwm7)
+
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
+ more_flags=drsuapi.DRSUAPI_DRS_GET_TGT,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
+ highwatermark=hwm7)
+
+ # Repeat the above set of tests using the uptodateness_vector
+ # instead of the highwater-mark
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_WRIT_REP,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
+ uptodateness_vector=utdv7)
+
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
+ uptodateness_vector=utdv7)
+
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
+ uptodateness_vector=utdv7)
+
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
+ uptodateness_vector=utdv7)
+
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
+ more_flags=drsuapi.DRSUAPI_DRS_GET_TGT,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
+ uptodateness_vector=utdv7)
+
+ def test_FSMONotOwner(self):
+ """Test role transfer with against DC not owner of the role"""
+ fsmo_dn = self.ldb_dc1.get_schema_basedn()
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ req8 = self._exop_req8(dest_dsa=fsmo_owner["ntds_guid"],
+ invocation_id=fsmo_not_owner["invocation_id"],
+ nc_dn_str=fsmo_dn,
+ exop=drsuapi.DRSUAPI_EXOP_FSMO_REQ_ROLE)
+
+ (drs, drs_handle) = self._ds_bind(fsmo_not_owner["dns_name"])
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ self.assertEqual(level, 6, "Expected level 6 response!")
+ self._check_exop_failed(ctr, drsuapi.DRSUAPI_EXOP_ERR_FSMO_NOT_OWNER)
+ self.assertEqual(ctr.source_dsa_guid, misc.GUID(fsmo_not_owner["ntds_guid"]))
+ self.assertEqual(ctr.source_dsa_invocation_id, misc.GUID(fsmo_not_owner["invocation_id"]))
+
+ def test_InvalidDestDSA(self):
+ """Test role transfer with invalid destination DSA guid"""
+ fsmo_dn = self.ldb_dc1.get_schema_basedn()
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ req8 = self._exop_req8(dest_dsa="9c637462-5b8c-4467-aef2-bdb1f57bc4ef",
+ invocation_id=fsmo_owner["invocation_id"],
+ nc_dn_str=fsmo_dn,
+ exop=drsuapi.DRSUAPI_EXOP_FSMO_REQ_ROLE)
+
+ (drs, drs_handle) = self._ds_bind(fsmo_owner["dns_name"])
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ self.assertEqual(level, 6, "Expected level 6 response!")
+ self._check_exop_failed(ctr, drsuapi.DRSUAPI_EXOP_ERR_UNKNOWN_CALLER)
+ self.assertEqual(ctr.source_dsa_guid, misc.GUID(fsmo_owner["ntds_guid"]))
+ self.assertEqual(ctr.source_dsa_invocation_id, misc.GUID(fsmo_owner["invocation_id"]))
+
+ def test_InvalidDestDSA_and_GUID(self):
+ """Test role transfer with invalid destination DSA guid"""
+ fsmo_dn = self.ldb_dc1.get_schema_basedn()
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ req8 = self._exop_req8(dest_dsa="9c637462-5b8c-4467-aef2-bdb1f57bc4ef",
+ invocation_id=fsmo_owner["invocation_id"],
+ nc_dn_str="DummyDN",
+ nc_guid=misc.GUID("c2d2f745-1610-4e93-964b-d4ba73eb32f8"),
+ exop=drsuapi.DRSUAPI_EXOP_FSMO_REQ_ROLE)
+
+ (drs, drs_handle) = self._ds_bind(fsmo_owner["dns_name"])
+ try:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ except WERRORError as e1:
+ (enum, estr) = e1.args
+ self.fail(f"DsGetNCChanges failed with {estr}")
+ self.assertEqual(level, 6, "Expected level 6 response!")
+ self._check_exop_failed(ctr, drsuapi.DRSUAPI_EXOP_ERR_UNKNOWN_CALLER)
+ self.assertEqual(ctr.source_dsa_guid, misc.GUID(fsmo_owner["ntds_guid"]))
+ self.assertEqual(ctr.source_dsa_invocation_id, misc.GUID(fsmo_owner["invocation_id"]))
+
+ def test_InvalidDestDSA_and_GUID_RID_ALLOC(self):
+ """Test role transfer with invalid destination DSA guid"""
+ fsmo_dn = self.ldb_dc1.get_schema_basedn()
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ req8 = self._exop_req8(dest_dsa="9c637462-5b8c-4467-aef2-bdb1f57bc4ef",
+ invocation_id=fsmo_owner["invocation_id"],
+ nc_dn_str="DummyDN",
+ nc_guid=misc.GUID("c2d2f745-1610-4e93-964b-d4ba73eb32f8"),
+ exop=drsuapi.DRSUAPI_EXOP_FSMO_RID_ALLOC)
+
+ (drs, drs_handle) = self._ds_bind(fsmo_owner["dns_name"])
+ try:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ except WERRORError as e1:
+ (enum, estr) = e1.args
+ self.fail(f"DsGetNCChanges failed with {estr}")
+ self.assertEqual(level, 6, "Expected level 6 response!")
+ self._check_exop_failed(ctr, drsuapi.DRSUAPI_EXOP_ERR_UNKNOWN_CALLER)
+ self.assertEqual(ctr.source_dsa_guid, misc.GUID(fsmo_owner["ntds_guid"]))
+ self.assertEqual(ctr.source_dsa_invocation_id, misc.GUID(fsmo_owner["invocation_id"]))
+
+
+class DrsReplicaPrefixMapTestCase(drs_base.DrsBaseTestCase):
+ def setUp(self):
+ super(DrsReplicaPrefixMapTestCase, self).setUp()
+ self.base_dn = self.ldb_dc1.get_default_basedn()
+ self.ou = "ou=pfm_exop%d,%s" % (random.randint(0, 4294967295),
+ self.base_dn)
+ self.ldb_dc1.add({
+ "dn": self.ou,
+ "objectclass": "organizationalUnit"})
+ self.user = "cn=testuser,%s" % self.ou
+ self.ldb_dc1.add({
+ "dn": self.user,
+ "objectclass": "user"})
+
+ def tearDown(self):
+ super(DrsReplicaPrefixMapTestCase, self).tearDown()
+ try:
+ self.ldb_dc1.delete(self.ou, ["tree_delete:1"])
+ except ldb.LdbError as e2:
+ (enum, string) = e2.args
+ if enum == ldb.ERR_NO_SUCH_OBJECT:
+ pass
+
+ def test_missing_prefix_map_dsa(self):
+ partial_attribute_set = self.get_partial_attribute_set()
+
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ partial_attribute_set=partial_attribute_set)
+
+ try:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ self.assertEqual(ctr.extended_ret, drsuapi.DRSUAPI_EXOP_ERR_SUCCESS)
+ except RuntimeError:
+ self.fail("Missing prefixmap shouldn't have triggered an error")
+
+ def test_invalid_prefix_map_attid(self):
+ # Request for invalid attid
+ partial_attribute_set = self.get_partial_attribute_set([99999])
+
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ try:
+ pfm = self._samdb_fetch_pfm_and_schi()
+ except KeyError:
+ # On Windows, prefixMap isn't available over LDAP
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ)
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ pfm = ctr.mapping_ctr
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ partial_attribute_set=partial_attribute_set,
+ mapping_ctr=pfm)
+
+ try:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ self.fail("Invalid attid (99999) should have triggered an error")
+ except RuntimeError as e3:
+ (ecode, emsg) = e3.args
+ self.assertEqual(ecode, 0x000020E2, "Error code should have been "
+ "WERR_DS_DRA_SCHEMA_MISMATCH")
+
+ def test_secret_prefix_map_attid(self):
+ # Request for a secret attid
+ partial_attribute_set = self.get_partial_attribute_set([drsuapi.DRSUAPI_ATTID_unicodePwd])
+
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ try:
+ pfm = self._samdb_fetch_pfm_and_schi()
+ except KeyError:
+ # On Windows, prefixMap isn't available over LDAP
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ)
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ pfm = ctr.mapping_ctr
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ partial_attribute_set=partial_attribute_set,
+ mapping_ctr=pfm)
+
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+
+ found = False
+ for attr in ctr.first_object.object.attribute_ctr.attributes:
+ if attr.attid == drsuapi.DRSUAPI_ATTID_unicodePwd:
+ found = True
+ break
+
+ self.assertTrue(found, "Ensure we get the unicodePwd attribute back")
+
+ for i, mapping in enumerate(pfm.mappings):
+ # OID: 2.5.4.*
+ # objectClass: 2.5.4.0
+ if mapping.oid.binary_oid == [85, 4]:
+ idx1 = i
+ # OID: 1.2.840.113556.1.4.*
+ # unicodePwd: 1.2.840.113556.1.4.90
+ elif mapping.oid.binary_oid == [42, 134, 72, 134, 247, 20, 1, 4]:
+ idx2 = i
+
+ (pfm.mappings[idx1].id_prefix,
+ pfm.mappings[idx2].id_prefix) = (pfm.mappings[idx2].id_prefix,
+ pfm.mappings[idx1].id_prefix)
+
+ tmp = pfm.mappings
+ tmp[idx1], tmp[idx2] = tmp[idx2], tmp[idx1]
+ pfm.mappings = tmp
+
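+        # Background (as we understand MS-DRSR OID compression): an ATTID
+        # packs a mapping's id_prefix into its upper 16 bits and the
+        # attribute's last OID sub-identifier into its lower 16 bits, so
+        # swapping the two prefixes above changes which numeric ATTIDs
+        # address which attributes, as spelled out below.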
+ # 90 for unicodePwd (with new prefix = 0)
+ # 589824, 589827 for objectClass and CN
+ # Use of three ensures sorting is correct
+ partial_attribute_set = self.get_partial_attribute_set([90, 589824, 589827])
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ partial_attribute_set=partial_attribute_set,
+ mapping_ctr=pfm)
+
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+
+ found = False
+ for attr in ctr.first_object.object.attribute_ctr.attributes:
+ if attr.attid == drsuapi.DRSUAPI_ATTID_unicodePwd:
+ found = True
+ break
+
+ self.assertTrue(found, "Ensure we get the unicodePwd attribute back")
+
+ def test_regular_prefix_map_attid(self):
+ # Request for a regular (non-secret) attid
+ partial_attribute_set = self.get_partial_attribute_set([drsuapi.DRSUAPI_ATTID_name])
+
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ try:
+ pfm = self._samdb_fetch_pfm_and_schi()
+ except KeyError:
+ # On Windows, prefixMap isn't available over LDAP
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ)
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ pfm = ctr.mapping_ctr
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ partial_attribute_set=partial_attribute_set,
+ mapping_ctr=pfm)
+
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+
+ found = False
+ for attr in ctr.first_object.object.attribute_ctr.attributes:
+ if attr.attid == drsuapi.DRSUAPI_ATTID_name:
+ found = True
+ break
+
+ self.assertTrue(found, "Ensure we get the name attribute back")
+
+ for i, mapping in enumerate(pfm.mappings):
+ # OID: 2.5.4.*
+ # objectClass: 2.5.4.0
+ if mapping.oid.binary_oid == [85, 4]:
+ idx1 = i
+ # OID: 1.2.840.113556.1.4.*
+ # name: 1.2.840.113556.1.4.1
+ elif mapping.oid.binary_oid == [42, 134, 72, 134, 247, 20, 1, 4]:
+ idx2 = i
+
+ (pfm.mappings[idx1].id_prefix,
+ pfm.mappings[idx2].id_prefix) = (pfm.mappings[idx2].id_prefix,
+ pfm.mappings[idx1].id_prefix)
+
+ tmp = pfm.mappings
+ tmp[idx1], tmp[idx2] = tmp[idx2], tmp[idx1]
+ pfm.mappings = tmp
+
+ # 1 for name (with new prefix = 0)
+ partial_attribute_set = self.get_partial_attribute_set([1])
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ partial_attribute_set=partial_attribute_set,
+ mapping_ctr=pfm)
+
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+
+ found = False
+ for attr in ctr.first_object.object.attribute_ctr.attributes:
+ if attr.attid == drsuapi.DRSUAPI_ATTID_name:
+ found = True
+ break
+
+ self.assertTrue(found, "Ensure we get the name attribute back")
+
+ def test_regular_prefix_map_ex_attid(self):
+ # Request for a regular (non-secret) attid
+ partial_attribute_set = self.get_partial_attribute_set([drsuapi.DRSUAPI_ATTID_name])
+ partial_attribute_set_ex = self.get_partial_attribute_set([drsuapi.DRSUAPI_ATTID_unicodePwd])
+
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ try:
+ pfm = self._samdb_fetch_pfm_and_schi()
+ except KeyError:
+ # On Windows, prefixMap isn't available over LDAP
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ)
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ pfm = ctr.mapping_ctr
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ partial_attribute_set=partial_attribute_set,
+ partial_attribute_set_ex=partial_attribute_set_ex,
+ mapping_ctr=pfm)
+
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+
+ found = False
+ for attr in ctr.first_object.object.attribute_ctr.attributes:
+ if attr.attid == drsuapi.DRSUAPI_ATTID_name:
+ found = True
+ break
+
+ self.assertTrue(found, "Ensure we get the name attribute back")
+
+ found = False
+ for attr in ctr.first_object.object.attribute_ctr.attributes:
+ if attr.attid == drsuapi.DRSUAPI_ATTID_unicodePwd:
+ found = True
+ break
+
+ self.assertTrue(found, "Ensure we get the unicodePwd attribute back")
+
+ for i, mapping in enumerate(pfm.mappings):
+ # OID: 2.5.4.*
+ # objectClass: 2.5.4.0
+ if mapping.oid.binary_oid == [85, 4]:
+ idx1 = i
+ # OID: 1.2.840.113556.1.4.*
+ # name: 1.2.840.113556.1.4.1
+ # unicodePwd: 1.2.840.113556.1.4.90
+ elif mapping.oid.binary_oid == [42, 134, 72, 134, 247, 20, 1, 4]:
+ idx2 = i
+
+ (pfm.mappings[idx1].id_prefix,
+ pfm.mappings[idx2].id_prefix) = (pfm.mappings[idx2].id_prefix,
+ pfm.mappings[idx1].id_prefix)
+
+ tmp = pfm.mappings
+ tmp[idx1], tmp[idx2] = tmp[idx2], tmp[idx1]
+ pfm.mappings = tmp
+
+ # 1 for name (with new prefix = 0)
+ partial_attribute_set = self.get_partial_attribute_set([1])
+ # 90 for unicodePwd (with new prefix = 0)
+ # HOWEVER: Windows doesn't seem to respect incoming maps for PartialAttrSetEx
+ partial_attribute_set_ex = self.get_partial_attribute_set([drsuapi.DRSUAPI_ATTID_unicodePwd])
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ partial_attribute_set=partial_attribute_set,
+ partial_attribute_set_ex=partial_attribute_set_ex,
+ mapping_ctr=pfm)
+
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+
+ found = False
+ for attr in ctr.first_object.object.attribute_ctr.attributes:
+ if attr.attid == drsuapi.DRSUAPI_ATTID_name:
+ found = True
+ break
+
+ self.assertTrue(found, "Ensure we get the name attribute back")
+
+ found = False
+ for attr in ctr.first_object.object.attribute_ctr.attributes:
+ if attr.attid == drsuapi.DRSUAPI_ATTID_unicodePwd:
+ found = True
+ break
+
+ self.assertTrue(found, "Ensure we get the unicodePwd attribute back")
+
+ def _samdb_fetch_pfm_and_schi(self):
+ """Fetch prefixMap and schemaInfo stored in SamDB using LDB connection"""
+ samdb = self.ldb_dc1
+ res = samdb.search(base=samdb.get_schema_basedn(), scope=SCOPE_BASE,
+ attrs=["prefixMap", "schemaInfo"])
+
+ pfm = ndr_unpack(drsblobs.prefixMapBlob,
+ res[0]['prefixMap'][0])
+
+ schi = drsuapi.DsReplicaOIDMapping()
+ schi.id_prefix = 0
+ if 'schemaInfo' in res[0]:
+ binary_oid = [x if isinstance(x, int) else ord(x) for x in res[0]['schemaInfo'][0]]
+ schi.oid.length = len(binary_oid)
+ schi.oid.binary_oid = binary_oid
+ else:
+ schema_info = drsblobs.schemaInfoBlob()
+ schema_info.revision = 0
+ schema_info.marker = 0xFF
+ schema_info.invocation_id = misc.GUID(samdb.get_invocation_id())
+
+ binary_oid = [x if isinstance(x, int) else ord(x) for x in ndr_pack(schema_info)]
+ # you have to set the length before setting binary_oid
+ schi.oid.length = len(binary_oid)
+ schi.oid.binary_oid = binary_oid
+
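+        # The mapping_ctr we hand back is expected to carry the schemaInfo
+        # blob as one extra trailing pseudo-mapping (mirroring what the DRS
+        # server itself sends), so append it here.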
+ pfm.ctr.mappings = pfm.ctr.mappings + [schi]
+ pfm.ctr.num_mappings += 1
+ return pfm.ctr
+
+
+class DrsReplicaSyncSortTestCase(drs_base.DrsBaseTestCase):
+ def setUp(self):
+ super(DrsReplicaSyncSortTestCase, self).setUp()
+ self.base_dn = self.ldb_dc1.get_default_basedn()
+ self.ou = "ou=sort_exop%d,%s" % (random.randint(0, 4294967295),
+ self.base_dn)
+ self.ldb_dc1.add({
+ "dn": self.ou,
+ "objectclass": "organizationalUnit"})
+
+ def tearDown(self):
+ super(DrsReplicaSyncSortTestCase, self).tearDown()
+ # tidyup groups and users
+ try:
+ self.ldb_dc1.delete(self.ou, ["tree_delete:1"])
+ except ldb.LdbError as e4:
+ (enum, string) = e4.args
+ if enum == ldb.ERR_NO_SUCH_OBJECT:
+ pass
+
+ def add_linked_attribute(self, src, dest, attr='member'):
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, src)
+ m[attr] = ldb.MessageElement(dest, ldb.FLAG_MOD_ADD, attr)
+ self.ldb_dc1.modify(m)
+
+ def remove_linked_attribute(self, src, dest, attr='member'):
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, src)
+ m[attr] = ldb.MessageElement(dest, ldb.FLAG_MOD_DELETE, attr)
+ self.ldb_dc1.modify(m)
+
+ def test_sort_behaviour_single_object(self):
+ """Testing sorting behaviour on single objects"""
+
+ user1_dn = "cn=test_user1,%s" % self.ou
+ user2_dn = "cn=test_user2,%s" % self.ou
+ user3_dn = "cn=test_user3,%s" % self.ou
+ group_dn = "cn=test_group,%s" % self.ou
+
+ self.ldb_dc1.add({"dn": user1_dn, "objectclass": "user"})
+ self.ldb_dc1.add({"dn": user2_dn, "objectclass": "user"})
+ self.ldb_dc1.add({"dn": user3_dn, "objectclass": "user"})
+ self.ldb_dc1.add({"dn": group_dn, "objectclass": "group"})
+
+ u1_guid = misc.GUID(self.ldb_dc1.search(base=user1_dn,
+ attrs=["objectGUID"])[0]['objectGUID'][0])
+ u2_guid = misc.GUID(self.ldb_dc1.search(base=user2_dn,
+ attrs=["objectGUID"])[0]['objectGUID'][0])
+ u3_guid = misc.GUID(self.ldb_dc1.search(base=user3_dn,
+ attrs=["objectGUID"])[0]['objectGUID'][0])
+ g_guid = misc.GUID(self.ldb_dc1.search(base=group_dn,
+ attrs=["objectGUID"])[0]['objectGUID'][0])
+
+ self.add_linked_attribute(group_dn, user1_dn,
+ attr='member')
+ self.add_linked_attribute(group_dn, user2_dn,
+ attr='member')
+ self.add_linked_attribute(group_dn, user3_dn,
+ attr='member')
+ self.add_linked_attribute(group_dn, user1_dn,
+ attr='managedby')
+ self.add_linked_attribute(group_dn, user2_dn,
+ attr='nonSecurityMember')
+ self.add_linked_attribute(group_dn, user3_dn,
+ attr='nonSecurityMember')
+
+ set_inactive = AbstractLink(drsuapi.DRSUAPI_ATTID_nonSecurityMember,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ g_guid, u3_guid)
+
+ expected_links = set([set_inactive,
+ AbstractLink(drsuapi.DRSUAPI_ATTID_member,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ g_guid,
+ u1_guid),
+ AbstractLink(drsuapi.DRSUAPI_ATTID_member,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ g_guid,
+ u2_guid),
+ AbstractLink(drsuapi.DRSUAPI_ATTID_member,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ g_guid,
+ u3_guid),
+ AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ g_guid,
+ u1_guid),
+ AbstractLink(drsuapi.DRSUAPI_ATTID_nonSecurityMember,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ g_guid,
+ u2_guid),
+ ])
+
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=group_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ)
+
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+
+ no_inactive = []
+ for link in ctr.linked_attributes:
+ target_guid = ndr_unpack(drsuapi.DsReplicaObjectIdentifier3,
+ link.value.blob).guid
+ no_inactive.append((link, target_guid))
+ self.assertTrue(AbstractLink(link.attid, link.flags,
+ link.identifier.guid,
+ target_guid) in expected_links)
+
+ no_inactive.sort(key=cmp_to_key(_linked_attribute_compare))
+
+ # assert the two arrays are the same
+ self.assertEqual(len(expected_links), ctr.linked_attributes_count)
+ self.assertEqual([x[0] for x in no_inactive], ctr.linked_attributes)
+
+ self.remove_linked_attribute(group_dn, user3_dn,
+ attr='nonSecurityMember')
+
+ # Set the link inactive
+ expected_links.remove(set_inactive)
+ set_inactive.flags = 0
+ expected_links.add(set_inactive)
+
+ has_inactive = []
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ for link in ctr.linked_attributes:
+ target_guid = ndr_unpack(drsuapi.DsReplicaObjectIdentifier3,
+ link.value.blob).guid
+ has_inactive.append((link, target_guid))
+ self.assertTrue(AbstractLink(link.attid, link.flags,
+ link.identifier.guid,
+ target_guid) in expected_links)
+
+ has_inactive.sort(key=cmp_to_key(_linked_attribute_compare))
+
+ # assert the two arrays are the same
+ self.assertEqual(len(expected_links), ctr.linked_attributes_count)
+ self.assertEqual([x[0] for x in has_inactive], ctr.linked_attributes)
+
+ def test_sort_behaviour_ncchanges(self):
+ """Testing sorting behaviour on a group of objects."""
+ user1_dn = "cn=test_user1,%s" % self.ou
+ group_dn = "cn=test_group,%s" % self.ou
+ self.ldb_dc1.add({"dn": user1_dn, "objectclass": "user"})
+ self.ldb_dc1.add({"dn": group_dn, "objectclass": "group"})
+
+ self.add_linked_attribute(group_dn, user1_dn,
+ attr='member')
+
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ # Make sure the max objects count is high enough
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.base_dn,
+ replica_flags=0,
+ max_objects=100,
+ exop=drsuapi.DRSUAPI_EXOP_NONE)
+
+ # Loop until we get linked attributes, or we get to the end.
+ # Samba sends linked attributes at the end, unlike Windows.
+ while True:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ if ctr.more_data == 0 or ctr.linked_attributes_count != 0:
+ break
+ req8.highwatermark = ctr.new_highwatermark
+
+ self.assertTrue(ctr.linked_attributes_count != 0)
+
+ no_inactive = []
+ for link in ctr.linked_attributes:
+            try:
+                target_guid = ndr_unpack(drsuapi.DsReplicaObjectIdentifier3,
+                                         link.value.blob).guid
+            except Exception:
+                # The link value may carry a binary DN, so fall back to the
+                # DsReplicaObjectIdentifier3Binary form.
+                target_guid = ndr_unpack(drsuapi.DsReplicaObjectIdentifier3Binary,
+                                         link.value.blob).guid
+ no_inactive.append((link, target_guid))
+
+ no_inactive.sort(key=cmp_to_key(_linked_attribute_compare))
+
+ # assert the two arrays are the same
+ self.assertEqual([x[0] for x in no_inactive], ctr.linked_attributes)
diff --git a/source4/torture/drs/python/getnc_schema.py b/source4/torture/drs/python/getnc_schema.py
new file mode 100644
index 0000000..60062f9
--- /dev/null
+++ b/source4/torture/drs/python/getnc_schema.py
@@ -0,0 +1,304 @@
+import drs_base
+import ldb
+import time
+import random
+import os
+
+break_me = os.getenv("PLEASE_BREAK_MY_WINDOWS") == "1"
+assert break_me, ("This test breaks Windows active directory after "
+ "a few runs. Set PLEASE_BREAK_MY_WINDOWS=1 to run.")
+
+# This test runs against Windows. To run, set up two Windows AD DCs, join one
+# to the other, and make sure the passwords are the same. SMB_CONF_PATH must
+# also be set to any smb.conf file. Set DC1 to the PDC's hostname, and DC2 to
+# the join'd DC's hostname. Example:
+# PLEASE_BREAK_MY_WINDOWS=1
+# DC1=pdc DC2=joindc
+# SMB_CONF_PATH=st/ad_dc/etc/smb.conf
+# PYTHONPATH=$PYTHONPATH:./source4/torture/drs/python
+# python3 ./source4/scripting/bin/subunitrun getnc_schema
+# -UAdministrator%Password
+
+class SchemaReplicationTests(drs_base.DrsBaseTestCase):
+
+ def setUp(self):
+ super(SchemaReplicationTests, self).setUp()
+ self.creds = self.get_credentials()
+ self.cmdline_auth = "-U{}%{}".format(self.creds.get_username(),
+ self.creds.get_password())
+
+ self.from_ldb = self.ldb_dc1
+ self.dest_ldb = self.ldb_dc2
+ self._disable_inbound_repl(self.url_dc1)
+ self._disable_all_repl(self.url_dc1)
+ self.free_offset = 0
+
+    def tearDown(self):
+        self._enable_inbound_repl(self.url_dc1)
+        self._enable_all_repl(self.url_dc1)
+        super(SchemaReplicationTests, self).tearDown()
+
+ def do_repl(self, partition_dn):
+ self._enable_inbound_repl(self.url_dc1)
+ self._enable_all_repl(self.url_dc1)
+
+ samba_tool_cmd = ["drs", "replicate", self.url_dc2, self.url_dc1]
+ samba_tool_cmd += [partition_dn]
+ username = self.creds.get_username()
+ password = self.creds.get_password()
+ samba_tool_cmd += ["-U{0}%{1}".format(username, password)]
+
+ (result, out, err) = self.runsubcmd(*samba_tool_cmd)
+
+ try:
+ self.assertCmdSuccess(result, out, err)
+ except AssertionError:
+ print("Failed repl, retrying in 10s")
+ time.sleep(10)
+ (result, out, err) = self.runsubcmd(*samba_tool_cmd)
+
+ self._disable_inbound_repl(self.url_dc1)
+ self._disable_all_repl(self.url_dc1)
+
+ self.assertCmdSuccess(result, out, err)
+
+ # Get a unique prefix for some search expression like "([att]=[pref]{i}*)"
+ def get_unique(self, expr_templ):
+ found = True
+ while found:
+ i = random.randint(0, 65535)
+ res = self.from_ldb.search(base=self.schema_dn,
+ scope=ldb.SCOPE_SUBTREE,
+ expression=expr_templ.format(i=i))
+ found = len(res) > 0
+
+ return str(i)
+
+ def unique_gov_id_prefix(self):
+ prefix = "1.3.6.1.4.1.7165.4.6.2.8."
+ return prefix + self.get_unique("(governsId=" + prefix + "{i}.*)")
+
+ def unique_cn_prefix(self, prefix="testobj"):
+ return prefix + self.get_unique("(cn=" + prefix + "{i}x*)") + "x"
+
+ # Make test schema classes linked to each other in a line, then modify
+ # them in reverse order so when we repl, a link crosses the chunk
+ # boundary. Chunk size is 133 by default so we do 150.
+ def test_poss_superiors_across_chunk(self):
+ num_schema_objects_to_add = 150
+ class_name = self.unique_cn_prefix()
+
+ ldif_template = """
+dn: CN={class_name}{i},{schema_dn}
+objectClass: top
+objectClass: classSchema
+adminDescription: {class_name}{i}
+adminDisplayName: {class_name}{i}
+cn: {class_name}{i}
+governsId: {gov_id}.{i}
+instanceType: 4
+objectClassCategory: 1
+systemFlags: 16
+systemOnly: FALSE
+"""
+
+ ldif_kwargs = {'class_name': class_name,
+ 'schema_dn': self.schema_dn}
+ gov_id = self.unique_gov_id_prefix()
+ ldif = ldif_template.format(i=0, gov_id=gov_id, **ldif_kwargs)
+ self.from_ldb.add_ldif(ldif)
+
+ ldif_template += "systemPossSuperiors: {possSup}\n"
+
+ ids = list(range(num_schema_objects_to_add))
+ got_no_such_attrib = False
+ for i in ids[1:]:
+ last_class_name = class_name + str(i-1)
+ ldif = ldif_template.format(i=i, gov_id=gov_id,
+ possSup=last_class_name,
+ **ldif_kwargs)
+
+ try:
+ self.from_ldb.add_ldif(ldif)
+ if got_no_such_attrib:
+ self.from_ldb.set_schema_update_now()
+ except ldb.LdbError as e:
+ if e.args[0] != ldb.ERR_NO_SUCH_ATTRIBUTE:
+ self.fail(e)
+ if got_no_such_attrib:
+ self.fail(("got NO_SUCH_ATTRIB even after "
+ "setting schemaUpdateNow", str(e)))
+ print("got NO_SUCH_ATTRIB, trying schemaUpdateNow")
+ got_no_such_attrib = True
+ self.from_ldb.set_schema_update_now()
+ self.from_ldb.add_ldif(ldif)
+ self.from_ldb.set_schema_update_now()
+
+ ldif_template = """
+dn: CN={class_name}{i},{schema_dn}
+changetype: modify
+replace: adminDescription
+adminDescription: new_description
+"""
+
+ for i in reversed(ids):
+ ldif = ldif_template.format(i=i, **ldif_kwargs)
+ self.from_ldb.modify_ldif(ldif)
+
+ self.do_repl(self.schema_dn)
+
+ dn_templ = "CN={class_name}{i},{schema_dn}"
+ for i in ids:
+ dn = dn_templ.format(i=i, **ldif_kwargs)
+ res = self.dest_ldb.search(base=dn, scope=ldb.SCOPE_BASE)
+ self.assertEqual(len(res), 1)
+
+ # Test for method of adding linked attributes in schema partition
+ # required by other tests.
+ def test_create_linked_attribute_in_schema(self):
+ # Make an object outside of the schema partition that we can link to
+ user_name = self.unique_cn_prefix("user")
+ user_dn = "CN={},CN=Users,{}".format(user_name, self.domain_dn)
+
+ ldif_template = """
+dn: {user_dn}
+objectClass: person
+objectClass: user"""
+ ldif = ldif_template.format(user_dn=user_dn)
+ self.from_ldb.add_ldif(ldif)
+
+ # Make test class name unique so test can run multiple times
+ class_name = self.unique_cn_prefix("class")
+
+ kwargs = {'class_name': class_name,
+ 'schema_dn': self.schema_dn,
+ 'user_dn': user_dn}
+
+        # Add an auxiliary classSchema object (objectClassCategory 3) and give
+        # it managedBy so we can create schema objects with linked attributes.
+ ldif_template = """
+dn: CN={class_name},{schema_dn}
+objectClass: classSchema
+governsId: {gov_id}.0
+instanceType: 4
+systemFlags: 16
+systemOnly: FALSE
+objectClassCategory: 3
+mayContain: managedBy
+"""
+
+ gov_id = self.unique_gov_id_prefix()
+ ldif = ldif_template.format(gov_id=gov_id, **kwargs)
+ self.from_ldb.add_ldif(ldif)
+
+ # Now make an instance that points back to the user with managedBy,
+ # thus creating an object in the schema with a linked attribute
+ ldif_template = """
+dn: CN=link{class_name},{schema_dn}
+objectClass: classSchema
+objectClass: {class_name}
+instanceType: 4
+governsId: {gov_id}.0
+systemFlags: 16
+managedBy: {user_dn}
+"""
+
+ gov_id = self.unique_gov_id_prefix()
+ ldif = ldif_template.format(gov_id=gov_id, **kwargs)
+ self.from_ldb.add_ldif(ldif)
+
+ # Check link exists on test schema object
+ dn_templ = "CN=link{class_name},{schema_dn}"
+ dn = dn_templ.format(**kwargs)
+ res = self.from_ldb.search(base=dn, scope=ldb.SCOPE_BASE)
+ self.assertEqual(len(res), 1)
+ self.assertIsNotNone(res[0].get("managedBy"))
+ self.assertEqual(str(res[0].get("managedBy")[0]), user_dn)
+
+ # Check backlink on user object
+ res = self.from_ldb.search(base=user_dn, scope=ldb.SCOPE_BASE)
+ self.assertEqual(len(res), 1)
+ managed_objs = res[0].get("managedObjects")
+ self.assertEqual(len(managed_objs), 1)
+ managed_objs = [str(o) for o in managed_objs]
+ self.assertEqual(managed_objs, [dn_templ.format(**kwargs)])
+
+ def test_schema_linked_attributes(self):
+ num_test_objects = 9
+
+ # Make an object outside of the schema partition that we can link to
+ user_name = self.unique_cn_prefix("user")
+ user_dn = "CN={},CN=Users,{}".format(user_name, self.domain_dn)
+
+ ldif_template = """
+dn: {user_dn}
+objectClass: person
+objectClass: user"""
+ ldif = ldif_template.format(user_dn=user_dn)
+ self.from_ldb.add_ldif(ldif)
+
+ self.do_repl(self.domain_dn)
+
+ # Make test object name prefixes unique so test can run multiple times
+ # in a single testenv (can't delete schema objects)
+ class_name = self.unique_cn_prefix("class")
+ link_class_name = self.unique_cn_prefix("linkClass")
+
+ kwargs = {'class_name': class_name,
+ 'schema_dn': self.schema_dn,
+ 'link_class_name': link_class_name,
+ 'user_dn': user_dn}
+
+        # Add an auxiliary classSchema object (objectClassCategory 3) and give
+        # it managedBy so we can create schema objects with linked attributes.
+ ldif_template = """
+dn: CN={class_name},{schema_dn}
+objectClass: classSchema
+governsId: {gov_id}.0
+instanceType: 4
+systemFlags: 16
+systemOnly: FALSE
+objectClassCategory: 3
+mayContain: managedBy
+"""
+
+ gov_id = self.unique_gov_id_prefix()
+ ldif = ldif_template.format(gov_id=gov_id, **kwargs)
+ self.from_ldb.add_ldif(ldif)
+
+ # Now make instances that point back to the user with managedBy,
+ # thus creating objects in the schema with linked attributes
+ ldif_template = """
+dn: CN={link_class_name}{i},{schema_dn}
+objectClass: classSchema
+objectClass: {class_name}
+instanceType: 4
+governsId: {gov_id}.0
+systemFlags: 16
+managedBy: {user_dn}
+"""
+
+ id_range = list(range(num_test_objects))
+ for i in id_range:
+ gov_id = self.unique_gov_id_prefix()
+ ldif = ldif_template.format(i=i, gov_id=gov_id, **kwargs)
+ self.from_ldb.add_ldif(ldif)
+
+ self.do_repl(self.schema_dn)
+
+ # Check link exists in each test schema objects at destination DC
+ dn_templ = "CN={link_class_name}{i},{schema_dn}"
+ for i in id_range:
+ dn = dn_templ.format(i=i, **kwargs)
+ res = self.dest_ldb.search(base=dn, scope=ldb.SCOPE_BASE)
+ self.assertEqual(len(res), 1)
+ self.assertIsNotNone(res[0].get("managedBy"))
+ self.assertEqual(str(res[0].get("managedBy")[0]), user_dn)
+
+ # Check backlinks list on user object contains DNs of test objects.
+ res = self.dest_ldb.search(base=user_dn, scope=ldb.SCOPE_BASE)
+ self.assertEqual(len(res), 1)
+ managed_objs = res[0].get("managedObjects")
+ self.assertIsNotNone(managed_objs)
+ managed_objs_set = {str(el) for el in managed_objs}
+ expected = {dn_templ.format(i=i, **kwargs) for i in id_range}
+ self.assertEqual(managed_objs_set, expected)
diff --git a/source4/torture/drs/python/getnc_unpriv.py b/source4/torture/drs/python/getnc_unpriv.py
new file mode 100644
index 0000000..c53906a
--- /dev/null
+++ b/source4/torture/drs/python/getnc_unpriv.py
@@ -0,0 +1,306 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Tests replication scenarios with different user privileges.
+# We want to test every replication scenario we can think of against:
+# - users with only GET_CHANGES privileges
+# - users with only GET_ALL_CHANGES privileges
+# - users with both GET_CHANGES and GET_ALL_CHANGES privileges
+# - users with no privileges
+#
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc2_dns_name
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN getnc_unpriv -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import drs_base
+import samba.tests
+from samba import werror, WERRORError
+
+from samba import sd_utils
+import ldb
+from ldb import SCOPE_BASE
+import random
+
+from samba.dcerpc import drsuapi, security
+from samba.credentials import DONT_USE_KERBEROS
+
+
+class DrsReplicaSyncUnprivTestCase(drs_base.DrsBaseTestCase):
+ """Confirm the behaviour of DsGetNCChanges for unprivileged users"""
+
+ def setUp(self):
+ super(DrsReplicaSyncUnprivTestCase, self).setUp()
+ self.get_changes_user = "get-changes-user"
+ self.base_dn = self.ldb_dc1.get_default_basedn()
+ self.user_pass = samba.generate_random_password(12, 16)
+
+        # Add some randomness to the test OU name. (Deletion of a previous
+        # test's objects can be slow to replicate out, so the OU created by an
+        # earlier testenv may still exist at this point.)
+ rand = random.randint(1, 10000000)
+ test_ou = "OU=test_getnc_unpriv%d" % rand
+ self.ou = "%s,%s" % (test_ou, self.base_dn)
+ self.ldb_dc1.add({
+ "dn": self.ou,
+ "objectclass": "organizationalUnit"})
+ self.ldb_dc1.newuser(self.get_changes_user, self.user_pass,
+ userou=test_ou)
+ (self.drs, self.drs_handle) = self._ds_bind(self.dnsname_dc1)
+
+ self.sd_utils = sd_utils.SDUtils(self.ldb_dc1)
+ self.user_dn = "cn=%s,%s" % (self.get_changes_user, self.ou)
+ user_sid = self.sd_utils.get_object_sid(self.user_dn)
+ self.acl_mod_get_changes = "(OA;;CR;%s;;%s)" % (security.GUID_DRS_GET_CHANGES,
+ str(user_sid))
+ self.acl_mod_get_all_changes = "(OA;;CR;%s;;%s)" % (security.GUID_DRS_GET_ALL_CHANGES,
+ str(user_sid))
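+        # (The two SDDL ACEs above have the form "(OA;;CR;<rights GUID>;;<user SID>)":
+        # an object "allow" ACE granting the control-access right named by the
+        # GUID (DS-Replication-Get-Changes / -Get-Changes-All) to the test user.
+        # The tests below add them to the DACL on the NC head as needed.)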
+ self.desc_sddl = self.sd_utils.get_sd_as_sddl(self.base_dn)
+
+ # We set DONT_USE_KERBEROS to avoid a race with getting the
+ # user replicated to our selected KDC
+ self.user_creds = self.insta_creds(template=self.get_credentials(),
+ username=self.get_changes_user,
+ userpass=self.user_pass,
+ kerberos_state=DONT_USE_KERBEROS)
+ (self.user_drs, self.user_drs_handle) = self._ds_bind(self.dnsname_dc1,
+ self.user_creds)
+
+ def tearDown(self):
+ self.sd_utils.modify_sd_on_dn(self.base_dn, self.desc_sddl)
+ try:
+ self.ldb_dc1.delete(self.ou, ["tree_delete:1"])
+ except ldb.LdbError as e1:
+ (enum, string) = e1.args
+ if enum == ldb.ERR_NO_SUCH_OBJECT:
+ pass
+ super(DrsReplicaSyncUnprivTestCase, self).tearDown()
+
+ def _test_repl_exop(self, exop, repl_obj, expected_error, dest_dsa=None,
+ partial_attribute_set=None):
+ """
+ Common function to send a replication request and check the result
+ matches what's expected.
+ """
+ req8 = self._exop_req8(dest_dsa=dest_dsa,
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=repl_obj,
+ exop=exop,
+ replica_flags=drsuapi.DRSUAPI_DRS_WRIT_REP,
+ partial_attribute_set=partial_attribute_set)
+
+ if expected_error is None:
+ # user is OK, request should be accepted without throwing an error
+ (level, ctr) = self.user_drs.DsGetNCChanges(self.user_drs_handle,
+ 8, req8)
+ else:
+ # check the request is rejected (with the error we're expecting)
+ try:
+ (level, ctr) = self.user_drs.DsGetNCChanges(self.user_drs_handle,
+ 8, req8)
+ self.fail("Should have failed with user denied access")
+ except WERRORError as e:
+ (enum, estr) = e.args
+                self.assertIn(enum, expected_error,
+                              "Got unexpected error: %s" % estr)
+
+ def _test_repl_single_obj(self, repl_obj, expected_error,
+ partial_attribute_set=None):
+ """
+ Checks that replication on a single object either succeeds or fails as
+ expected (based on the user's access rights)
+ """
+ self._test_repl_exop(exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ repl_obj=repl_obj,
+ expected_error=expected_error,
+ partial_attribute_set=partial_attribute_set)
+
+ def _test_repl_secret(self, repl_obj, expected_error, dest_dsa=None):
+ """
+ Checks that REPL_SECRET on an object either succeeds or fails as
+ expected (based on the user's access rights)
+ """
+ self._test_repl_exop(exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ repl_obj=repl_obj,
+ expected_error=expected_error,
+ dest_dsa=dest_dsa)
+
+ def _test_repl_full(self, expected_error, partial_attribute_set=None):
+ """
+ Checks that a full replication either succeeds or fails as expected
+ (based on the user's access rights)
+ """
+ self._test_repl_exop(exop=drsuapi.DRSUAPI_EXOP_NONE,
+ repl_obj=self.ldb_dc1.get_default_basedn(),
+ expected_error=expected_error,
+ partial_attribute_set=partial_attribute_set)
+
+ def _test_repl_full_on_ou(self, repl_obj, expected_error):
+ """
+ Full replication on a specific OU should always fail (it should be done
+ against a base NC). The error may vary based on the user's access rights
+ """
+ # Just try against the OU created in the test setup
+ self._test_repl_exop(exop=drsuapi.DRSUAPI_EXOP_NONE,
+ repl_obj=repl_obj,
+ expected_error=expected_error)
+
+ def test_repl_getchanges_userpriv(self):
+ """
+ Tests various replication requests made by a user with only GET_CHANGES
+ rights. Some requests will be accepted, but most will be rejected.
+ """
+
+ # Assign the user GET_CHANGES rights
+ self.sd_utils.dacl_add_ace(self.base_dn, self.acl_mod_get_changes)
+
+ self._test_repl_single_obj(repl_obj=self.ou,
+ expected_error=[werror.WERR_DS_DRA_ACCESS_DENIED])
+ bad_ou = "OU=bad_obj,%s" % self.ou
+ self._test_repl_single_obj(repl_obj=bad_ou,
+ expected_error=[werror.WERR_DS_DRA_BAD_DN,
+ werror.WERR_DS_DRA_ACCESS_DENIED])
+
+ self._test_repl_secret(repl_obj=self.ou,
+ expected_error=[werror.WERR_DS_DRA_ACCESS_DENIED])
+ self._test_repl_secret(repl_obj=self.user_dn,
+ expected_error=[werror.WERR_DS_DRA_ACCESS_DENIED])
+ self._test_repl_secret(repl_obj=self.user_dn,
+ dest_dsa=self.ldb_dc1.get_ntds_GUID(),
+ expected_error=[werror.WERR_DS_DRA_ACCESS_DENIED])
+ self._test_repl_secret(repl_obj=bad_ou,
+ expected_error=[werror.WERR_DS_DRA_BAD_DN])
+
+ self._test_repl_full(expected_error=[werror.WERR_DS_DRA_ACCESS_DENIED])
+ self._test_repl_full_on_ou(repl_obj=self.ou,
+ expected_error=[werror.WERR_DS_CANT_FIND_EXPECTED_NC,
+ werror.WERR_DS_DRA_ACCESS_DENIED])
+ self._test_repl_full_on_ou(repl_obj=bad_ou,
+ expected_error=[werror.WERR_DS_DRA_BAD_NC,
+ werror.WERR_DS_DRA_ACCESS_DENIED])
+
+ # Partial Attribute Sets don't require GET_ALL_CHANGES rights, so we
+ # expect the following to succeed
+ self._test_repl_single_obj(repl_obj=self.ou,
+ expected_error=None,
+ partial_attribute_set=self.get_partial_attribute_set())
+ self._test_repl_full(expected_error=None,
+ partial_attribute_set=self.get_partial_attribute_set())
+
+ def test_repl_getallchanges_userpriv(self):
+ """
+ Tests various replication requests made by a user with only
+ GET_ALL_CHANGES rights. Note that assigning these rights is possible,
+ but doesn't make a lot of sense. We test it anyway for consistency.
+ """
+
+ # Assign the user GET_ALL_CHANGES rights
+ self.sd_utils.dacl_add_ace(self.base_dn, self.acl_mod_get_all_changes)
+
+ # We can expect to get the same responses as an unprivileged user,
+ # i.e. we have permission to see the results, but don't have permission
+ # to ask
+ self.test_repl_no_userpriv()
+
+ def test_repl_both_userpriv(self):
+ """
+ Tests various replication requests made by a privileged user (i.e. has
+ both GET_CHANGES and GET_ALL_CHANGES). We expect any valid requests
+ to be accepted.
+ """
+
+ # Assign the user both GET_CHANGES and GET_ALL_CHANGES rights
+ both_rights = self.acl_mod_get_changes + self.acl_mod_get_all_changes
+ self.sd_utils.dacl_add_ace(self.base_dn, both_rights)
+
+ self._test_repl_single_obj(repl_obj=self.ou,
+ expected_error=None)
+ bad_ou = "OU=bad_obj,%s" % self.ou
+ self._test_repl_single_obj(repl_obj=bad_ou,
+ expected_error=[werror.WERR_DS_DRA_BAD_DN])
+
+ # Microsoft returns DB_ERROR, Samba returns ACCESS_DENIED
+ self._test_repl_secret(repl_obj=self.ou,
+ expected_error=[werror.WERR_DS_DRA_DB_ERROR,
+ werror.WERR_DS_DRA_ACCESS_DENIED])
+ self._test_repl_secret(repl_obj=self.user_dn,
+ expected_error=[werror.WERR_DS_DRA_DB_ERROR,
+ werror.WERR_DS_DRA_ACCESS_DENIED])
+ # Note that Windows accepts this but Samba rejects it
+ self._test_repl_secret(repl_obj=self.user_dn,
+ dest_dsa=self.ldb_dc1.get_ntds_GUID(),
+ expected_error=[werror.WERR_DS_DRA_ACCESS_DENIED])
+
+ self._test_repl_secret(repl_obj=bad_ou,
+ expected_error=[werror.WERR_DS_DRA_BAD_DN])
+
+ self._test_repl_full(expected_error=None)
+ self._test_repl_full_on_ou(repl_obj=self.ou,
+ expected_error=[werror.WERR_DS_CANT_FIND_EXPECTED_NC])
+ self._test_repl_full_on_ou(repl_obj=bad_ou,
+ expected_error=[werror.WERR_DS_DRA_BAD_NC,
+ werror.WERR_DS_DRA_BAD_DN])
+
+ self._test_repl_single_obj(repl_obj=self.ou,
+ expected_error=None,
+ partial_attribute_set=self.get_partial_attribute_set())
+ self._test_repl_full(expected_error=None,
+ partial_attribute_set=self.get_partial_attribute_set())
+
+ def test_repl_no_userpriv(self):
+ """
+        Tests various replication requests made by an unprivileged user.
+ We expect all these requests to be rejected.
+ """
+
+ # Microsoft usually returns BAD_DN, Samba returns ACCESS_DENIED
+ usual_error = [werror.WERR_DS_DRA_BAD_DN, werror.WERR_DS_DRA_ACCESS_DENIED]
+
+ self._test_repl_single_obj(repl_obj=self.ou,
+ expected_error=usual_error)
+ bad_ou = "OU=bad_obj,%s" % self.ou
+ self._test_repl_single_obj(repl_obj=bad_ou,
+ expected_error=usual_error)
+
+ self._test_repl_secret(repl_obj=self.ou,
+ expected_error=usual_error)
+ self._test_repl_secret(repl_obj=self.user_dn,
+ expected_error=usual_error)
+ self._test_repl_secret(repl_obj=self.user_dn,
+ dest_dsa=self.ldb_dc1.get_ntds_GUID(),
+ expected_error=usual_error)
+ self._test_repl_secret(repl_obj=bad_ou,
+ expected_error=usual_error)
+
+ self._test_repl_full(expected_error=[werror.WERR_DS_DRA_ACCESS_DENIED])
+ self._test_repl_full_on_ou(repl_obj=self.ou,
+ expected_error=usual_error)
+ self._test_repl_full_on_ou(repl_obj=bad_ou,
+ expected_error=[werror.WERR_DS_DRA_BAD_NC,
+ werror.WERR_DS_DRA_ACCESS_DENIED])
+
+ self._test_repl_single_obj(repl_obj=self.ou,
+ expected_error=usual_error,
+ partial_attribute_set=self.get_partial_attribute_set())
+ self._test_repl_full(expected_error=[werror.WERR_DS_DRA_ACCESS_DENIED],
+ partial_attribute_set=self.get_partial_attribute_set())
diff --git a/source4/torture/drs/python/getncchanges.py b/source4/torture/drs/python/getncchanges.py
new file mode 100644
index 0000000..6b5456a
--- /dev/null
+++ b/source4/torture/drs/python/getncchanges.py
@@ -0,0 +1,1427 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Tests various schema replication scenarios
+#
+# Copyright (C) Catalyst.Net Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc2_dns_name
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN \
+# getncchanges -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import drs_base
+import samba.tests
+import ldb
+from ldb import SCOPE_BASE
+import random
+
+from samba.dcerpc import drsuapi, misc
+from samba import WERRORError
+from samba import werror
+
+class DrsReplicaSyncIntegrityTestCase(drs_base.DrsBaseTestCase):
+ def setUp(self):
+ super(DrsReplicaSyncIntegrityTestCase, self).setUp()
+
+ self.init_test_state()
+
+ # Note that DC2 is the DC with the testenv-specific quirks (e.g. it's
+ # the vampire_dc), so we point this test directly at that DC
+ self.set_test_ldb_dc(self.ldb_dc2)
+
+ self.ou = str(samba.tests.create_test_ou(self.test_ldb_dc,
+ "getncchanges." + self.id().rsplit(".", 1)[1]))
+
+ self.addCleanup(self.ldb_dc2.delete, self.ou, ["tree_delete:1"])
+
+ self.base_dn = self.test_ldb_dc.get_default_basedn()
+
+ self.default_conn = DcConnection(self, self.ldb_dc2, self.dnsname_dc2)
+ self.set_dc_connection(self.default_conn)
+
+ def init_test_state(self):
+ self.rxd_dn_list = []
+ self.rxd_links = []
+ self.rxd_guids = []
+ self.last_ctr = None
+
+ # 100 is the minimum max_objects that Microsoft seems to honour
+ # (the max honoured is 400ish), so we use that in these tests
+ self.max_objects = 100
+
+ # store whether we used GET_TGT/GET_ANC flags in the requests
+ self.used_get_tgt = False
+ self.used_get_anc = False
+
+ def add_object(self, dn, objectclass="organizationalunit"):
+ """Adds an OU object"""
+ self.test_ldb_dc.add({"dn": dn, "objectclass": objectclass})
+ res = self.test_ldb_dc.search(base=dn, scope=SCOPE_BASE)
+ self.assertEqual(len(res), 1)
+
+ def modify_object(self, dn, attr, value):
+ """Modifies an object's USN by adding an attribute value to it"""
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.test_ldb_dc, dn)
+ m[attr] = ldb.MessageElement(value, ldb.FLAG_MOD_ADD, attr)
+ self.test_ldb_dc.modify(m)
+
+ def delete_attribute(self, dn, attr, value):
+ """Deletes an attribute from an object"""
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.test_ldb_dc, dn)
+ m[attr] = ldb.MessageElement(value, ldb.FLAG_MOD_DELETE, attr)
+ self.test_ldb_dc.modify(m)
+
+ def start_new_repl_cycle(self):
+ """Resets enough state info to start a new replication cycle"""
+ # reset rxd_links, but leave rxd_guids and rxd_dn_list alone so we know
+ # whether a parent/target is unknown and needs GET_ANC/GET_TGT to
+ # resolve
+ self.rxd_links = []
+
+ self.used_get_tgt = False
+ self.used_get_anc = False
+ # mostly preserve self.last_ctr, so that we use the last HWM
+ if self.last_ctr is not None:
+ self.last_ctr.more_data = True
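+        # (Setting more_data back to True makes replication_complete() return
+        # False, so the next repl_get_next() call carries on from the stored
+        # highwatermark rather than starting a fresh cycle.)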
+
+ def create_object_range(self, start, end, prefix="",
+ children=None, parent_list=None):
+ """
+ Creates a block of objects. Object names are numbered sequentially,
+ using the optional prefix supplied. If the children parameter is
+ supplied it will create a parent-child hierarchy and return the
+ top-level parents separately.
+ """
+ dn_list = []
+
+ # Use dummy/empty lists if we're not creating a parent/child hierarchy
+ if children is None:
+ children = []
+
+ if parent_list is None:
+ parent_list = []
+
+ # Create the parents first, then the children.
+ # This makes it easier to see in debug when GET_ANC takes effect
+        # because the parents and children become interleaved (without GET_ANC,
+        # this approach keeps the objects organized into a block of parents
+        # followed by a block of children)
+ for x in range(start, end):
+ ou = "OU=test_ou_%s%d,%s" % (prefix, x, self.ou)
+ self.add_object(ou)
+ dn_list.append(ou)
+
+ # keep track of the top-level parents (if needed)
+ parent_list.append(ou)
+
+ # create the block of children (if needed)
+ for x in range(start, end):
+ for child in children:
+ ou = "OU=test_ou_child%s%d,%s" % (child, x, parent_list[x])
+ self.add_object(ou)
+ dn_list.append(ou)
+
+ return dn_list
+
+ def assert_expected_data(self, expected_list):
+ """
+ Asserts that we received all the DNs that we expected and
+ none are missing.
+ """
+ received_list = self.rxd_dn_list
+
+ # Note that with GET_ANC Windows can end up sending the same parent
+ # object multiple times, so this might be noteworthy but doesn't
+ # warrant failing the test
+ num_received = len(received_list)
+ num_expected = len(expected_list)
+ if num_received != num_expected:
+ print("Note: received %d objects but expected %d" % (num_received,
+ num_expected))
+
+ # Check that we received every object that we were expecting
+ for dn in expected_list:
+            self.assertIn(dn, received_list,
+                          "DN '%s' missing from replication." % dn)
+
+ def test_repl_integrity(self):
+ """
+ Modify the objects being replicated while the replication is still
+ in progress and check that no object loss occurs.
+ """
+
+ # The server behaviour differs between samba and Windows. Samba returns
+ # the objects in the original order (up to the pre-modify HWM). Windows
+ # incorporates the modified objects and returns them in the new order
+ # (i.e. modified objects last), up to the post-modify HWM. The
+ # Microsoft docs state the Windows behaviour is optional.
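+        # (Either way, all this test requires is that bumping some objects'
+        # USNs mid-cycle does not cause any object to be skipped.)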
+
+ # Create a range of objects to replicate.
+ expected_dn_list = self.create_object_range(0, 400)
+ (orig_hwm, unused) = self._get_highest_hwm_utdv(self.test_ldb_dc)
+
+ # We ask for the first page of 100 objects.
+ # For this test, we don't care what order we receive the objects in,
+ # so long as by the end we've received everything
+ self.repl_get_next()
+
+ # Modify some of the second page of objects. This should bump the
+ # highwatermark
+ for x in range(100, 200):
+ self.modify_object(expected_dn_list[x], "displayName", "OU%d" % x)
+
+ (post_modify_hwm, _) = self._get_highest_hwm_utdv(self.test_ldb_dc)
+ self.assertTrue(post_modify_hwm.highest_usn > orig_hwm.highest_usn)
+
+ # Get the remaining blocks of data
+ while not self.replication_complete():
+ self.repl_get_next()
+
+ # Check we still receive all the objects we're expecting
+ self.assert_expected_data(expected_dn_list)
+
+ def is_parent_known(self, dn, known_dn_list):
+ """
+ Returns True if the parent of the dn specified is in known_dn_list
+ """
+
+ # we can sometimes get system objects like the RID Manager returned.
+ # Ignore anything that is not under the test OU we created
+ if self.ou not in dn:
+ return True
+
+ # Remove the child portion from the name to get the parent's DN
+ name_substrings = dn.split(",")
+ del name_substrings[0]
+
+ parent_dn = ",".join(name_substrings)
+
+        # check either this object is a parent (its parent is the top-level
+        # test object), or its parent has been seen previously
+ return parent_dn == self.ou or parent_dn in known_dn_list
+
+ def _repl_send_request(self, get_anc=False, get_tgt=False):
+ """
+ Sends a GetNCChanges request for the next block of replication data.
+ """
+
+ # we're just trying to mimic regular client behaviour here, so just
+ # use the highwatermark in the last response we received
+ if self.last_ctr:
+ highwatermark = self.last_ctr.new_highwatermark
+ uptodateness_vector = self.last_ctr.uptodateness_vector
+ else:
+ # this is the first replication chunk
+ highwatermark = None
+ uptodateness_vector = None
+
+ # Ask for the next block of replication data
+ replica_flags = drsuapi.DRSUAPI_DRS_WRIT_REP
+ more_flags = 0
+
+ if get_anc:
+ replica_flags |= drsuapi.DRSUAPI_DRS_GET_ANC
+ self.used_get_anc = True
+
+ if get_tgt:
+ more_flags = drsuapi.DRSUAPI_DRS_GET_TGT
+ self.used_get_tgt = True
+
+ # return the response from the DC
+ return self._get_replication(replica_flags,
+ max_objects=self.max_objects,
+ highwatermark=highwatermark,
+                                     uptodateness_vector=uptodateness_vector,
+                                     more_flags=more_flags)
+
+ def repl_get_next(self, get_anc=False, get_tgt=False, assert_links=False):
+ """
+ Requests the next block of replication data. This tries to simulate
+ client behaviour - if we receive a replicated object that we don't know
+ the parent of, then re-request the block with the GET_ANC flag set.
+ If we don't know the target object for a linked attribute, then
+ re-request with GET_TGT.
+ """
+
+ # send a request to the DC and get the response
+ ctr6 = self._repl_send_request(get_anc=get_anc, get_tgt=get_tgt)
+
+ # extract the object DNs and their GUIDs from the response
+ rxd_dn_list = self._get_ctr6_dn_list(ctr6)
+ rxd_guid_list = self._get_ctr6_object_guids(ctr6)
+
+ # we'll add new objects as we discover them, so take a copy of the
+ # ones we already know about, so we can modify these lists safely
+ known_objects = self.rxd_dn_list[:]
+ known_guids = self.rxd_guids[:]
+
+ # check that we know the parent for every object received
+ for i in range(0, len(rxd_dn_list)):
+
+ dn = rxd_dn_list[i]
+ guid = rxd_guid_list[i]
+
+ if self.is_parent_known(dn, known_objects):
+
+ # the new DN is now known so add it to the list.
+ # It may be the parent of another child in this block
+ known_objects.append(dn)
+ known_guids.append(guid)
+ else:
+ # If we've already set the GET_ANC flag then it should mean
+ # we receive the parents before the child
+ self.assertFalse(get_anc, "Unknown parent for object %s" % dn)
+
+ print("Unknown parent for %s - try GET_ANC" % dn)
+
+ # try the same thing again with the GET_ANC flag set this time
+ return self.repl_get_next(get_anc=True, get_tgt=get_tgt,
+ assert_links=assert_links)
+
+ # check we know about references to any objects in the linked attrs
+ received_links = self._get_ctr6_links(ctr6)
+
+ # This is so that older versions of Samba fail - we want the links to
+ # be sent roughly with the objects, rather than getting all links at
+ # the end
+ if assert_links:
+ self.assertTrue(len(received_links) > 0,
+ "Links were expected in the GetNCChanges response")
+
+ for link in received_links:
+
+ # skip any links that aren't part of the test
+ if self.ou not in link.targetDN:
+ continue
+
+ # check the source object is known (Windows can actually send links
+ # where we don't know the source object yet). Samba shouldn't ever
+ # hit this case because it gets the links based on the source
+ if link.identifier not in known_guids:
+
+ # If we've already set the GET_ANC flag then it should mean
+ # this case doesn't happen
+ self.assertFalse(get_anc, "Unknown source object for GUID %s"
+ % link.identifier)
+
+ print("Unknown source GUID %s - try GET_ANC" % link.identifier)
+
+ # try the same thing again with the GET_ANC flag set this time
+ return self.repl_get_next(get_anc=True, get_tgt=get_tgt,
+ assert_links=assert_links)
+
+ # check we know the target object
+ if link.targetGUID not in known_guids:
+
+ # If we've already set the GET_TGT flag then we should have
+ # already received any objects we need to know about
+ self.assertFalse(get_tgt, "Unknown linked target for object %s"
+ % link.targetDN)
+
+ print("Unknown target for %s - try GET_TGT" % link.targetDN)
+
+ # try the same thing again with the GET_TGT flag set this time
+ return self.repl_get_next(get_anc=get_anc, get_tgt=True,
+ assert_links=assert_links)
+
+ # store the last successful result so we know what HWM to request next
+ self.last_ctr = ctr6
+
+ # store the objects, GUIDs, and links we received
+ self.rxd_dn_list += self._get_ctr6_dn_list(ctr6)
+ self.rxd_links += self._get_ctr6_links(ctr6)
+ self.rxd_guids += self._get_ctr6_object_guids(ctr6)
+
+ return ctr6
+
+ def replication_complete(self):
+ """Returns True if the current/last replication cycle is complete"""
+
+ if self.last_ctr is None or self.last_ctr.more_data:
+ return False
+ else:
+ return True
+
+ def test_repl_integrity_get_anc(self):
+ """
+ Modify the parent objects being replicated while the replication is
+ still in progress (using GET_ANC) and check that no object loss occurs.
+ """
+
+ # Note that GET_ANC behaviour varies between Windows and Samba.
+ # On Samba GET_ANC results in the replication restarting from the very
+ # beginning. After that, Samba remembers GET_ANC and also sends the
+ # parents in subsequent requests (regardless of whether GET_ANC is
+ # specified in the later request).
+ # Windows only sends the parents if GET_ANC was specified in the last
+ # request. It will also resend a parent, even if it's already sent the
+ # parent in a previous response (whereas Samba doesn't).
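+        # (Either behaviour is acceptable here; the test only requires that
+        # every parent and child is eventually received.)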
+
+ # Create a small block of 50 parents, each with 2 children (A and B)
+ # This is so that we receive some children in the first block, so we
+ # can resend with GET_ANC before we learn too many parents
+ parent_dn_list = []
+ expected_dn_list = self.create_object_range(0, 50, prefix="parent",
+ children=("A", "B"),
+ parent_list=parent_dn_list)
+
+ # create the remaining parents and children
+ expected_dn_list += self.create_object_range(50, 150, prefix="parent",
+ children=("A", "B"),
+ parent_list=parent_dn_list)
+
+ # We've now got objects in the following order:
+ # [50 parents][100 children][100 parents][200 children]
+
+ # Modify the first parent so that it's now ordered last by USN
+ # This means we set the GET_ANC flag pretty much straight away
+ # because we receive the first child before the first parent
+ self.modify_object(parent_dn_list[0], "displayName", "OU0")
+
+ # modify a later block of parents so they also get reordered
+ for x in range(50, 100):
+ self.modify_object(parent_dn_list[x], "displayName", "OU%d" % x)
+
+ # Get the first block of objects - this should resend the request with
+ # GET_ANC set because we won't know about the first child's parent.
+ # On samba GET_ANC essentially starts the sync from scratch again, so
+ # we get this over with early before we learn too many parents
+ self.repl_get_next()
+
+ # modify the last chunk of parents. They should now have a USN higher
+ # than the highwater-mark for the replication cycle
+ for x in range(100, 150):
+ self.modify_object(parent_dn_list[x], "displayName", "OU%d" % x)
+
+ # Get the remaining blocks of data - this will resend the request with
+ # GET_ANC if it encounters an object it doesn't have the parent for.
+ while not self.replication_complete():
+ self.repl_get_next()
+
+ # The way the test objects have been created should force
+ # self.repl_get_next() to use the GET_ANC flag. If this doesn't
+ # actually happen, then the test isn't doing its job properly
+ self.assertTrue(self.used_get_anc,
+ "Test didn't use the GET_ANC flag as expected")
+
+ # Check we get all the objects we're expecting
+ self.assert_expected_data(expected_dn_list)
+
+ def assert_expected_links(self, objects_with_links, link_attr="managedBy",
+ num_expected=None):
+ """
+ Asserts that a GetNCChanges response contains any expected links
+ for the objects it contains.
+ """
+ received_links = self.rxd_links
+
+ if num_expected is None:
+ num_expected = len(objects_with_links)
+
+        self.assertEqual(len(received_links), num_expected,
+                         "Received %d links but expected %d"
+                         % (len(received_links), num_expected))
+
+ for dn in objects_with_links:
+ self.assert_object_has_link(dn, link_attr, received_links)
+
+ def assert_object_has_link(self, dn, link_attr, received_links):
+ """
+ Queries the object in the DB and asserts there is a link in the
+ GetNCChanges response that matches.
+ """
+
+ # Look up the link attribute in the DB
+ # The extended_dn option will dump the GUID info for the link
+ # attribute (as a hex blob)
+ res = self.test_ldb_dc.search(ldb.Dn(self.test_ldb_dc, dn),
+ attrs=[link_attr],
+ controls=['extended_dn:1:0'],
+ scope=ldb.SCOPE_BASE)
+
+ # We didn't find the expected link attribute in the DB for the object.
+ # Something has gone wrong somewhere...
+ self.assertTrue(link_attr in res[0],
+ "%s in DB doesn't have attribute %s" % (dn, link_attr))
+
+ # find the received link in the list and assert that the target and
+ # source GUIDs match what's in the DB
+ for val in [str(val) for val in res[0][link_attr]]:
+ # Work out the expected source and target GUIDs for the DB link
+ target_dn = ldb.Dn(self.test_ldb_dc, val)
+ targetGUID_blob = target_dn.get_extended_component("GUID")
+ sourceGUID_blob = res[0].dn.get_extended_component("GUID")
+
+ found = False
+
+ for link in received_links:
+ if link.selfGUID_blob == sourceGUID_blob and \
+ link.targetGUID_blob == targetGUID_blob:
+
+ found = True
+
+ if self._debug:
+ print("Link %s --> %s" % (dn[:25], link.targetDN[:25]))
+ break
+
+ self.assertTrue(found,
+ "Did not receive expected link for DN %s" % dn)
+
+ def test_repl_get_tgt(self):
+ """
+ Creates a scenario where we should receive the linked attribute before
+ we know about the target object, and therefore need to use GET_TGT.
+ Note: Samba currently avoids this problem by sending all its links last
+ """
+
+ # create the test objects
+ reportees = self.create_object_range(0, 100, prefix="reportee")
+ managers = self.create_object_range(0, 100, prefix="manager")
+ all_objects = managers + reportees
+ expected_links = reportees
+
+ # add a link attribute to each reportee object that points to the
+ # corresponding manager object as the target
+ for i in range(0, 100):
+ self.modify_object(reportees[i], "managedBy", managers[i])
+
+ # touch the managers (the link-target objects) again to make sure the
+ # reportees (link source objects) get returned first by the replication
+ for i in range(0, 100):
+ self.modify_object(managers[i], "displayName", "OU%d" % i)
+
+ links_expected = True
+
+ # Get all the replication data - this code should resend the requests
+ # with GET_TGT
+ while not self.replication_complete():
+
+ # get the next block of replication data (this sets GET_TGT
+ # if needed)
+ self.repl_get_next(assert_links=links_expected)
+ links_expected = len(self.rxd_links) < len(expected_links)
+
+ # The way the test objects have been created should force
+ # self.repl_get_next() to use the GET_TGT flag. If this doesn't
+ # actually happen, then the test isn't doing its job properly
+ self.assertTrue(self.used_get_tgt,
+ "Test didn't use the GET_TGT flag as expected")
+
+ # Check we get all the objects we're expecting
+ self.assert_expected_data(all_objects)
+
+ # Check we received links for all the reportees
+ self.assert_expected_links(expected_links)
+
+ def test_repl_get_tgt_chain(self):
+ """
+ Tests the behaviour of GET_TGT with a more complicated scenario.
+ Here we create a chain of objects linked together, so if we follow
+ the link target, then we'd traverse ~200 objects each time.
+ """
+
+ # create the test objects
+ objectsA = self.create_object_range(0, 100, prefix="AAA")
+ objectsB = self.create_object_range(0, 100, prefix="BBB")
+ objectsC = self.create_object_range(0, 100, prefix="CCC")
+
+ # create a complex set of object links:
+ # A0-->B0-->C1-->B2-->C3-->B4-->and so on...
+ # Basically each object-A should link to a circular chain of 200 B/C
+ # objects. We create the links in separate chunks here, as it makes it
+ # clearer what happens with the USN (links on Windows have their own
+ # USN, so this approach means the A->B/B->C links aren't interleaved)
+ for i in range(0, 100):
+ self.modify_object(objectsA[i], "managedBy", objectsB[i])
+
+ for i in range(0, 100):
+ self.modify_object(objectsB[i], "managedBy",
+ objectsC[(i + 1) % 100])
+
+ for i in range(0, 100):
+ self.modify_object(objectsC[i], "managedBy",
+ objectsB[(i + 1) % 100])
+
+ all_objects = objectsA + objectsB + objectsC
+ expected_links = all_objects
+
+ # the default order the objects now get returned in should be:
+ # [A0-A99][B0-B99][C0-C99]
+
+ links_expected = True
+
+ # Get all the replication data - this code should resend the requests
+ # with GET_TGT
+ while not self.replication_complete():
+
+ # get the next block of replication data (this sets GET_TGT
+ # if needed)
+ self.repl_get_next(assert_links=links_expected)
+ links_expected = len(self.rxd_links) < len(expected_links)
+
+ # The way the test objects have been created should force
+ # self.repl_get_next() to use the GET_TGT flag. If this doesn't
+ # actually happen, then the test isn't doing its job properly
+ self.assertTrue(self.used_get_tgt,
+ "Test didn't use the GET_TGT flag as expected")
+
+ # Check we get all the objects we're expecting
+ self.assert_expected_data(all_objects)
+
+        # Check we received links for all the objects
+ self.assert_expected_links(expected_links)
+
+ def test_repl_integrity_link_attr(self):
+ """
+ Tests adding links to new objects while a replication is in progress.
+ """
+
+ # create some source objects for the linked attributes, sandwiched
+ # between 2 blocks of filler objects
+ filler = self.create_object_range(0, 100, prefix="filler")
+ reportees = self.create_object_range(0, 100, prefix="reportee")
+ filler += self.create_object_range(100, 200, prefix="filler")
+
+ # Start the replication and get the first block of filler objects
+ # (We're being mean here and setting the GET_TGT flag right from the
+ # start. On earlier Samba versions, if the client encountered an
+ # unknown target object and retried with GET_TGT, it would restart the
+ # replication cycle from scratch, which avoids the problem).
+ self.repl_get_next(get_tgt=True)
+
+ # create the target objects and add the links. These objects should be
+ # outside the scope of the Samba replication cycle, but the links
+ # should still get sent with the source object
+ managers = self.create_object_range(0, 100, prefix="manager")
+
+ for i in range(0, 100):
+ self.modify_object(reportees[i], "managedBy", managers[i])
+
+ expected_objects = managers + reportees + filler
+ expected_links = reportees
+
+ # complete the replication
+ while not self.replication_complete():
+ self.repl_get_next(get_tgt=True)
+
+ # If we didn't receive the most recently created objects in the last
+ # replication cycle, then kick off another replication to get them
+ if len(self.rxd_dn_list) < len(expected_objects):
+ self.repl_get_next()
+
+ while not self.replication_complete():
+ self.repl_get_next()
+
+ # Check we get all the objects we're expecting
+ self.assert_expected_data(expected_objects)
+
+        # Check we received links for all the reportees (the link sources)
+ self.assert_expected_links(expected_links)
+
+ def test_repl_get_anc_link_attr(self):
+ """
+ A basic GET_ANC test where the parents have linked attributes
+ """
+
+ # Create a block of 100 parents and 100 children
+ parent_dn_list = []
+ expected_dn_list = self.create_object_range(0, 100, prefix="parent",
+                                                    children=("A",),
+ parent_list=parent_dn_list)
+
+ # Add links from the parents to the children
+ for x in range(0, 100):
+ self.modify_object(parent_dn_list[x], "managedBy",
+ expected_dn_list[x + 100])
+
+ # add some filler objects at the end. This allows us to easily see
+ # which chunk the links get sent in
+ expected_dn_list += self.create_object_range(0, 100, prefix="filler")
+
+ # We've now got objects in the following order:
+ # [100 x children][100 x parents][100 x filler]
+
+ # Get the replication data - because the block of children come first,
+ # this should retry the request with GET_ANC
+ while not self.replication_complete():
+ self.repl_get_next()
+
+ self.assertTrue(self.used_get_anc,
+ "Test didn't use the GET_ANC flag as expected")
+
+ # Check we get all the objects we're expecting
+ self.assert_expected_data(expected_dn_list)
+
+ # Check we received links for all the parents
+ self.assert_expected_links(parent_dn_list)
+
+ def test_repl_get_tgt_and_anc(self):
+ """
+ Check we can resolve an unknown ancestor when fetching the link target,
+ i.e. tests using GET_TGT and GET_ANC in combination
+ """
+
+ # Create some parent/child objects (the child will be the link target)
+ parents = []
+ all_objects = self.create_object_range(0, 100, prefix="parent",
+ children=["la_tgt"],
+ parent_list=parents)
+
+ children = [item for item in all_objects if item not in parents]
+
+ # create the link source objects and link them to the child/target
+ la_sources = self.create_object_range(0, 100, prefix="la_src")
+ all_objects += la_sources
+
+ for i in range(0, 100):
+ self.modify_object(la_sources[i], "managedBy", children[i])
+
+ expected_links = la_sources
+
+ # modify the children/targets so they come after the link source
+ for x in range(0, 100):
+ self.modify_object(children[x], "displayName", "OU%d" % x)
+
+ # modify the parents, so they now come last in the replication
+ for x in range(0, 100):
+ self.modify_object(parents[x], "displayName", "OU%d" % x)
+
+ # We've now got objects in the following order:
+ # [100 la_source][100 la_target][100 parents (of la_target)]
+
+ links_expected = True
+
+ # Get all the replication data - this code should resend the requests
+ # with GET_TGT and GET_ANC
+ while not self.replication_complete():
+
+ # get the next block of replication data (this sets
+ # GET_TGT/GET_ANC)
+ self.repl_get_next(assert_links=links_expected)
+ links_expected = len(self.rxd_links) < len(expected_links)
+
+ # The way the test objects have been created should force
+ # self.repl_get_next() to use the GET_TGT/GET_ANC flags. If this
+ # doesn't actually happen, then the test isn't doing its job properly
+ self.assertTrue(self.used_get_tgt,
+ "Test didn't use the GET_TGT flag as expected")
+ self.assertTrue(self.used_get_anc,
+ "Test didn't use the GET_ANC flag as expected")
+
+ # Check we get all the objects we're expecting
+ self.assert_expected_data(all_objects)
+
+ # Check we received links for all the link sources
+ self.assert_expected_links(expected_links)
+
+ # Second part of test. Add some extra objects and kick off another
+ # replication. The test code will use the HWM from the last replication
+ # so we'll only receive the objects we modify below
+ self.start_new_repl_cycle()
+
+ # add an extra level of grandchildren that hang off a child
+ # that got created last time
+ new_parent = "OU=test_new_parent,%s" % children[0]
+ self.add_object(new_parent)
+ new_children = []
+
+ for x in range(0, 50):
+ dn = "OU=test_new_la_tgt%d,%s" % (x, new_parent)
+ self.add_object(dn)
+ new_children.append(dn)
+
+ # replace half of the links to point to the new children
+ for x in range(0, 50):
+ self.delete_attribute(la_sources[x], "managedBy", children[x])
+ self.modify_object(la_sources[x], "managedBy", new_children[x])
+
+ # add some filler objects to fill up the 1st chunk
+ filler = self.create_object_range(0, 100, prefix="filler")
+
+ # modify the new children/targets so they come after the link source
+ for x in range(0, 50):
+ self.modify_object(new_children[x], "displayName", "OU-%d" % x)
+
+ # modify the parent, so it now comes last in the replication
+ self.modify_object(new_parent, "displayName", "OU%d" % x)
+
+ # We should now get the modified objects in the following order:
+ # [50 links (x 2)][100 filler][50 new children][new parent]
+ # Note that the link sources aren't actually sent (their new linked
+ # attributes are sent, but apart from that, nothing has changed)
+ all_objects = filler + new_children + [new_parent]
+ expected_links = la_sources[:50]
+
+ links_expected = True
+
+ while not self.replication_complete():
+ self.repl_get_next(assert_links=links_expected)
+ links_expected = len(self.rxd_links) < len(expected_links)
+
+ self.assertTrue(self.used_get_tgt,
+ "Test didn't use the GET_TGT flag as expected")
+ self.assertTrue(self.used_get_anc,
+ "Test didn't use the GET_ANC flag as expected")
+
+ # Check we get all the objects we're expecting
+ self.assert_expected_data(all_objects)
+
+ # Check we received links (50 deleted links and 50 new)
+ self.assert_expected_links(expected_links, num_expected=100)
+
+ def _repl_integrity_obj_deletion(self, delete_link_source=True):
+ """
+ Tests deleting link objects while a replication is in progress.
+ """
+
+        # create some objects and link them together, with some filler
+        # objects in between the link sources
+ la_sources = self.create_object_range(0, 100, prefix="la_source")
+ la_targets = self.create_object_range(0, 100, prefix="la_targets")
+
+ for i in range(0, 50):
+ self.modify_object(la_sources[i], "managedBy", la_targets[i])
+
+ filler = self.create_object_range(0, 100, prefix="filler")
+
+ for i in range(50, 100):
+ self.modify_object(la_sources[i], "managedBy", la_targets[i])
+
+ # touch the targets so that the sources get replicated first
+ for i in range(0, 100):
+ self.modify_object(la_targets[i], "displayName", "OU%d" % i)
+
+ # objects should now be in the following USN order:
+ # [50 la_source][100 filler][50 la_source][100 la_target]
+
+ # Get the first block containing 50 link sources
+ self.repl_get_next()
+
+ # delete either the link targets or link source objects
+ if delete_link_source:
+ objects_to_delete = la_sources
+ # in GET_TGT testenvs we only receive the first 50 source objects
+ expected_objects = la_sources[:50] + la_targets + filler
+ else:
+ objects_to_delete = la_targets
+ expected_objects = la_sources + filler
+
+ for obj in objects_to_delete:
+ self.ldb_dc2.delete(obj)
+
+ # complete the replication
+ while not self.replication_complete():
+ self.repl_get_next()
+
+ # Check we get all the objects we're expecting
+ self.assert_expected_data(expected_objects)
+
+ # we can't use assert_expected_links() here because it tries to check
+ # against the deleted objects on the DC. (Although we receive some
+ # links from the first block processed, the Samba client should end up
+ # deleting these, as the source/target object involved is deleted)
+        self.assertEqual(len(self.rxd_links), 50,
+                         "Expected 50 links, not %d" % len(self.rxd_links))
+
+ def test_repl_integrity_src_obj_deletion(self):
+ self._repl_integrity_obj_deletion(delete_link_source=True)
+
+ def test_repl_integrity_tgt_obj_deletion(self):
+ self._repl_integrity_obj_deletion(delete_link_source=False)
+
+ def restore_deleted_object(self, guid, new_dn):
+ """Re-animates a deleted object"""
+
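+        # (This mirrors an AD tombstone reanimation: look up the tombstone by
+        # GUID using the show_deleted control, then remove isDeleted and
+        # replace distinguishedName to move the object back to a live DN.)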
+ guid_str = self._GUID_string(guid)
+ res = self.test_ldb_dc.search(base="<GUID=%s>" % guid_str,
+ attrs=["isDeleted"],
+ controls=['show_deleted:1'],
+ scope=ldb.SCOPE_BASE)
+ if len(res) != 1:
+ return
+
+ msg = ldb.Message()
+ msg.dn = res[0].dn
+ msg["isDeleted"] = ldb.MessageElement([], ldb.FLAG_MOD_DELETE,
+ "isDeleted")
+ msg["distinguishedName"] = ldb.MessageElement([new_dn],
+ ldb.FLAG_MOD_REPLACE,
+ "distinguishedName")
+ self.test_ldb_dc.modify(msg, ["show_deleted:1"])
+
+ def sync_DCs(self, nc_dn=None):
+ # make sure DC1 has all the changes we've made to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2,
+ nc_dn=nc_dn)
+
+ def get_object_guid(self, dn):
+ res = self.test_ldb_dc.search(base=dn, attrs=["objectGUID"],
+ scope=ldb.SCOPE_BASE)
+ return res[0]['objectGUID'][0]
+
+ def set_dc_connection(self, conn):
+ """
+ Switches over the connection state info that the underlying drs_base
+ class uses so that we replicate with a different DC.
+ """
+ self.default_hwm = conn.default_hwm
+ self.default_utdv = conn.default_utdv
+ self.drs = conn.drs
+ self.drs_handle = conn.drs_handle
+ self.set_test_ldb_dc(conn.ldb_dc)
+
+ def assert_DCs_replication_is_consistent(self, peer_conn, all_objects,
+ expected_links):
+ """
+ Replicates against both the primary and secondary DCs in the testenv
+ and checks that both return the expected results.
+ """
+ print("Checking replication against primary test DC...")
+
+ # get the replication data from the test DC first
+ while not self.replication_complete():
+ self.repl_get_next()
+
+ # Check we get all the objects and links we're expecting
+ self.assert_expected_data(all_objects)
+ self.assert_expected_links(expected_links)
+
+ # switch over the DC state info so we now talk to the peer DC
+ self.set_dc_connection(peer_conn)
+ self.init_test_state()
+
+ print("Checking replication against secondary test DC...")
+
+ # check that we get the same information from the 2nd DC
+ while not self.replication_complete():
+ self.repl_get_next()
+
+ self.assert_expected_data(all_objects)
+ self.assert_expected_links(expected_links)
+
+ # switch back to using the default connection
+ self.set_dc_connection(self.default_conn)
+
+ def test_repl_integrity_obj_reanimation(self):
+ """
+        Checks that no links are lost when they arrive for a re-animated
+        object. We also replicate from the peer DC to make sure it doesn't
+        drop any links either.
+ """
+
+ # This test is a little different in that we're particularly interested
+ # in exercising the replmd client code on the second DC.
+ # First, make sure the peer DC has the base OU, then connect to it (so
+ # we store its initial HWM)
+ self.sync_DCs()
+ peer_conn = DcConnection(self, self.ldb_dc1, self.dnsname_dc1)
+
+ # create the link source/target objects
+ la_sources = self.create_object_range(0, 100, prefix="la_src")
+ la_targets = self.create_object_range(0, 100, prefix="la_tgt")
+
+ # store the target object's GUIDs (we need to know these to
+ # reanimate them)
+ target_guids = []
+
+ for dn in la_targets:
+ target_guids.append(self.get_object_guid(dn))
+
+ # delete the link target
+ for x in range(0, 100):
+ self.ldb_dc2.delete(la_targets[x])
+
+ # sync the DCs, then disable replication. We want the peer DC to get
+ # all the following changes in a single replication cycle
+ self.sync_DCs()
+ self._disable_all_repl(self.dnsname_dc2)
+
+ # restore the target objects for the linked attributes again
+ for x in range(0, 100):
+ self.restore_deleted_object(target_guids[x], la_targets[x])
+
+ # add the links
+ for x in range(0, 100):
+ self.modify_object(la_sources[x], "managedBy", la_targets[x])
+
+ # create some additional filler objects
+ filler = self.create_object_range(0, 100, prefix="filler")
+
+ # modify the targets so they now come last
+ for x in range(0, 100):
+ self.modify_object(la_targets[x], "displayName", "OU-%d" % x)
+
+ # the objects should now be sent in the following order:
+ # [la sources + links][filler][la targets]
+ all_objects = la_sources + la_targets + filler
+ expected_links = la_sources
+
+        # Enable replication again and make sure the 2 DCs are back in sync
+ self._enable_all_repl(self.dnsname_dc2)
+ self.sync_DCs()
+
+ # Get the replication data from each DC in turn.
+ # Check that both give us all the objects and links we're expecting,
+ # i.e. no links were lost
+ self.assert_DCs_replication_is_consistent(peer_conn, all_objects,
+ expected_links)
+
+ def _test_repl_integrity_cross_partition_links(self, get_tgt=False):
+ """
+ Checks that a cross-partition link to an unknown target object does
+ not result in missing links.
+ """
+
+ # check the peer DC is up-to-date, then connect (storing its HWM)
+ self.sync_DCs()
+ peer_conn = DcConnection(self, self.ldb_dc1, self.dnsname_dc1)
+
+ # stop replication so the peer gets the following objects in one go
+ self._disable_all_repl(self.dnsname_dc2)
+
+ # optionally force the client-side to use GET_TGT locally, by adding a
+ # one-way link to a missing/deleted target object
+ if get_tgt:
+ missing_target = "OU=missing_tgt,%s" % self.ou
+ self.add_object(missing_target)
+ get_tgt_source = "CN=get_tgt_src,%s" % self.ou
+ self.add_object(get_tgt_source,
+ objectclass="msExchConfigurationContainer")
+ self.modify_object(get_tgt_source, "addressBookRoots2",
+ missing_target)
+ self.test_ldb_dc.delete(missing_target)
+
+ # create a link source object in the main NC
+ la_source = "OU=cross_nc_src,%s" % self.ou
+ self.add_object(la_source)
+
+ # create the link target (a server object) in the config NC
+ sites_dn = "CN=Sites,%s" % self.config_dn
+ servers_dn = "CN=Servers,CN=Default-First-Site-Name,%s" % sites_dn
+ rand = random.randint(1, 10000000)
+ la_target = "CN=getncchanges-%d,%s" % (rand, servers_dn)
+ self.add_object(la_target, objectclass="server")
+
+ # add a cross-partition link between the two
+ self.modify_object(la_source, "managedBy", la_target)
+
+ # First, sync to the peer the NC containing the link source object
+ self.sync_DCs()
+
+ # Now, before the peer has received the partition containing the target
+ # object, try replicating from the peer. It will only know about half
+ # of the link at this point, but it should be a valid scenario
+ self.set_dc_connection(peer_conn)
+
+ while not self.replication_complete():
+ # pretend we've received other link targets out of order and that's
+ # forced us to use GET_TGT. This checks the peer doesn't fail
+ # trying to fetch a cross-partition target object that doesn't
+ # exist
+ self.repl_get_next(get_tgt=True)
+
+ self.set_dc_connection(self.default_conn)
+
+ # delete the GET_TGT test object. We're not interested in asserting its
+ # links - it was just there to make the client use GET_TGT (and it
+ # creates an inconsistency because one DC correctly ignores the link,
+ # because it points to a deleted object)
+ if get_tgt:
+ self.test_ldb_dc.delete(get_tgt_source)
+
+ self.init_test_state()
+
+ # Now sync across the partition containing the link target object
+ self.sync_DCs(nc_dn=self.config_dn)
+ self._enable_all_repl(self.dnsname_dc2)
+
+ # Get the replication data from each DC in turn.
+ # Check that both return the cross-partition link (note we're not
+ # checking the config domain NC here for simplicity)
+ self.assert_DCs_replication_is_consistent(peer_conn,
+ all_objects=[la_source],
+ expected_links=[la_source])
+
+ # the cross-partition linked attribute has a missing backlink. Check
+ # that we can still delete it successfully
+ self.delete_attribute(la_source, "managedBy", la_target)
+ self.sync_DCs()
+
+ res = self.test_ldb_dc.search(ldb.Dn(self.ldb_dc1, la_source),
+ attrs=["managedBy"],
+ controls=['extended_dn:1:0'],
+ scope=ldb.SCOPE_BASE)
+ self.assertFalse("managedBy" in res[0],
+ "%s in DB still has managedBy attribute" % la_source)
+ res = self.test_ldb_dc.search(ldb.Dn(self.ldb_dc2, la_source),
+ attrs=["managedBy"],
+ controls=['extended_dn:1:0'],
+ scope=ldb.SCOPE_BASE)
+ self.assertFalse("managedBy" in res[0],
+ "%s in DB still has managedBy attribute" % la_source)
+
+ # Check receiving a cross-partition link to a deleted target.
+ # Delete the target and make sure the deletion is sync'd between DCs
+ target_guid = self.get_object_guid(la_target)
+ self.test_ldb_dc.delete(la_target)
+ self.sync_DCs(nc_dn=self.config_dn)
+ self._disable_all_repl(self.dnsname_dc2)
+
+ # re-animate the target
+ self.restore_deleted_object(target_guid, la_target)
+ self.modify_object(la_source, "managedBy", la_target)
+
+ # now sync the link - because the target is in another partition, the
+ # peer DC receives a link for a deleted target, which it should accept
+ self.sync_DCs()
+ res = self.test_ldb_dc.search(ldb.Dn(self.ldb_dc1, la_source),
+ attrs=["managedBy"],
+ controls=['extended_dn:1:0'],
+ scope=ldb.SCOPE_BASE)
+ self.assertTrue("managedBy" in res[0],
+ "%s in DB missing managedBy attribute" % la_source)
+
+ # cleanup the server object we created in the Configuration partition
+ self.test_ldb_dc.delete(la_target)
+ self._enable_all_repl(self.dnsname_dc2)
+
+ def test_repl_integrity_cross_partition_links(self):
+ self._test_repl_integrity_cross_partition_links(get_tgt=False)
+
+ def test_repl_integrity_cross_partition_links_with_tgt(self):
+ self._test_repl_integrity_cross_partition_links(get_tgt=True)
+
+ def test_repl_get_tgt_multivalued_links(self):
+ """Tests replication with multi-valued link attributes."""
+
+ # create the target/source objects and link them together
+ la_targets = self.create_object_range(0, 500, prefix="la_tgt")
+ la_source = "CN=la_src,%s" % self.ou
+ self.add_object(la_source, objectclass="msExchConfigurationContainer")
+
+ for tgt in la_targets:
+ self.modify_object(la_source, "addressBookRoots2", tgt)
+
+ filler = self.create_object_range(0, 100, prefix="filler")
+
+ # We should receive the objects/links in the following order:
+ # [500 targets + 1 source][500 links][100 filler]
+ expected_objects = la_targets + [la_source] + filler
+ link_only_chunk = False
+
+ # First do the replication without needing GET_TGT
+ while not self.replication_complete():
+ ctr6 = self.repl_get_next()
+
+ if ctr6.object_count == 0 and ctr6.linked_attributes_count != 0:
+ link_only_chunk = True
+
+ # we should receive one chunk that contains only links
+ self.assertTrue(link_only_chunk,
+ "Expected to receive a chunk containing only links")
+
+ # check we received all the expected objects/links
+ self.assert_expected_data(expected_objects)
+ self.assert_expected_links([la_source], link_attr="addressBookRoots2",
+ num_expected=500)
+
+ # Do the replication again, forcing the use of GET_TGT this time
+ self.init_test_state()
+
+ for x in range(0, 500):
+ self.modify_object(la_targets[x], "displayName", "OU-%d" % x)
+
+ # The objects/links should get sent in the following order:
+ # [1 source][500 targets][500 links][100 filler]
+
+ while not self.replication_complete():
+ ctr6 = self.repl_get_next()
+
+ self.assertTrue(self.used_get_tgt,
+ "Test didn't use the GET_TGT flag as expected")
+
+ # check we received all the expected objects/links
+ self.assert_expected_data(expected_objects)
+ self.assert_expected_links([la_source], link_attr="addressBookRoots2",
+ num_expected=500)
+
+
+ def test_InvalidNC_DummyDN_InvalidGUID_full_repl(self):
+ """Test full replication on a totally invalid GUID fails with the right error code"""
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ req8 = self._exop_req8(dest_dsa="9c637462-5b8c-4467-aef2-bdb1f57bc4ef",
+ invocation_id=dc_guid_1,
+ nc_dn_str="DummyDN",
+ nc_guid=misc.GUID("c2d2f745-1610-4e93-964b-d4ba73eb32f8"),
+ exop=drsuapi.DRSUAPI_EXOP_NONE,
+ max_objects=1)
+
+ (drs, drs_handle) = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+        try:
+            (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+            self.fail("DsGetNCChanges with an invalid GUID should have failed")
+        except WERRORError as e1:
+            (enum, estr) = e1.args
+            self.assertEqual(enum, werror.WERR_DS_DRA_BAD_NC)
+
+ def test_DummyDN_valid_GUID_full_repl(self):
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ res = self.ldb_dc1.search(base=self.base_dn, scope=SCOPE_BASE,
+ attrs=["objectGUID"])
+
+ guid = misc.GUID(res[0]["objectGUID"][0])
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str="DummyDN",
+ nc_guid=guid,
+ replica_flags=drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ exop=drsuapi.DRSUAPI_EXOP_NONE,
+ max_objects=1)
+
+ try:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ except WERRORError as e1:
+ (enum, estr) = e1.args
+ self.fail(f"Failed to call GetNCChanges with DummyDN and a GUID: {estr}")
+
+ # The NC should be the first object returned due to GET_ANC
+ self.assertEqual(ctr.first_object.object.identifier.guid, guid)
+
+ def _test_do_full_repl_no_overlap(self, mix=True, get_anc=False):
+ self.default_hwm = drsuapi.DsReplicaHighWaterMark()
+
+ # We set get_anc=True so we can assert the BASE DN will be the
+ # first object
+ ctr6 = self._repl_send_request(get_anc=get_anc)
+ guid_list_1 = self._get_ctr6_object_guids(ctr6)
+
+ if mix:
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.ldb_dc1.get_default_basedn(),
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ)
+
+ (level, ctr_repl_obj) = self.drs.DsGetNCChanges(self.drs_handle, 8, req8)
+
+ self.assertEqual(ctr_repl_obj.extended_ret, drsuapi.DRSUAPI_EXOP_ERR_SUCCESS)
+
+ repl_obj_guid_list = self._get_ctr6_object_guids(ctr_repl_obj)
+
+ self.assertEqual(len(repl_obj_guid_list), 1)
+
+            # This should be the first object in the main replication: in one
+            # case because of get_anc=True above, and otherwise because of the
+            # rule that the NC root must come first regardless
+ self.assertEqual(repl_obj_guid_list[0], guid_list_1[0])
+
+ self.last_ctr = ctr6
+ ctr6 = self._repl_send_request(get_anc=True)
+ guid_list_2 = self._get_ctr6_object_guids(ctr6)
+
+ self.assertNotEqual(guid_list_1, guid_list_2)
+
+ def test_do_full_repl_no_overlap_get_anc(self):
+ """
+        Make sure that a full replication of an NC reaches its goal despite needing multiple passes
+ """
+ self._test_do_full_repl_no_overlap(mix=False, get_anc=True)
+
+ def test_do_full_repl_no_overlap(self):
+ """
+        Make sure that a full replication of an NC reaches its goal despite needing multiple passes
+ """
+ self._test_do_full_repl_no_overlap(mix=False)
+
+ def test_do_full_repl_mix_no_overlap(self):
+ """
+        Make sure that a full replication of an NC reaches its goal despite needing multiple passes
+
+ Assert this is true even if we do a REPL_OBJ in between the replications
+
+ """
+ self._test_do_full_repl_no_overlap(mix=True)
+
+ def nc_change(self):
+ old_base_msg = self.default_conn.ldb_dc.search(base=self.base_dn,
+ scope=SCOPE_BASE,
+ attrs=["oEMInformation"])
+ rec_cleanup = {"dn": self.base_dn,
+ "oEMInformation": old_base_msg[0]["oEMInformation"][0]}
+ m_cleanup = ldb.Message.from_dict(self.default_conn.ldb_dc,
+ rec_cleanup,
+ ldb.FLAG_MOD_REPLACE)
+
+ self.addCleanup(self.default_conn.ldb_dc.modify, m_cleanup)
+
+ rec = {"dn": self.base_dn,
+ "oEMInformation": f"Tortured by Samba's getncchanges.py {self.id()} against {self.default_conn.dnsname_dc}"}
+ m = ldb.Message.from_dict(self.default_conn.ldb_dc, rec, ldb.FLAG_MOD_REPLACE)
+ self.default_conn.ldb_dc.modify(m)
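+        # (Rewriting oEMInformation on the NC head bumps the NC root object's
+        # uSNChanged; the NC-is-first tests below rely on this to reposition
+        # the NC root within the replication stream. The addCleanup above
+        # restores the original value afterwards.)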
+
+ def _test_repl_nc_is_first(self, start_at_zero=True, nc_change=True, ou_change=True, mid_change=False):
+ """Tests that the NC is always replicated first, but does not move the
+ tmp_highest_usn at that point, just like 'early' GET_ANC objects.
+ """
+
+        # create objects, more than twice the page size of 133
+ objs = self.create_object_range(0, 300, prefix="obj")
+
+ if nc_change:
+ self.nc_change()
+
+ if mid_change:
+ # create even more objects
+ objs = self.create_object_range(301, 450, prefix="obj2")
+
+ base_msg = self.default_conn.ldb_dc.search(base=self.base_dn,
+ scope=SCOPE_BASE,
+ attrs=["uSNChanged",
+ "objectGUID"])
+
+ base_guid = misc.GUID(base_msg[0]["objectGUID"][0])
+ base_usn = int(base_msg[0]["uSNChanged"][0])
+
+ if ou_change:
+ # Make one more modification. We want to assert we have
+ # caught up to the base DN, but Windows both promotes the NC
+ # to the front and skips including it in the tmp_highest_usn,
+            # so we make a later modification to show that we still
+            # receive this change.
+ rec = {"dn": self.ou,
+ "postalCode": "0"}
+ m = ldb.Message.from_dict(self.default_conn.ldb_dc, rec, ldb.FLAG_MOD_REPLACE)
+ self.default_conn.ldb_dc.modify(m)
+
+ ou_msg = self.default_conn.ldb_dc.search(base=self.ou,
+ scope=SCOPE_BASE,
+ attrs=["uSNChanged",
+ "objectGUID"])
+
+ ou_guid = misc.GUID(ou_msg[0]["objectGUID"][0])
+ ou_usn = int(ou_msg[0]["uSNChanged"][0])
+
+        # Check some predicates about USN ordering that the assertions below rely on
+ if ou_change and nc_change:
+ self.assertGreater(ou_usn, base_usn)
+ elif not ou_change and nc_change:
+ self.assertGreater(base_usn, ou_usn)
+
+ ctr6 = self.repl_get_next()
+
+ guid_list_1 = self._get_ctr6_object_guids(ctr6)
+ if nc_change or start_at_zero:
+ self.assertEqual(base_guid, misc.GUID(guid_list_1[0]))
+ self.assertIn(str(base_guid), guid_list_1)
+ self.assertNotIn(str(base_guid), guid_list_1[1:])
+ else:
+ self.assertNotEqual(base_guid, misc.GUID(guid_list_1[0]))
+ self.assertNotIn(str(base_guid), guid_list_1)
+
+ self.assertTrue(ctr6.more_data)
+
+ if not ou_change and nc_change:
+ self.assertLess(ctr6.new_highwatermark.tmp_highest_usn, base_usn)
+
+ i = 0
+ while not self.replication_complete():
+ i = i + 1
+ last_tmp_highest_usn = ctr6.new_highwatermark.tmp_highest_usn
+ ctr6 = self.repl_get_next()
+ guid_list_2 = self._get_ctr6_object_guids(ctr6)
+ if len(guid_list_2) > 0:
+ self.assertNotEqual(last_tmp_highest_usn, ctr6.new_highwatermark.tmp_highest_usn)
+
+ if (nc_change or start_at_zero) and base_usn > last_tmp_highest_usn:
+ self.assertEqual(base_guid, misc.GUID(guid_list_2[0]),
+ f"pass={i} more_data={ctr6.more_data} base_usn={base_usn} tmp_highest_usn={ctr6.new_highwatermark.tmp_highest_usn} last_tmp_highest_usn={last_tmp_highest_usn}")
+ self.assertIn(str(base_guid), guid_list_2,
+                              f"pass {i} more_data={ctr6.more_data} base_usn={base_usn} tmp_highest_usn={ctr6.new_highwatermark.tmp_highest_usn} last_tmp_highest_usn={last_tmp_highest_usn}")
+ else:
+ self.assertNotIn(str(base_guid), guid_list_2,
+                                 f"pass {i} more_data={ctr6.more_data} base_usn={base_usn} tmp_highest_usn={ctr6.new_highwatermark.tmp_highest_usn} last_tmp_highest_usn={last_tmp_highest_usn}")
+
+ if ou_change:
+ # The modification to the base OU should be in the final chunk
+ self.assertIn(str(ou_guid), guid_list_2)
+ self.assertGreaterEqual(ctr6.new_highwatermark.highest_usn,
+ ou_usn)
+ else:
+ # Show that the NC root change does not show up in the
+ # highest_usn. We either get the change before or after
+ # it.
+ self.assertNotEqual(ctr6.new_highwatermark.highest_usn,
+ base_usn)
+ self.assertEqual(ctr6.new_highwatermark.highest_usn,
+ ctr6.new_highwatermark.tmp_highest_usn)
+
+ self.assertFalse(ctr6.more_data)
+
+ def test_repl_nc_is_first_start_zero_nc_change(self):
+ self.default_hwm = drsuapi.DsReplicaHighWaterMark()
+ self._test_repl_nc_is_first(start_at_zero=True, nc_change=True, ou_change=True)
+
+ def test_repl_nc_is_first_start_zero(self):
+ # Get the NC change in the middle of the replication stream, certainly not at the start or end
+ self.nc_change()
+ self.default_hwm = drsuapi.DsReplicaHighWaterMark()
+ self._test_repl_nc_is_first(start_at_zero=True, nc_change=False, ou_change=False)
+
+ def test_repl_nc_is_first_mid(self):
+        # This is a modification of the next test; Samba will pass it
+        # because it always includes the NC in the tmp_highest_usn at
+        # the point where it belongs
+ self._test_repl_nc_is_first(start_at_zero=False,
+ nc_change=True,
+ ou_change=True,
+ mid_change=True)
+
+ def test_repl_nc_is_first(self):
+        # This is a modification of the next test; Samba will pass it
+        # because it always includes the NC in the tmp_highest_usn at
+        # the point where it belongs
+ self._test_repl_nc_is_first(start_at_zero=False, nc_change=True, ou_change=True)
+
+ def test_repl_nc_is_first_nc_change_only(self):
+ # This shows that the NC change is not reflected in the tmp_highest_usn
+ self._test_repl_nc_is_first(start_at_zero=False, nc_change=True, ou_change=False)
+
+ def test_repl_nc_is_first_no_change(self):
+ # The NC should not be present in this replication
+ self._test_repl_nc_is_first(start_at_zero=False, nc_change=False, ou_change=False)
+
+class DcConnection:
+ """Helper class to track a connection to another DC"""
+
+ def __init__(self, drs_base, ldb_dc, dnsname_dc):
+ self.ldb_dc = ldb_dc
+ (self.drs, self.drs_handle) = drs_base._ds_bind(dnsname_dc)
+ (self.default_hwm, utdv) = drs_base._get_highest_hwm_utdv(ldb_dc)
+ self.default_utdv = utdv
+ self.dnsname_dc = dnsname_dc
diff --git a/source4/torture/drs/python/link_conflicts.py b/source4/torture/drs/python/link_conflicts.py
new file mode 100644
index 0000000..d344b7e
--- /dev/null
+++ b/source4/torture/drs/python/link_conflicts.py
@@ -0,0 +1,763 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Tests replication scenarios that involve conflicting linked attribute
+# information between the 2 DCs.
+#
+# Copyright (C) Catalyst.Net Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc2_dns_name
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN \
+# link_conflicts -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import drs_base
+import samba.tests
+import ldb
+from ldb import SCOPE_BASE
+import random
+import time
+
+from drs_base import AbstractLink
+from samba.dcerpc import drsuapi, misc
+from samba.dcerpc.drsuapi import DRSUAPI_EXOP_ERR_SUCCESS
+
+# specifies the order to sync DCs in
+DC1_TO_DC2 = 1
+DC2_TO_DC1 = 2
+
+
+class DrsReplicaLinkConflictTestCase(drs_base.DrsBaseTestCase):
+ def setUp(self):
+ super(DrsReplicaLinkConflictTestCase, self).setUp()
+
+ self.ou = samba.tests.create_test_ou(self.ldb_dc1,
+ "test_link_conflict")
+ self.base_dn = self.ldb_dc1.get_default_basedn()
+
+ (self.drs, self.drs_handle) = self._ds_bind(self.dnsname_dc1)
+ (self.drs2, self.drs2_handle) = self._ds_bind(self.dnsname_dc2)
+
+ # disable replication for the tests so we can control at what point
+ # the DCs try to replicate
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ def tearDown(self):
+ # re-enable replication
+ self._enable_inbound_repl(self.dnsname_dc1)
+ self._enable_inbound_repl(self.dnsname_dc2)
+ self.ldb_dc1.delete(self.ou, ["tree_delete:1"])
+ super(DrsReplicaLinkConflictTestCase, self).tearDown()
+
+ def get_guid(self, samdb, dn):
+ """Returns an object's GUID (in string format)"""
+ res = samdb.search(base=dn, attrs=["objectGUID"], scope=ldb.SCOPE_BASE)
+ return self._GUID_string(res[0]['objectGUID'][0])
+
+ def add_object(self, samdb, dn, objectclass="organizationalunit"):
+ """Adds an object"""
+ samdb.add({"dn": dn, "objectclass": objectclass})
+ return self.get_guid(samdb, dn)
+
+ def modify_object(self, samdb, dn, attr, value):
+ """Modifies an attribute for an object"""
+ m = ldb.Message()
+ m.dn = ldb.Dn(samdb, dn)
+ m[attr] = ldb.MessageElement(value, ldb.FLAG_MOD_ADD, attr)
+ samdb.modify(m)
+
+ def add_link_attr(self, samdb, source_dn, attr, target_dn):
+ """Adds a linked attribute between 2 objects"""
+ # add the specified attribute to the source object
+ self.modify_object(samdb, source_dn, attr, target_dn)
+
+ def del_link_attr(self, samdb, src, attr, target):
+ m = ldb.Message()
+ m.dn = ldb.Dn(samdb, src)
+ m[attr] = ldb.MessageElement(target, ldb.FLAG_MOD_DELETE, attr)
+ samdb.modify(m)
+
+ def sync_DCs(self, sync_order=DC1_TO_DC2):
+ """Manually syncs the 2 DCs to ensure they're in sync"""
+ if sync_order == DC1_TO_DC2:
+ # sync DC1-->DC2, then DC2-->DC1
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1)
+ self._net_drs_replicate(DC=self.dnsname_dc1,
+ fromDC=self.dnsname_dc2)
+ else:
+ # sync DC2-->DC1, then DC1-->DC2
+ self._net_drs_replicate(DC=self.dnsname_dc1,
+ fromDC=self.dnsname_dc2)
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1)
+
+ def ensure_unique_timestamp(self):
+ """Waits a second to ensure a unique timestamp between 2 objects"""
+ time.sleep(1)
+
+ def unique_dn(self, obj_name):
+ """Returns a unique object DN"""
+ # Because we run each test case twice, we need to create a unique DN so
+ # that the 2nd run doesn't hit objects that already exist. Add some
+ # randomness to the object DN to make it unique
+ rand = random.randint(1, 10000000)
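+        # e.g. unique_dn("OU=src") might return "OU=src-1234567,<self.ou>"
+        # (hypothetical value; the random suffix differs on every call)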
+ return "%s-%d,%s" % (obj_name, rand, self.ou)
+
+ def assert_attrs_match(self, res1, res2, attr, expected_count):
+ """
+        Asserts that the search results contain the expected number of
+        attribute values and that they match on both DCs
+ """
+ actual_len = len(res1[0][attr])
+ self.assertTrue(actual_len == expected_count,
+ "Expected %u %s attributes, got %u" % (expected_count,
+ attr,
+ actual_len))
+ actual_len = len(res2[0][attr])
+ self.assertTrue(actual_len == expected_count,
+ "Expected %u %s attributes, got %u" % (expected_count,
+ attr,
+ actual_len))
+
+ # check DCs both agree on the same linked attributes
+ for val in res1[0][attr]:
+ self.assertTrue(val in res2[0][attr],
+ "%s '%s' not found on DC2" % (attr, val))
+
+ def zero_highwatermark(self):
+ """Returns a zeroed highwatermark so that all DRS data gets returned"""
+ hwm = drsuapi.DsReplicaHighWaterMark()
+ hwm.tmp_highest_usn = 0
+ hwm.reserved_usn = 0
+ hwm.highest_usn = 0
+ return hwm
+
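+    # A zeroed highwatermark (above) makes the server treat the destination
+    # as having seen nothing, so the REPL_OBJ checks below should receive the
+    # object's full replication state, including inactive link values.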
+ def _check_replicated_links(self, src_obj_dn, expected_links):
+ """Checks that replication sends back the expected linked attributes"""
+ self._check_replication([src_obj_dn],
+ drsuapi.DRSUAPI_DRS_WRIT_REP,
+ dest_dsa=None,
+ drs_error=drsuapi.DRSUAPI_EXOP_ERR_SUCCESS,
+ nc_dn_str=src_obj_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ expected_links=expected_links,
+ highwatermark=self.zero_highwatermark())
+
+ # Check DC2 as well
+ self.set_test_ldb_dc(self.ldb_dc2)
+
+ self._check_replication([src_obj_dn],
+ drsuapi.DRSUAPI_DRS_WRIT_REP,
+ dest_dsa=None,
+ drs_error=drsuapi.DRSUAPI_EXOP_ERR_SUCCESS,
+ nc_dn_str=src_obj_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ expected_links=expected_links,
+ highwatermark=self.zero_highwatermark(),
+ drs=self.drs2, drs_handle=self.drs2_handle)
+ self.set_test_ldb_dc(self.ldb_dc1)
+
+ def _test_conflict_single_valued_link(self, sync_order):
+ """
+ Tests a simple single-value link conflict, i.e. each DC adds a link to
+ the same source object but linking to different targets.
+ """
+ src_ou = self.unique_dn("OU=src")
+ src_guid = self.add_object(self.ldb_dc1, src_ou)
+ self.sync_DCs()
+
+ # create a unique target on each DC
+ target1_ou = self.unique_dn("OU=target1")
+ target2_ou = self.unique_dn("OU=target2")
+
+ target1_guid = self.add_object(self.ldb_dc1, target1_ou)
+ target2_guid = self.add_object(self.ldb_dc2, target2_ou)
+
+ # link the test OU to the respective targets created
+ self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
+ self.ensure_unique_timestamp()
+ self.add_link_attr(self.ldb_dc2, src_ou, "managedBy", target2_ou)
+
+ # sync the 2 DCs
+ self.sync_DCs(sync_order=sync_order)
+
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["managedBy"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["managedBy"])
+
+        # check the object has only one occurrence of the single-valued
+        # attribute and that it matches on both DCs
+ self.assert_attrs_match(res1, res2, "managedBy", 1)
+
+ self.assertTrue(str(res1[0]["managedBy"][0]) == target2_ou,
+ "Expected most recent update to win conflict")
+
+ # we can't query the deleted links over LDAP, but we can check DRS
+ # to make sure the DC kept a copy of the conflicting link
+ link1 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy, 0,
+ misc.GUID(src_guid), misc.GUID(target1_guid))
+ link2 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ misc.GUID(src_guid), misc.GUID(target2_guid))
+ self._check_replicated_links(src_ou, [link1, link2])
+
+ def test_conflict_single_valued_link(self):
+ # repeat the test twice, to give each DC a chance to resolve
+ # the conflict
+ self._test_conflict_single_valued_link(sync_order=DC1_TO_DC2)
+ self._test_conflict_single_valued_link(sync_order=DC2_TO_DC1)
+
+ def _test_duplicate_single_valued_link(self, sync_order):
+ """
+ Adds the same single-valued link on 2 DCs and checks we don't end up
+ with 2 copies of the link.
+ """
+ # create unique objects for the link
+ target_ou = self.unique_dn("OU=target")
+ self.add_object(self.ldb_dc1, target_ou)
+ src_ou = self.unique_dn("OU=src")
+ src_guid = self.add_object(self.ldb_dc1, src_ou)
+ self.sync_DCs()
+
+ # link the same test OU to the same target on both DCs
+ self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target_ou)
+ self.ensure_unique_timestamp()
+ self.add_link_attr(self.ldb_dc2, src_ou, "managedBy", target_ou)
+
+ # sync the 2 DCs
+ self.sync_DCs(sync_order=sync_order)
+
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["managedBy"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["managedBy"])
+
+        # check the object has only one occurrence of the single-valued
+        # attribute and that it matches on both DCs
+ self.assert_attrs_match(res1, res2, "managedBy", 1)
+
+ def test_duplicate_single_valued_link(self):
+ # repeat the test twice, to give each DC a chance to resolve
+ # the conflict
+ self._test_duplicate_single_valued_link(sync_order=DC1_TO_DC2)
+ self._test_duplicate_single_valued_link(sync_order=DC2_TO_DC1)
+
+ def _test_conflict_multi_valued_link(self, sync_order):
+ """
+ Tests a simple multi-valued link conflict. This adds 2 objects with the
+ same username on 2 different DCs and checks their group membership is
+ preserved after the conflict is resolved.
+ """
+
+ # create a common link source
+ src_dn = self.unique_dn("CN=src")
+ src_guid = self.add_object(self.ldb_dc1, src_dn, objectclass="group")
+ self.sync_DCs()
+
+ # create the same user (link target) on each DC.
+ # Note that the GUIDs will differ between the DCs
+ target_dn = self.unique_dn("CN=target")
+ target1_guid = self.add_object(self.ldb_dc1, target_dn,
+ objectclass="user")
+ self.ensure_unique_timestamp()
+ target2_guid = self.add_object(self.ldb_dc2, target_dn,
+ objectclass="user")
+
+ # link the src group to the respective target created
+ self.add_link_attr(self.ldb_dc1, src_dn, "member", target_dn)
+ self.ensure_unique_timestamp()
+ self.add_link_attr(self.ldb_dc2, src_dn, "member", target_dn)
+
+ # sync the 2 DCs. We expect the more recent target2 object to win
+ self.sync_DCs(sync_order=sync_order)
+
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["member"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["member"])
+ target1_conflict = False
+
+ # we expect exactly 2 members in our test group (both DCs should agree)
+ self.assert_attrs_match(res1, res2, "member", 2)
+
+ for val in [str(val) for val in res1[0]["member"]]:
+ # check the expected conflicting object was renamed
+ self.assertFalse("CNF:%s" % target2_guid in val)
+ if "CNF:%s" % target1_guid in val:
+ target1_conflict = True
+
+ self.assertTrue(target1_conflict,
+ "Expected link to conflicting target object not found")
+
+ def test_conflict_multi_valued_link(self):
+ # repeat the test twice, to give each DC a chance to resolve
+ # the conflict
+ self._test_conflict_multi_valued_link(sync_order=DC1_TO_DC2)
+ self._test_conflict_multi_valued_link(sync_order=DC2_TO_DC1)
+
+ def _test_duplicate_multi_valued_link(self, sync_order):
+ """
+ Adds the same multivalued link on 2 DCs and checks we don't end up
+ with 2 copies of the link.
+ """
+
+ # create the link source/target objects
+ src_dn = self.unique_dn("CN=src")
+ src_guid = self.add_object(self.ldb_dc1, src_dn, objectclass="group")
+ target_dn = self.unique_dn("CN=target")
+ self.add_object(self.ldb_dc1, target_dn, objectclass="user")
+ self.sync_DCs()
+
+ # link the src group to the same target user separately on each DC
+ self.add_link_attr(self.ldb_dc1, src_dn, "member", target_dn)
+ self.ensure_unique_timestamp()
+ self.add_link_attr(self.ldb_dc2, src_dn, "member", target_dn)
+
+ self.sync_DCs(sync_order=sync_order)
+
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["member"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["member"])
+
+ # we expect to still have only 1 member in our test group
+ self.assert_attrs_match(res1, res2, "member", 1)
+
+ def test_duplicate_multi_valued_link(self):
+ # repeat the test twice, to give each DC a chance to resolve
+ # the conflict
+ self._test_duplicate_multi_valued_link(sync_order=DC1_TO_DC2)
+ self._test_duplicate_multi_valued_link(sync_order=DC2_TO_DC1)
+
+ def _test_conflict_backlinks(self, sync_order):
+ """
+ Tests that resolving a source object conflict fixes up any backlinks,
+ e.g. the same user is added to a conflicting group.
+ """
+
+ # create a common link target
+ target_dn = self.unique_dn("CN=target")
+ target_guid = self.add_object(self.ldb_dc1, target_dn,
+ objectclass="user")
+ self.sync_DCs()
+
+ # create the same group (link source) on each DC.
+ # Note that the GUIDs will differ between the DCs
+ src_dn = self.unique_dn("CN=src")
+ src1_guid = self.add_object(self.ldb_dc1, src_dn, objectclass="group")
+ self.ensure_unique_timestamp()
+ src2_guid = self.add_object(self.ldb_dc2, src_dn, objectclass="group")
+
+ # link the src group to the respective target created
+ self.add_link_attr(self.ldb_dc1, src_dn, "member", target_dn)
+ self.ensure_unique_timestamp()
+ self.add_link_attr(self.ldb_dc2, src_dn, "member", target_dn)
+
+ # sync the 2 DCs. We expect the more recent src2 object to win
+ self.sync_DCs(sync_order=sync_order)
+
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % target_guid,
+ scope=SCOPE_BASE, attrs=["memberOf"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % target_guid,
+ scope=SCOPE_BASE, attrs=["memberOf"])
+ src1_backlink = False
+
+ # our test user should still be a member of 2 groups (check both
+ # DCs agree)
+ self.assert_attrs_match(res1, res2, "memberOf", 2)
+
+ for val in [str(val) for val in res1[0]["memberOf"]]:
+ # check the conflicting object was renamed
+ self.assertFalse("CNF:%s" % src2_guid in val)
+ if "CNF:%s" % src1_guid in val:
+ src1_backlink = True
+
+ self.assertTrue(src1_backlink,
+ "Backlink to conflicting source object not found")
+
+ def test_conflict_backlinks(self):
+ # repeat the test twice, to give each DC a chance to resolve
+ # the conflict
+ self._test_conflict_backlinks(sync_order=DC1_TO_DC2)
+ self._test_conflict_backlinks(sync_order=DC2_TO_DC1)
+
+ def _test_link_deletion_conflict(self, sync_order):
+ """
+ Checks that a deleted link conflicting with an active link is
+ resolved correctly.
+ """
+
+ # Add the link objects
+ target_dn = self.unique_dn("CN=target")
+ self.add_object(self.ldb_dc1, target_dn, objectclass="user")
+ src_dn = self.unique_dn("CN=src")
+ src_guid = self.add_object(self.ldb_dc1, src_dn, objectclass="group")
+ self.sync_DCs()
+
+ # add the same link on both DCs, and resolve any conflict
+ self.add_link_attr(self.ldb_dc2, src_dn, "member", target_dn)
+ self.ensure_unique_timestamp()
+ self.add_link_attr(self.ldb_dc1, src_dn, "member", target_dn)
+ self.sync_DCs(sync_order=sync_order)
+
+ # delete and re-add the link on one DC
+ self.del_link_attr(self.ldb_dc1, src_dn, "member", target_dn)
+ self.add_link_attr(self.ldb_dc1, src_dn, "member", target_dn)
+
+ # just delete it on the other DC
+ self.ensure_unique_timestamp()
+ self.del_link_attr(self.ldb_dc2, src_dn, "member", target_dn)
+ # sanity-check the link is gone on this DC
+ res1 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["member"])
+ self.assertFalse("member" in res1[0], "Couldn't delete member attr")
+
+        # sync the 2 DCs. We expect DC1's attribute to win because it has a
+        # higher version number (even though it is older)
+ self.sync_DCs(sync_order=sync_order)
+
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["member"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["member"])
+
+ # our test user should still be a member of the group (check both
+ # DCs agree)
+ self.assertTrue("member" in res1[0],
+ "Expected member attribute missing")
+ self.assert_attrs_match(res1, res2, "member", 1)
+
+ def test_link_deletion_conflict(self):
+ # repeat the test twice, to give each DC a chance to resolve
+ # the conflict
+ self._test_link_deletion_conflict(sync_order=DC1_TO_DC2)
+ self._test_link_deletion_conflict(sync_order=DC2_TO_DC1)
+
+ def _test_obj_deletion_conflict(self, sync_order, del_target):
+ """
+        Checks that receiving a new link for a deleted object gets
+ resolved correctly.
+ """
+
+ target_dn = self.unique_dn("CN=target")
+ target_guid = self.add_object(self.ldb_dc1, target_dn,
+ objectclass="user")
+ src_dn = self.unique_dn("CN=src")
+ src_guid = self.add_object(self.ldb_dc1, src_dn, objectclass="group")
+
+ self.sync_DCs()
+
+ # delete the object on one DC
+ if del_target:
+ search_guid = src_guid
+ self.ldb_dc2.delete(target_dn)
+ else:
+ search_guid = target_guid
+ self.ldb_dc2.delete(src_dn)
+
+ # add a link on the other DC
+ self.ensure_unique_timestamp()
+ self.add_link_attr(self.ldb_dc1, src_dn, "member", target_dn)
+
+ self.sync_DCs(sync_order=sync_order)
+
+ # the object deletion should trump the link addition.
+ # Check the link no longer exists on the remaining object
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % search_guid,
+ scope=SCOPE_BASE,
+ attrs=["member", "memberOf"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % search_guid,
+ scope=SCOPE_BASE,
+ attrs=["member", "memberOf"])
+
+ self.assertFalse("member" in res1[0], "member attr shouldn't exist")
+ self.assertFalse("member" in res2[0], "member attr shouldn't exist")
+ self.assertFalse("memberOf" in res1[0], "member attr shouldn't exist")
+ self.assertFalse("memberOf" in res2[0], "member attr shouldn't exist")
+
+ def test_obj_deletion_conflict(self):
+ # repeat the test twice, to give each DC a chance to resolve
+ # the conflict
+ self._test_obj_deletion_conflict(sync_order=DC1_TO_DC2,
+ del_target=True)
+ self._test_obj_deletion_conflict(sync_order=DC2_TO_DC1,
+ del_target=True)
+
+ # and also try deleting the source object instead of the link target
+ self._test_obj_deletion_conflict(sync_order=DC1_TO_DC2,
+ del_target=False)
+ self._test_obj_deletion_conflict(sync_order=DC2_TO_DC1,
+ del_target=False)
+
+ def _test_full_sync_link_conflict(self, sync_order):
+ """
+ Checks that doing a full sync doesn't affect how conflicts get resolved
+ """
+
+ # create the objects for the linked attribute
+ src_dn = self.unique_dn("CN=src")
+ src_guid = self.add_object(self.ldb_dc1, src_dn, objectclass="group")
+ target_dn = self.unique_dn("CN=target")
+ self.add_object(self.ldb_dc1, target_dn, objectclass="user")
+ self.sync_DCs()
+
+ # add the same link on both DCs
+ self.add_link_attr(self.ldb_dc2, src_dn, "member", target_dn)
+ self.ensure_unique_timestamp()
+ self.add_link_attr(self.ldb_dc1, src_dn, "member", target_dn)
+
+ # Do a couple of full syncs which should resolve the conflict
+ # (but only for one DC)
+ if sync_order == DC1_TO_DC2:
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ full_sync=True)
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ full_sync=True)
+ else:
+ self._net_drs_replicate(DC=self.dnsname_dc1,
+ fromDC=self.dnsname_dc2,
+ full_sync=True)
+ self._net_drs_replicate(DC=self.dnsname_dc1,
+ fromDC=self.dnsname_dc2,
+ full_sync=True)
+
+ # delete and re-add the link on one DC
+ self.del_link_attr(self.ldb_dc1, src_dn, "member", target_dn)
+ self.ensure_unique_timestamp()
+ self.add_link_attr(self.ldb_dc1, src_dn, "member", target_dn)
+
+ # just delete the link on the 2nd DC
+ self.ensure_unique_timestamp()
+ self.del_link_attr(self.ldb_dc2, src_dn, "member", target_dn)
+
+ # sync the 2 DCs. We expect DC1 to win based on version number
+ self.sync_DCs(sync_order=sync_order)
+
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["member"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["member"])
+
+        # check the membership still exists (and both DCs agree)
+ self.assertTrue("member" in res1[0],
+ "Expected member attribute missing")
+ self.assert_attrs_match(res1, res2, "member", 1)
+
+ def test_full_sync_link_conflict(self):
+ # repeat the test twice, to give each DC a chance to resolve
+ # the conflict
+ self._test_full_sync_link_conflict(sync_order=DC1_TO_DC2)
+ self._test_full_sync_link_conflict(sync_order=DC2_TO_DC1)
+
+ def _singleval_link_conflict_deleted_winner(self, sync_order):
+ """
+ Tests a single-value link conflict where the more-up-to-date link value
+ is deleted.
+ """
+ src_ou = self.unique_dn("OU=src")
+ src_guid = self.add_object(self.ldb_dc1, src_ou)
+ self.sync_DCs()
+
+ # create a unique target on each DC
+ target1_ou = self.unique_dn("OU=target1")
+ target2_ou = self.unique_dn("OU=target2")
+
+ target1_guid = self.add_object(self.ldb_dc1, target1_ou)
+ target2_guid = self.add_object(self.ldb_dc2, target2_ou)
+
+ # add the links for the respective targets, and delete one of the links
+ self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
+ self.add_link_attr(self.ldb_dc2, src_ou, "managedBy", target2_ou)
+ self.ensure_unique_timestamp()
+ self.del_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
+
+ # sync the 2 DCs
+ self.sync_DCs(sync_order=sync_order)
+
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["managedBy"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["managedBy"])
+
+ # Although the more up-to-date link value is deleted, this shouldn't
+        # trump DC2's active link
+ self.assert_attrs_match(res1, res2, "managedBy", 1)
+
+ self.assertTrue(str(res1[0]["managedBy"][0]) == target2_ou,
+ "Expected active link win conflict")
+
+ # we can't query the deleted links over LDAP, but we can check that
+ # the deleted links exist using DRS
+ link1 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy, 0,
+ misc.GUID(src_guid), misc.GUID(target1_guid))
+ link2 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ misc.GUID(src_guid), misc.GUID(target2_guid))
+ self._check_replicated_links(src_ou, [link1, link2])
+
+ def test_conflict_single_valued_link_deleted_winner(self):
+ # repeat the test twice, to give each DC a chance to resolve
+ # the conflict
+ self._singleval_link_conflict_deleted_winner(sync_order=DC1_TO_DC2)
+ self._singleval_link_conflict_deleted_winner(sync_order=DC2_TO_DC1)
+
+ def _singleval_link_conflict_deleted_loser(self, sync_order):
+ """
+ Tests a single-valued link conflict, where the losing link value is
+ deleted.
+ """
+ src_ou = self.unique_dn("OU=src")
+ src_guid = self.add_object(self.ldb_dc1, src_ou)
+ self.sync_DCs()
+
+ # create a unique target on each DC
+ target1_ou = self.unique_dn("OU=target1")
+ target2_ou = self.unique_dn("OU=target2")
+
+ target1_guid = self.add_object(self.ldb_dc1, target1_ou)
+ target2_guid = self.add_object(self.ldb_dc2, target2_ou)
+
+ # add the links - we want the link to end up deleted on DC2, but active
+ # on DC1. DC1 has the better version and DC2 has the better timestamp -
+ # the better version should win
+ self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
+ self.del_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
+ self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
+ self.ensure_unique_timestamp()
+ self.add_link_attr(self.ldb_dc2, src_ou, "managedBy", target2_ou)
+ self.del_link_attr(self.ldb_dc2, src_ou, "managedBy", target2_ou)
+
+ self.sync_DCs(sync_order=sync_order)
+
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["managedBy"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["managedBy"])
+
+        # check the object has only one occurrence of the single-valued
+        # attribute and that it matches on both DCs
+ self.assert_attrs_match(res1, res2, "managedBy", 1)
+
+ self.assertTrue(str(res1[0]["managedBy"][0]) == target1_ou,
+ "Expected most recent update to win conflict")
+
+ # we can't query the deleted links over LDAP, but we can check DRS
+ # to make sure the DC kept a copy of the conflicting link
+ link1 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ misc.GUID(src_guid), misc.GUID(target1_guid))
+ link2 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy, 0,
+ misc.GUID(src_guid), misc.GUID(target2_guid))
+ self._check_replicated_links(src_ou, [link1, link2])
+
+ def test_conflict_single_valued_link_deleted_loser(self):
+ # repeat the test twice, to give each DC a chance to resolve
+ # the conflict
+ self._singleval_link_conflict_deleted_loser(sync_order=DC1_TO_DC2)
+ self._singleval_link_conflict_deleted_loser(sync_order=DC2_TO_DC1)
+
+ def _test_conflict_existing_single_valued_link(self, sync_order):
+ """
+ Tests a single-valued link conflict, where the conflicting link value
+ already exists (as inactive) on both DCs.
+ """
+ # create the link objects
+ src_ou = self.unique_dn("OU=src")
+ src_guid = self.add_object(self.ldb_dc1, src_ou)
+
+ target1_ou = self.unique_dn("OU=target1")
+ target2_ou = self.unique_dn("OU=target2")
+ target1_guid = self.add_object(self.ldb_dc1, target1_ou)
+ target2_guid = self.add_object(self.ldb_dc1, target2_ou)
+
+ # add the links, but then delete them
+ self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
+ self.del_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
+ self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target2_ou)
+ self.del_link_attr(self.ldb_dc1, src_ou, "managedBy", target2_ou)
+ self.sync_DCs()
+
+ # re-add the links independently on each DC
+ self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
+ self.ensure_unique_timestamp()
+ self.add_link_attr(self.ldb_dc2, src_ou, "managedBy", target2_ou)
+
+ # try to sync the 2 DCs
+ self.sync_DCs(sync_order=sync_order)
+
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["managedBy"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["managedBy"])
+
+        # check the object has only one occurrence of the single-valued
+        # attribute and that it matches on both DCs
+ self.assert_attrs_match(res1, res2, "managedBy", 1)
+
+ # here we expect DC2 to win because it has the more recent link
+ self.assertTrue(str(res1[0]["managedBy"][0]) == target2_ou,
+ "Expected most recent update to win conflict")
+
+ # we can't query the deleted links over LDAP, but we can check DRS
+ # to make sure the DC kept a copy of the conflicting link
+ link1 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy, 0,
+ misc.GUID(src_guid), misc.GUID(target1_guid))
+ link2 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ misc.GUID(src_guid), misc.GUID(target2_guid))
+ self._check_replicated_links(src_ou, [link1, link2])
+
+ def test_conflict_existing_single_valued_link(self):
+ # repeat the test twice, to give each DC a chance to resolve
+ # the conflict
+ self._test_conflict_existing_single_valued_link(sync_order=DC1_TO_DC2)
+ self._test_conflict_existing_single_valued_link(sync_order=DC2_TO_DC1)
+
+ def test_link_attr_version(self):
+ """
+ Checks the link attribute version starts from the correct value
+ """
+ # create some objects and add a link
+ src_ou = self.unique_dn("OU=src")
+ self.add_object(self.ldb_dc1, src_ou)
+ target1_ou = self.unique_dn("OU=target1")
+ self.add_object(self.ldb_dc1, target1_ou)
+ self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
+
+ # get the link info via replication
+ ctr6 = self._get_replication(drsuapi.DRSUAPI_DRS_WRIT_REP,
+ dest_dsa=None,
+ drs_error=DRSUAPI_EXOP_ERR_SUCCESS,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ highwatermark=self.zero_highwatermark(),
+ nc_dn_str=src_ou)
+
+ self.assertTrue(ctr6.linked_attributes_count == 1,
+ "DRS didn't return a link")
+ link = ctr6.linked_attributes[0]
+ rcvd_version = link.meta_data.version
+ self.assertTrue(rcvd_version == 1,
+ "Link version started from %u, not 1" % rcvd_version)
diff --git a/source4/torture/drs/python/linked_attributes_drs.py b/source4/torture/drs/python/linked_attributes_drs.py
new file mode 100644
index 0000000..93ad313
--- /dev/null
+++ b/source4/torture/drs/python/linked_attributes_drs.py
@@ -0,0 +1,162 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Originally based on ./sam.py
+import sys
+
+sys.path.insert(0, "bin/python")
+import ldb
+
+from samba.dcerpc import drsuapi, misc
+from samba.ndr import ndr_unpack, ndr_pack
+
+import drs_base
+
+
+class LATestException(Exception):
+ pass
+
+
+class LATests(drs_base.DrsBaseTestCase):
+
+ def setUp(self):
+ super(LATests, self).setUp()
+ # DrsBaseTestCase sets up self.ldb_dc1, self.ldb_dc2
+ # we're only using one
+ self.samdb = self.ldb_dc1
+
+ self.base_dn = self.samdb.domain_dn()
+ self.ou = "OU=la,%s" % self.base_dn
+ if True:
+ try:
+ self.samdb.delete(self.ou, ['tree_delete:1'])
+ except ldb.LdbError as e:
+ pass
+ self.samdb.add({'objectclass': 'organizationalUnit',
+ 'dn': self.ou})
+
+ self.dc_guid = self.samdb.get_invocation_id()
+ self.drs, self.drs_handle = self._ds_bind(self.dnsname_dc1)
+
+ def tearDown(self):
+ super(LATests, self).tearDown()
+ try:
+ self.samdb.delete(self.ou, ['tree_delete:1'])
+ except ldb.LdbError as e:
+ pass
+
+ def delete_user(self, user):
+ self.samdb.delete(user['dn'])
+ del self.users[self.users.index(user)]
+
+ def add_object(self, cn, objectclass):
+ dn = "CN=%s,%s" % (cn, self.ou)
+ self.samdb.add({'cn': cn,
+ 'objectclass': objectclass,
+ 'dn': dn})
+
+ return dn
+
+ def add_objects(self, n, objectclass, prefix=None):
+ if prefix is None:
+ prefix = objectclass
+ dns = []
+ for i in range(n):
+ dns.append(self.add_object("%s%d" % (prefix, i + 1),
+ objectclass))
+ return dns
+
+ def add_linked_attribute(self, src, dest, attr='member'):
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.samdb, src)
+ m[attr] = ldb.MessageElement(dest, ldb.FLAG_MOD_ADD, attr)
+ self.samdb.modify(m)
+
+ def remove_linked_attribute(self, src, dest, attr='member'):
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.samdb, src)
+ m[attr] = ldb.MessageElement(dest, ldb.FLAG_MOD_DELETE, attr)
+ self.samdb.modify(m)
+
+ def attr_search(self, obj, expected, attr, scope=ldb.SCOPE_BASE):
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=self.dc_guid,
+ nc_dn_str=obj,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ)
+
+ level, ctr = self.drs.DsGetNCChanges(self.drs_handle, 8, req8)
+ expected_attid = getattr(drsuapi, 'DRSUAPI_ATTID_' + attr)
+
+ links = []
+ for link in ctr.linked_attributes:
+ if link.attid == expected_attid:
+ unpacked = ndr_unpack(drsuapi.DsReplicaObjectIdentifier3,
+ link.value.blob)
+ active = link.flags & drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE
+ links.append((str(unpacked.dn), bool(active)))
+
+ return links
+
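+    # attr_search() yields (dn, active) tuples, e.g. (hypothetical)
+    # [("CN=u1,OU=la,...", True)]; 'expected' below maps dn -> active flag.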
+ def assert_forward_links(self, obj, expected, attr='member'):
+ results = self.attr_search(obj, expected, attr)
+ self.assertEqual(len(results), len(expected))
+
+ for k, v in results:
+ self.assertTrue(k in expected)
+ self.assertEqual(expected[k], v, "%s active flag should be %d, not %d" %
+ (k, expected[k], v))
+
+ def get_object_guid(self, dn):
+ res = self.samdb.search(dn,
+ scope=ldb.SCOPE_BASE,
+ attrs=['objectGUID'])
+ return str(misc.GUID(res[0]['objectGUID'][0]))
+
+ def test_links_all_delete_group(self):
+ u1, u2 = self.add_objects(2, 'user', 'u_all_del_group')
+ g1, g2 = self.add_objects(2, 'group', 'g_all_del_group')
+ g2guid = self.get_object_guid(g2)
+
+ self.add_linked_attribute(g1, u1)
+ self.add_linked_attribute(g2, u1)
+ self.add_linked_attribute(g2, u2)
+
+ self.samdb.delete(g2)
+ self.assert_forward_links(g1, {u1: True})
+ res = self.samdb.search('<GUID=%s>' % g2guid,
+ scope=ldb.SCOPE_BASE,
+ controls=['show_deleted:1'])
+ new_dn = res[0].dn
+ self.assert_forward_links(new_dn, {})
+
+ def test_la_links_delete_link(self):
+ u1, u2 = self.add_objects(2, 'user', 'u_del_link')
+ g1, g2 = self.add_objects(2, 'group', 'g_del_link')
+
+ self.add_linked_attribute(g1, u1)
+ self.add_linked_attribute(g2, u1)
+ self.add_linked_attribute(g2, u2)
+
+ self.remove_linked_attribute(g2, u1)
+
+ self.assert_forward_links(g1, {u1: True})
+ self.assert_forward_links(g2, {u1: False, u2: True})
+
+ self.add_linked_attribute(g2, u1)
+ self.remove_linked_attribute(g2, u2)
+ self.assert_forward_links(g2, {u1: True, u2: False})
+ self.remove_linked_attribute(g2, u1)
+ self.assert_forward_links(g2, {u1: False, u2: False})
+
+ def test_la_links_delete_user(self):
+ u1, u2 = self.add_objects(2, 'user', 'u_del_user')
+ g1, g2 = self.add_objects(2, 'group', 'g_del_user')
+
+ self.add_linked_attribute(g1, u1)
+ self.add_linked_attribute(g2, u1)
+ self.add_linked_attribute(g2, u2)
+
+ self.samdb.delete(u1)
+
+ self.assert_forward_links(g1, {})
+ self.assert_forward_links(g2, {u2: True})
diff --git a/source4/torture/drs/python/repl_move.py b/source4/torture/drs/python/repl_move.py
new file mode 100644
index 0000000..c206ab8
--- /dev/null
+++ b/source4/torture/drs/python/repl_move.py
@@ -0,0 +1,2608 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Unix SMB/CIFS implementation.
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2010
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2016
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc2_dns_name
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN repl_move -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import time
+import samba.tests
+
+from samba.ndr import ndr_unpack
+from samba.dcerpc import drsblobs
+from samba.dcerpc import misc
+from samba.drs_utils import drs_DsBind
+
+from ldb import (
+ SCOPE_BASE,
+ SCOPE_SUBTREE,
+)
+
+import drs_base
+import ldb
+from samba.dcerpc.drsuapi import (
+ drsuapi,
+ DRSUAPI_ATTID_accountExpires,
+ DRSUAPI_ATTID_cn,
+ DRSUAPI_ATTID_codePage,
+ DRSUAPI_ATTID_countryCode,
+ DRSUAPI_ATTID_dBCSPwd,
+ DRSUAPI_ATTID_description,
+ DRSUAPI_ATTID_instanceType,
+ DRSUAPI_ATTID_isDeleted,
+ DRSUAPI_ATTID_isRecycled,
+ DRSUAPI_ATTID_lastKnownParent,
+ DRSUAPI_ATTID_lmPwdHistory,
+ DRSUAPI_ATTID_logonHours,
+ DRSUAPI_ATTID_name,
+ DRSUAPI_ATTID_ntPwdHistory,
+ DRSUAPI_ATTID_ntSecurityDescriptor,
+ DRSUAPI_ATTID_objectCategory,
+ DRSUAPI_ATTID_objectClass,
+ DRSUAPI_ATTID_objectSid,
+ DRSUAPI_ATTID_ou,
+ DRSUAPI_ATTID_primaryGroupID,
+ DRSUAPI_ATTID_pwdLastSet,
+ DRSUAPI_ATTID_sAMAccountName,
+ DRSUAPI_ATTID_sAMAccountType,
+ DRSUAPI_ATTID_unicodePwd,
+ DRSUAPI_ATTID_userAccountControl,
+ DRSUAPI_ATTID_userPrincipalName,
+ DRSUAPI_ATTID_whenCreated,
+ DRSUAPI_DRS_SYNC_FORCED,
+ DRSUAPI_EXOP_REPL_OBJ,
+ DsGetNCChangesRequest8,
+ DsReplicaHighWaterMark,
+ DsReplicaObjectIdentifier)
+
+
+class DrsMoveObjectTestCase(drs_base.DrsBaseTestCase):
+
+ def setUp(self):
+ super(DrsMoveObjectTestCase, self).setUp()
+        # disable automatic replication temporarily
+ self._disable_all_repl(self.dnsname_dc1)
+ self._disable_all_repl(self.dnsname_dc2)
+
+ # make sure DCs are synchronized before the test
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+
+ self.top_ou = samba.tests.create_test_ou(self.ldb_dc1,
+ "replica_move")
+
+ self.ou1_dn = ldb.Dn(self.ldb_dc1, "OU=DrsOU1")
+ self.ou1_dn.add_base(self.top_ou)
+ ou1 = {}
+ ou1["dn"] = self.ou1_dn
+ ou1["objectclass"] = "organizationalUnit"
+ ou1["ou"] = self.ou1_dn.get_component_value(0)
+ self.ldb_dc1.add(ou1)
+
+ self.ou2_dn = ldb.Dn(self.ldb_dc1, "OU=DrsOU2")
+ self.ou2_dn.add_base(self.top_ou)
+ ou2 = {}
+ ou2["dn"] = self.ou2_dn
+ ou2["objectclass"] = "organizationalUnit"
+ ou2["ou"] = self.ou2_dn.get_component_value(0)
+ self.ldb_dc1.add(ou2)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ self.dc1_guid = self.ldb_dc1.get_invocation_id()
+ self.dc2_guid = self.ldb_dc2.get_invocation_id()
+
+ self.drs_dc1 = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+ self.drs_dc2 = self._ds_bind(self.dnsname_dc2, ip=self.url_dc2)
+
+ def tearDown(self):
+ try:
+ self.ldb_dc1.delete(self.top_ou, ["tree_delete:1"])
+ except ldb.LdbError as e:
+ (enum, string) = e.args
+ if enum == ldb.ERR_NO_SUCH_OBJECT:
+ pass
+
+ self._enable_all_repl(self.dnsname_dc1)
+ self._enable_all_repl(self.dnsname_dc2)
+ super(DrsMoveObjectTestCase, self).tearDown()
+
+ def _make_username(self):
+ return "DrsMoveU_" + time.strftime("%s", time.gmtime())
+
+ def _check_metadata(self, user_dn, sam_ldb, drs, metadata, expected):
+ repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob, metadata[0])
+
+ self.assertEqual(len(repl.ctr.array), len(expected))
+
+ i = 0
+ for o in repl.ctr.array:
+ e = expected[i]
+ (attid, orig_dsa, version) = e
+ self.assertEqual(attid, o.attid,
+ "(LDAP) Wrong attid "
+ "for expected value %d, wanted 0x%08x got 0x%08x"
+ % (i, attid, o.attid))
+ self.assertEqual(o.originating_invocation_id,
+ misc.GUID(orig_dsa),
+ "(LDAP) Wrong originating_invocation_id "
+ "for expected value %d, attid 0x%08x, wanted %s got %s"
+ % (i, o.attid,
+ misc.GUID(orig_dsa),
+ o.originating_invocation_id))
+ # Allow version to be skipped when it does not matter
+ if version is not None:
+ self.assertEqual(o.version, version,
+ "(LDAP) Wrong version for expected value %d, "
+ "attid 0x%08x, "
+ "wanted %d got %d"
+ % (i, o.attid,
+ version, o.version))
+ i = i + 1
+
+ if drs is None:
+ return
+
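+        # Fetch this one object over DRS (DRSUAPI_EXOP_REPL_OBJ with
+        # max_object_count=1) so that the replication metadata the server
+        # returns can be checked against the same expected list.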
+ req8 = DsGetNCChangesRequest8()
+
+ req8.source_dsa_invocation_id = misc.GUID(sam_ldb.get_invocation_id())
+ req8.naming_context = DsReplicaObjectIdentifier()
+ req8.naming_context.dn = str(user_dn)
+ req8.highwatermark = DsReplicaHighWaterMark()
+ req8.highwatermark.tmp_highest_usn = 0
+ req8.highwatermark.reserved_usn = 0
+ req8.highwatermark.highest_usn = 0
+ req8.uptodateness_vector = None
+ req8.replica_flags = DRSUAPI_DRS_SYNC_FORCED
+ req8.max_object_count = 1
+ req8.max_ndr_size = 402116
+ req8.extended_op = DRSUAPI_EXOP_REPL_OBJ
+ req8.fsmo_info = 0
+ req8.partial_attribute_set = None
+ req8.partial_attribute_set_ex = None
+ req8.mapping_ctr.num_mappings = 0
+ req8.mapping_ctr.mappings = None
+
+ (drs_conn, drs_handle) = drs
+
+ (level, drs_ctr) = drs_conn.DsGetNCChanges(drs_handle, 8, req8)
+ self.assertEqual(level, 6)
+ self.assertEqual(drs_ctr.object_count, 1)
+
+ self.assertEqual(len(drs_ctr.first_object.meta_data_ctr.meta_data), len(expected) - 1)
+ att_idx = 0
+ for o in drs_ctr.first_object.meta_data_ctr.meta_data:
+ i = 0
+ drs_attid = drs_ctr.first_object.object.attribute_ctr.attributes[att_idx]
+ e = expected[i]
+ (attid, orig_dsa, version) = e
+
+ # Skip the RDN from the expected set, it is not sent over DRS
+ if (user_dn.get_rdn_name().upper() == "CN"
+ and attid == DRSUAPI_ATTID_cn) \
+ or (user_dn.get_rdn_name().upper() == "OU"
+ and attid == DRSUAPI_ATTID_ou):
+ i = i + 1
+ e = expected[i]
+ (attid, orig_dsa, version) = e
+
+ self.assertEqual(attid, drs_attid.attid,
+ "(DRS) Wrong attid "
+ "for expected value %d, wanted 0x%08x got 0x%08x"
+ % (i, attid, drs_attid.attid))
+
+ self.assertEqual(o.originating_invocation_id,
+ misc.GUID(orig_dsa),
+ "(DRS) Wrong originating_invocation_id "
+ "for expected value %d, attid 0x%08x, wanted %s got %s"
+ % (i, attid,
+ misc.GUID(orig_dsa),
+ o.originating_invocation_id))
+ # Allow version to be skipped when it does not matter
+ if version is not None:
+ self.assertEqual(o.version, version,
+ "(DRS) Wrong version for expected value %d, "
+ "attid 0x%08x, "
+ "wanted %d got %d"
+ % (i, attid, version, o.version))
+ break
+ i = i + 1
+ att_idx = att_idx + 1
+
+ # now also used to check the group
+ def _check_obj(self, sam_ldb, obj_orig, is_deleted, expected_metadata=None, drs=None):
+ # search the user by guid as it may be deleted
+ guid_str = self._GUID_string(obj_orig["objectGUID"][0])
+ res = sam_ldb.search(base='<GUID=%s>' % guid_str,
+ controls=["show_deleted:1"],
+ attrs=["*", "parentGUID",
+ "replPropertyMetaData"])
+ self.assertEqual(len(res), 1)
+ user_cur = res[0]
+ rdn_orig = str(obj_orig[user_cur.dn.get_rdn_name()][0])
+ rdn_cur = str(user_cur[user_cur.dn.get_rdn_name()][0])
+ name_orig = str(obj_orig["name"][0])
+ name_cur = str(user_cur["name"][0])
+ dn_orig = obj_orig["dn"]
+ dn_cur = user_cur["dn"]
+ # now check properties of the user
+ if is_deleted:
+ self.assertTrue("isDeleted" in user_cur)
+ self.assertEqual(rdn_cur.split('\n')[0], rdn_orig)
+ self.assertEqual(name_cur.split('\n')[0], name_orig)
+ self.assertEqual(dn_cur.get_rdn_value().split('\n')[0],
+ dn_orig.get_rdn_value())
+ self.assertEqual(name_cur, rdn_cur)
+ else:
+ self.assertFalse("isDeleted" in user_cur)
+ self.assertEqual(rdn_cur, rdn_orig)
+ self.assertEqual(name_cur, name_orig)
+ self.assertEqual(dn_cur, dn_orig)
+ self.assertEqual(name_cur, rdn_cur)
+ parent_cur = user_cur["parentGUID"][0]
+ try:
+ parent_orig = obj_orig["parentGUID"][0]
+ self.assertEqual(parent_orig, parent_cur)
+ except KeyError:
+ pass
+ self.assertEqual(name_cur, user_cur.dn.get_rdn_value())
+
+ if expected_metadata is not None:
+ self._check_metadata(dn_cur, sam_ldb, drs, user_cur["replPropertyMetaData"],
+ expected_metadata)
+
+ return user_cur
+
+ def test_ReplicateMoveObject1(self):
+ """Verifies how a moved container with a user inside is replicated between two DCs.
+ This test should verify that:
+ - the OU is replicated properly
+ - the OU is renamed
+        - We verify that after replication,
+          the user has the correct DN (under OU2)
+ - the OU is deleted
+ - the OU is modified on DC2
+        - We verify that after replication,
+          the user has the correct DN (deleted) and has no description
+
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ initial_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_orig, is_deleted=False,
+ expected_metadata=initial_metadata)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+ ldb_res = self.ldb_dc1.search(base=self.ou2_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+
+ moved_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ # check user info on DC1 after rename - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=False,
+ expected_metadata=moved_metadata)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ moved_metadata_dc2 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc2_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, drs=self.drs_dc2,
+ obj_orig=user_moved_orig,
+ is_deleted=False,
+ expected_metadata=moved_metadata_dc2)
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+ deleted_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_moved_orig, is_deleted=True, expected_metadata=deleted_metadata)
+
+        # Modify the description on DC2. This triggers a replication, but
+        # not of 'name', and so exercises a bug in Samba regarding the DN.
+ msg = ldb.Message()
+ msg.dn = new_dn
+ msg["description"] = ldb.MessageElement("User Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
+ modified_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc2_guid, 1),
+ (DRSUAPI_ATTID_description, self.dc2_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, drs=self.drs_dc2,
+ obj_orig=user_moved_orig,
+ is_deleted=False,
+ expected_metadata=modified_metadata)
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ deleted_modified_metadata_dc2 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc2_guid, 2),
+ (DRSUAPI_ATTID_description, self.dc2_guid, 2),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ # check user info on DC2 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, drs=self.drs_dc2,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_modified_metadata_dc2)
+ self.assertFalse("description" in user_cur)
+
+ # trigger replication from DC2 to DC1, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+
+ deleted_modified_metadata_dc1 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_description, self.dc2_guid, 2),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ # check user info on DC1 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_modified_metadata_dc1)
+ self.assertFalse("description" in user_cur)
+
+ def test_ReplicateMoveObject2(self):
+ """Verifies how a moved container with a user inside is not
+ replicated between two DCs as no replication is triggered
+ This test should verify that:
+ - the OU is not replicated
+ - the user is not replicated
+
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ initial_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_orig, is_deleted=False,
+ expected_metadata=initial_metadata)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+ ldb_res = self.ldb_dc1.search(base=self.ou2_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_moved_orig = ldb_res[0]
+
+ moved_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ # check user info on DC1 after rename - should be valid user
+ self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=False,
+ expected_metadata=moved_metadata)
+
+ # check user info on DC2 - should not be there, as we have not done replication
+ ldb_res = self.ldb_dc2.search(base=self.ou2_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 0)
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+
+ deleted_metadata_dc1 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ # check user info on DC1 - should be deleted user
+ self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc1)
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
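+ # In the expectations below, cn is expected to originate from DC2 while
+ # most other attributes still originate from DC1, which suggests the CN
+ # is rewritten locally on the DC that applies the deletion (an
+ # observation drawn from these expected values, not a documented
+ # guarantee).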
+ deleted_metadata_dc2 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc2_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ # check user info on DC2 - should be deleted user
+ self._check_obj(sam_ldb=self.ldb_dc2, drs=self.drs_dc2,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc2)
+
+ # trigger replication from DC2 to DC1, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+
+ # check user info on DC1 - should be deleted user
+ self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc1)
+
+ def test_ReplicateMoveObject3(self):
+ """Verifies how a moved container with a user inside is replicated between two DCs.
+ This test should verify that:
+ - the OU is created on DC1
+ - the OU is renamed on DC1
+ - We verify that after replication,
+ that the user has the correct DN (under OU2).
+
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ initial_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_orig, is_deleted=False,
+ expected_metadata=initial_metadata)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+ ldb_res = self.ldb_dc1.search(base=self.ou2_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ moved_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ # check user info on DC1 after rename - should be valid user
+ self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=False,
+ expected_metadata=moved_metadata)
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+ deleted_metadata_dc1 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ # check user info on DC1 - should be deleted user
+ self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc1)
+
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+
+ # check user info on DC1 - should be deleted user
+ self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc1)
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ deleted_metadata_dc2 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc2_guid, 2),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ # check user info on DC2 - should be deleted user
+ self._check_obj(sam_ldb=self.ldb_dc2, drs=self.drs_dc2,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc2)
+
+ def test_ReplicateMoveObject3b(self):
+ """Verifies how a moved container with a user inside is replicated between two DCs.
+ This test should verify that:
+ - the OU is created on DC1
+ - the OU is renamed on DC1
+ - We verify that after replication,
+ that the user has the correct DN (under OU2).
+
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ initial_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_orig, is_deleted=False,
+ expected_metadata=initial_metadata)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+ ldb_res = self.ldb_dc1.search(base=self.ou2_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+
+ # trigger replication from DC2 (which has never seen the object) to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ moved_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ # check user info on DC1 after rename - should be valid user
+ self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=False,
+ expected_metadata=moved_metadata)
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+ deleted_metadata_dc1 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ # check user info on DC1 - should be deleted user
+ self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc1)
+
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+
+ # check user info on DC1 - should be deleted user
+ self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc1)
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ deleted_metadata_dc2 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc2_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ # check user info on DC2 - should be deleted user
+ self._check_obj(sam_ldb=self.ldb_dc2, drs=self.drs_dc2,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc2)
+
+ def test_ReplicateMoveObject4(self):
+ """Verifies how a moved container with a user inside is replicated between two DCs.
+ This test should verify that:
+ - the OU is replicated properly
+ - the user is modified on DC2
+ - the OU is renamed on DC1
+ - We verify that after replication DC1 -> DC2,
+ that the user has the correct DN (under OU2), and the description
+
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ initial_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_orig, is_deleted=False,
+ expected_metadata=initial_metadata)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ initial_metadata_dc2 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc2_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ # check user info on DC2 - should still be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, drs=self.drs_dc2,
+ obj_orig=user_orig, is_deleted=False,
+ expected_metadata=initial_metadata_dc2)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+ ldb_res = self.ldb_dc1.search(base=self.ou2_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+
+ moved_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ # check user info on DC1 after rename - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=False,
+ expected_metadata=moved_metadata)
+
+ # Modify description on DC2. This change will replicate, but 'name'
+ # will not, and so this exercises a bug in Samba regarding the DN.
+ msg = ldb.Message()
+ msg.dn = user_dn
+ msg["description"] = ldb.MessageElement("User Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
+ modified_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc2_guid, 1),
+ (DRSUAPI_ATTID_description, self.dc2_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, drs=self.drs_dc2,
+ obj_orig=user_orig,
+ is_deleted=False,
+ expected_metadata=modified_metadata)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ modified_renamed_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc2_guid, 2),
+ (DRSUAPI_ATTID_description, self.dc2_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ # check user info on DC2 - should still be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, drs=self.drs_dc2,
+ obj_orig=user_moved_orig,
+ is_deleted=False,
+ expected_metadata=modified_renamed_metadata)
+
+ self.assertTrue("description" in user_cur)
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+ deleted_metadata_dc1 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ # check user info on DC1 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc1)
+
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check user info on DC2 - should still be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, drs=self.drs_dc2,
+ obj_orig=user_moved_orig,
+ is_deleted=False,
+ expected_metadata=modified_renamed_metadata)
+
+ self.assertTrue("description" in user_cur)
+
+ deleted_metadata_dc1 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_description, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ # check user info on DC1 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc1)
+
+ self.assertFalse("description" in user_cur)
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ deleted_metadata_dc2 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc2_guid, 3),
+ (DRSUAPI_ATTID_description, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ # check user info on DC2 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, drs=self.drs_dc2,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc2)
+
+ self.assertFalse("description" in user_cur)
+
+ def test_ReplicateMoveObject5(self):
+ """Verifies how a moved container with a user inside is replicated between two DCs.
+ This test should verify that:
+ - the OU is replicated properly
+ - the user is modified on DC2
+ - the OU is renamed on DC1
+ - We verify that after replication DC2 -> DC1,
+ that the user has the correct DN (under OU2), and the description
+
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC1 - should still be a valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=False)
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=False)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+ ldb_res = self.ldb_dc1.search(base=self.ou2_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+
+ # Modify description on DC2. This change will replicate, but 'name'
+ # will not, and so this exercises a bug in Samba regarding the DN.
+ msg = ldb.Message()
+ msg.dn = user_dn
+ msg["description"] = ldb.MessageElement("User Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check user info on DC1 - should still be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_moved_orig, is_deleted=False)
+ self.assertTrue("description" in user_cur)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved_orig, is_deleted=False)
+ self.assertTrue("description" in user_cur)
+
+ # delete user on DC2
+ self.ldb_dc2.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+ # trigger replication from DC2 to DC1 for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+
+ # check user info on DC1 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_moved_orig, is_deleted=True)
+ self.assertFalse("description" in user_cur)
+
+ def test_ReplicateMoveObject6(self):
+ """Verifies how a moved container is replicated between two DCs.
+ This test should verify that:
+ - the OU1 is replicated properly
+ - the OU1 is modified on DC2
+ - the OU1 is renamed on DC1
+ - after replication DC1 -> DC2, OU1 has the correct DN
+ (under OU2) and still carries the description
+
+ """
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_BASE,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ ou_orig = ldb_res[0]
+ ou_dn = ldb_res[0]["dn"]
+
+ # check OU info on DC1
+ print("Testing for %s with GUID %s" % (self.ou1_dn, self._GUID_string(ou_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check OU info on DC1 - should still be a valid OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "OU=%s" % self.ou1_dn.get_component_value(0))
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(ou_dn, new_dn)
+ ldb_res = self.ldb_dc1.search(base=new_dn,
+ scope=SCOPE_BASE,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+
+ ou_moved_orig = ldb_res[0]
+
+ # Modify description on DC2. This change will replicate, but 'name'
+ # will not, and so this exercises a bug in Samba regarding the DN.
+ msg = ldb.Message()
+ msg.dn = ou_dn
+ msg["description"] = ldb.MessageElement("OU Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check OU info on DC2 - should still be a valid OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=ou_moved_orig, is_deleted=False)
+ self.assertTrue("description" in ou_cur)
+
+ # delete OU on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(ou_orig["objectGUID"][0]))
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check OU info on DC1 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_moved_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ # check OU info on DC2 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=ou_moved_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+ def test_ReplicateMoveObject7(self):
+ """Verifies how a moved container is replicated between two DCs.
+ This test should verify that:
+ - the OU1 is replicated properly
+ - the OU1 is modified on DC2
+ - the OU1 is renamed on DC1 to be under OU2
+ - after replication DC2 -> DC1, OU1 has the correct DN
+ (under OU2) and still carries the description
+
+ """
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_BASE,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ ou_orig = ldb_res[0]
+ ou_dn = ldb_res[0]["dn"]
+
+ # check OU info on DC1
+ print("Testing for %s with GUID %s" % (self.ou1_dn, self._GUID_string(ou_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check OU info on DC1 - should still be a valid OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "OU=%s" % self.ou1_dn.get_component_value(0))
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(ou_dn, new_dn)
+ ldb_res = self.ldb_dc1.search(base=new_dn,
+ scope=SCOPE_BASE,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+
+ ou_moved_orig = ldb_res[0]
+
+ # Modify description on DC2. This change will replicate, but 'name'
+ # will not, and so this exercises a bug in Samba regarding the DN.
+ msg = ldb.Message()
+ msg.dn = ou_dn
+ msg["description"] = ldb.MessageElement("OU Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check OU info on DC1 - should still be a valid OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_moved_orig, is_deleted=False)
+ self.assertTrue("description" in ou_cur)
+
+ # delete OU on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(ou_orig["objectGUID"][0]))
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check OU info on DC1 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_moved_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ # check OU info on DC2 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=ou_moved_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+ def test_ReplicateMoveObject8(self):
+ """Verifies how a moved container is replicated between two DCs.
+ This test should verify that:
+ - the OU1 is replicated properly
+ - the OU1 is modified on DC2
+ - the OU1 is renamed on DC1 to OU1-renamed
+ - after replication DC1 -> DC2, OU1 has the correct DN
+ (OU1-renamed) and still carries the description
+
+ """
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_BASE,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ ou_orig = ldb_res[0]
+ ou_dn = ldb_res[0]["dn"]
+
+ # check OU info on DC1
+ print("Testing for %s with GUID %s" % (self.ou1_dn, self._GUID_string(ou_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check OU info on DC1 - should still be a valid OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "OU=%s-renamed" % self.ou1_dn.get_component_value(0))
+ new_dn.add_base(self.ou1_dn.parent())
+ self.ldb_dc1.rename(ou_dn, new_dn)
+ ldb_res = self.ldb_dc1.search(base=new_dn,
+ scope=SCOPE_BASE,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+
+ ou_moved_orig = ldb_res[0]
+
+ # Modify description on DC2. This change will replicate, but 'name'
+ # will not, and so this exercises a bug in Samba regarding the DN.
+ msg = ldb.Message()
+ msg.dn = ou_dn
+ msg["description"] = ldb.MessageElement("OU Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check OU info on DC2 - should still be a valid OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=ou_moved_orig, is_deleted=False)
+ self.assertTrue("description" in ou_cur)
+
+ # delete OU on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(ou_orig["objectGUID"][0]))
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check OU info on DC1 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_moved_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ # check OU info on DC2 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=ou_moved_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+ def test_ReplicateMoveObject9(self):
+ """Verifies how a moved container is replicated between two DCs.
+ This test should verify that:
+ - the OU1 is replicated properly
+ - the OU1 is modified on DC2
+ - the OU1 is renamed on DC1 to be under OU2
+ - the OU1 is renamed on DC1 to OU1-renamed
+ - after replication DC1 -> DC2, OU1 has the correct DN
+ (OU1-renamed) and still carries the description
+
+ """
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_BASE,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ ou_orig = ldb_res[0]
+ ou_dn = ldb_res[0]["dn"]
+
+ # check OU info on DC1
+ print("Testing for %s with GUID %s" % (self.ou1_dn, self._GUID_string(ou_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check OU info on DC1 - should still be a valid OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "OU=%s-renamed" % self.ou1_dn.get_component_value(0))
+ new_dn.add_base(self.ou1_dn.parent())
+ self.ldb_dc1.rename(ou_dn, new_dn)
+ ldb_res = self.ldb_dc1.search(base=new_dn,
+ scope=SCOPE_BASE,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+
+ ou_moved_orig = ldb_res[0]
+
+ # Modify description on DC2. This change will replicate, but 'name'
+ # will not, and so this exercises a bug in Samba regarding the DN.
+ msg = ldb.Message()
+ msg.dn = ou_dn
+ msg["description"] = ldb.MessageElement("OU Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check OU info on DC1 - should still be a valid OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_moved_orig, is_deleted=False)
+ self.assertTrue("description" in ou_cur)
+
+ # delete OU on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(ou_orig["objectGUID"][0]))
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check OU info on DC1 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_moved_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ # check OU info on DC2 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=ou_moved_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+ def test_ReplicateMoveObject10(self):
+ """Verifies how a moved container is replicated between two DCs.
+ This test should verify that:
+ - the OU1 is replicated properly
+ - the OU1 is modified on DC2
+ - the OU1 is deleted on DC1
+ - We verify that after replication DC1 -> DC2,
+ that the OU1 is deleted, and the description has gone away
+
+ """
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_BASE,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ ou_orig = ldb_res[0]
+ ou_dn = ldb_res[0]["dn"]
+
+ # check OU info on DC1
+ print("Testing for %s with GUID %s" % (self.ou1_dn, self._GUID_string(ou_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check OU info on DC1 - should still be a valid OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ # Modify description on DC2. This change will replicate, but 'name'
+ # will not, and so this exercises a bug in Samba regarding the DN.
+ msg = ldb.Message()
+ msg.dn = ou_dn
+ msg["description"] = ldb.MessageElement("OU Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
+ # delete OU on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(ou_orig["objectGUID"][0]))
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check OU info on DC2 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=ou_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+ # trigger replication from DC2 to DC1, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+
+ # check OU info on DC1 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+ def test_ReplicateMoveObject11(self):
+ """Verifies how a moved container is replicated between two DCs.
+ This test should verify that:
+ - the OU1 is replicated properly
+ - the OU1 is modified on DC2
+ - the OU1 is deleted on DC1
+ - We verify that after replication DC2 -> DC1,
+ that the OU1 is deleted, and the description has gone away
+
+ """
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_BASE,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ ou_orig = ldb_res[0]
+ ou_dn = ldb_res[0]["dn"]
+
+ # check OU info on DC1
+ print("Testing for %s with GUID %s" % (self.ou1_dn, self._GUID_string(ou_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check OU info on DC1 - should still be a valid OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ # Modify description on DC2. This change will replicate, but 'name'
+ # will not, and so this exercises a bug in Samba regarding the DN.
+ msg = ldb.Message()
+ msg.dn = ou_dn
+ msg["description"] = ldb.MessageElement("OU Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
+ # delete OU on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(ou_orig["objectGUID"][0]))
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check OU info on DC1 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+        # check OU info on DC2 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=ou_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+
+class DrsMoveBetweenTreeOfObjectTestCase(drs_base.DrsBaseTestCase):
+
+ def setUp(self):
+ super(DrsMoveBetweenTreeOfObjectTestCase, self).setUp()
+        # temporarily disable automatic replication
+ self._disable_all_repl(self.dnsname_dc1)
+ self._disable_all_repl(self.dnsname_dc2)
+
+ self.top_ou = samba.tests.create_test_ou(self.ldb_dc1,
+ "replica_move")
+
+ # make sure DCs are synchronized before the test
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+
+ self.ou1_dn = ldb.Dn(self.ldb_dc1, "OU=DrsOU1")
+ self.ou1_dn.add_base(self.top_ou)
+ self.ou1 = {}
+ self.ou1["dn"] = self.ou1_dn
+ self.ou1["objectclass"] = "organizationalUnit"
+ self.ou1["ou"] = self.ou1_dn.get_component_value(0)
+
+ self.ou2_dn = ldb.Dn(self.ldb_dc1, "OU=DrsOU2,OU=DrsOU1")
+ self.ou2_dn.add_base(self.top_ou)
+ self.ou2 = {}
+ self.ou2["dn"] = self.ou2_dn
+ self.ou2["objectclass"] = "organizationalUnit"
+ self.ou2["ou"] = self.ou2_dn.get_component_value(0)
+
+ self.ou2b_dn = ldb.Dn(self.ldb_dc1, "OU=DrsOU2B,OU=DrsOU1")
+ self.ou2b_dn.add_base(self.top_ou)
+ self.ou2b = {}
+ self.ou2b["dn"] = self.ou2b_dn
+ self.ou2b["objectclass"] = "organizationalUnit"
+ self.ou2b["ou"] = self.ou2b_dn.get_component_value(0)
+
+ self.ou2c_dn = ldb.Dn(self.ldb_dc1, "OU=DrsOU2C,OU=DrsOU1")
+ self.ou2c_dn.add_base(self.top_ou)
+
+ self.ou3_dn = ldb.Dn(self.ldb_dc1, "OU=DrsOU3,OU=DrsOU2,OU=DrsOU1")
+ self.ou3_dn.add_base(self.top_ou)
+ self.ou3 = {}
+ self.ou3["dn"] = self.ou3_dn
+ self.ou3["objectclass"] = "organizationalUnit"
+ self.ou3["ou"] = self.ou3_dn.get_component_value(0)
+
+ self.ou4_dn = ldb.Dn(self.ldb_dc1, "OU=DrsOU4,OU=DrsOU3,OU=DrsOU2,OU=DrsOU1")
+ self.ou4_dn.add_base(self.top_ou)
+ self.ou4 = {}
+ self.ou4["dn"] = self.ou4_dn
+ self.ou4["objectclass"] = "organizationalUnit"
+ self.ou4["ou"] = self.ou4_dn.get_component_value(0)
+
+ self.ou5_dn = ldb.Dn(self.ldb_dc1, "OU=DrsOU5,OU=DrsOU4,OU=DrsOU3,OU=DrsOU2,OU=DrsOU1")
+ self.ou5_dn.add_base(self.top_ou)
+ self.ou5 = {}
+ self.ou5["dn"] = self.ou5_dn
+ self.ou5["objectclass"] = "organizationalUnit"
+ self.ou5["ou"] = self.ou5_dn.get_component_value(0)
+
+ self.ou6_dn = ldb.Dn(self.ldb_dc1, "OU=DrsOU6,OU=DrsOU5,OU=DrsOU4,OU=DrsOU3,OU=DrsOU2,OU=DrsOU1")
+ self.ou6_dn.add_base(self.top_ou)
+ self.ou6 = {}
+ self.ou6["dn"] = self.ou6_dn
+ self.ou6["objectclass"] = "organizationalUnit"
+ self.ou6["ou"] = self.ou6_dn.get_component_value(0)
+
+ def tearDown(self):
+ self.ldb_dc1.delete(self.top_ou, ["tree_delete:1"])
+ self._enable_all_repl(self.dnsname_dc1)
+ self._enable_all_repl(self.dnsname_dc2)
+ super(DrsMoveBetweenTreeOfObjectTestCase, self).tearDown()
+
+ def _make_username(self):
+ return "DrsTreeU_" + time.strftime("%s", time.gmtime())
+
+ # now also used to check the group
+ def _check_obj(self, sam_ldb, obj_orig, is_deleted):
+ # search the user by guid as it may be deleted
+ guid_str = self._GUID_string(obj_orig["objectGUID"][0])
+ res = sam_ldb.search(base='<GUID=%s>' % guid_str,
+ controls=["show_deleted:1"],
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(res), 1)
+ user_cur = res[0]
+ cn_orig = str(obj_orig["cn"][0])
+ cn_cur = str(user_cur["cn"][0])
+ name_orig = str(obj_orig["name"][0])
+ name_cur = str(user_cur["name"][0])
+ dn_orig = obj_orig["dn"]
+ dn_cur = user_cur["dn"]
+ # now check properties of the user
+ if is_deleted:
+ self.assertTrue("isDeleted" in user_cur)
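+            # Deleted objects get a mangled RDN of the form "<name>\nDEL:<objectGUID>",
+            # so compare only the part before the newline with the original values.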
+ self.assertEqual(cn_cur.split('\n')[0], cn_orig)
+ self.assertEqual(name_cur.split('\n')[0], name_orig)
+ self.assertEqual(dn_cur.get_rdn_value().split('\n')[0],
+ dn_orig.get_rdn_value())
+ self.assertEqual(name_cur, cn_cur)
+ else:
+ self.assertFalse("isDeleted" in user_cur)
+ self.assertEqual(cn_cur, cn_orig)
+ self.assertEqual(name_cur, name_orig)
+ self.assertEqual(dn_cur, dn_orig)
+ self.assertEqual(name_cur, cn_cur)
+ self.assertEqual(name_cur, user_cur.dn.get_rdn_value())
+
+ return user_cur
+
+ def test_ReplicateMoveInTree1(self):
+ """Verifies how an object is replicated between two DCs.
+ This test should verify that:
+ - a complex OU tree can be replicated correctly
+        - the user ends up in the correct spot (the OU it was renamed into)
+          within the tree on both DCs
+ """
+        # work out a unique username to test with
+ username = self._make_username()
+
+ self.ldb_dc1.add(self.ou1)
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=False)
+
+ self.ldb_dc1.add(self.ou2)
+ self.ldb_dc1.add(self.ou3)
+ self.ldb_dc1.add(self.ou4)
+ self.ldb_dc1.add(self.ou5)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou5_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+ ldb_res = self.ldb_dc1.search(base=self.ou2_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be valid user
+ self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved_orig, is_deleted=False)
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ def test_ReplicateMoveInTree2(self):
+ """Verifies how an object is replicated between two DCs.
+ This test should verify that:
+ - a complex OU tree can be replicated correctly
+        - the user ends up in the correct spot (the OU it was renamed into)
+          within the tree on both DCs
+ - that a rename back works correctly, and is replicated
+ """
+        # work out a unique username to test with
+ username = self._make_username()
+
+ self.ldb_dc1.add(self.ou1)
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=False)
+
+ self.ldb_dc1.add(self.ou2)
+ self.ldb_dc1.add(self.ou2b)
+ self.ldb_dc1.add(self.ou3)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou3_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+
+ new_dn3 = ldb.Dn(self.ldb_dc1, "OU=%s" % self.ou3_dn.get_component_value(0))
+ new_dn3.add_base(self.ou2b_dn)
+ self.ldb_dc1.rename(self.ou3_dn, new_dn3)
+
+ ldb_res = self.ldb_dc1.search(base=new_dn3,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+ user_moved_dn = ldb_res[0]["dn"]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved_orig, is_deleted=False)
+
+ # Rename on DC1
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou1_dn)
+ self.ldb_dc1.rename(user_moved_dn, new_dn)
+
+ # Modify description on DC2
+ msg = ldb.Message()
+ msg.dn = user_moved_dn
+ msg["description"] = ldb.MessageElement("User Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+ user_moved_dn = ldb_res[0]["dn"]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved_orig, is_deleted=False)
+ self.assertTrue("description" in user_cur)
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check user info on DC1 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_moved_orig, is_deleted=True)
+ self.assertFalse("description" in user_cur)
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved_orig, is_deleted=True)
+ self.assertFalse("description" in user_cur)
+
+ def test_ReplicateMoveInTree3(self):
+ """Verifies how an object is replicated between two DCs.
+ This test should verify that:
+ - a complex OU tree can be replicated correctly
+        - the user ends up in the correct spot (the OU it was renamed into)
+          within the tree on both DCs
+ - that a rename back works correctly, and is replicated
+ """
+        # work out a unique username to test with
+ username = self._make_username()
+
+ self.ldb_dc1.add(self.ou1)
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=False)
+
+ self.ldb_dc1.add(self.ou2)
+ self.ldb_dc1.add(self.ou2b)
+ self.ldb_dc1.add(self.ou3)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou3_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+
+ new_dn3 = ldb.Dn(self.ldb_dc1, "OU=%s" % self.ou3_dn.get_component_value(0))
+ new_dn3.add_base(self.ou2b_dn)
+ self.ldb_dc1.rename(self.ou3_dn, new_dn3)
+
+ ldb_res = self.ldb_dc1.search(base=new_dn3,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+ user_moved_dn = ldb_res[0]["dn"]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved_orig, is_deleted=False)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_moved_dn, new_dn)
+
+ self.ldb_dc1.rename(self.ou2_dn, self.ou2c_dn)
+ self.ldb_dc1.rename(self.ou2b_dn, self.ou2_dn)
+ self.ldb_dc1.rename(self.ou2c_dn, self.ou2b_dn)
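+        # The three renames above swap the names of DrsOU2 and DrsOU2B (using
+        # DrsOU2C as a temporary name); the user keeps the same parent object,
+        # which is now called DrsOU2B.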
+
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+ user_moved_dn = ldb_res[0]["dn"]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved_orig, is_deleted=False)
+
+ self.assertEqual(user_cur["parentGUID"], user_moved_orig["parentGUID"])
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ def test_ReplicateMoveInTree3b(self):
+ """Verifies how an object is replicated between two DCs.
+ This test should verify that:
+ - a complex OU tree can be replicated correctly
+        - the user ends up in the correct spot (the OU it was renamed into)
+          within the tree on both DCs
+        - that a rename back works correctly, and is replicated
+        - that a complex rename shuffle, combined with unrelated changes to the object,
+          is replicated correctly. The aim here is to send the objects out-of-order
+          when sorted by usnChanged.
+        - confirm that the OU tree (and in particular the user DN) is identical between
+          the DCs once this has been replicated.
+ """
+        # work out a unique username to test with
+ username = self._make_username()
+
+ self.ldb_dc1.add(self.ou1)
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=False)
+
+ self.ldb_dc1.add(self.ou2)
+ self.ldb_dc1.add(self.ou2b)
+ self.ldb_dc1.add(self.ou3)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+
+ ldb_res = self.ldb_dc1.search(base=self.ou2_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved_orig, is_deleted=False)
+
+ msg = ldb.Message()
+ msg.dn = new_dn
+ msg["description"] = ldb.MessageElement("User Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc1.modify(msg)
+
+        # The sleep(1) calls here ensure that the renamed objects get a new
+        # 1-second-resolution timestamp on 'name', and so we control which
+        # object wins the conflict resolution.
+ self.ldb_dc1.rename(self.ou2_dn, self.ou2c_dn)
+ time.sleep(1)
+ self.ldb_dc1.rename(self.ou2b_dn, self.ou2_dn)
+ time.sleep(1)
+ self.ldb_dc1.rename(self.ou2c_dn, self.ou2b_dn)
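+        # Net effect of the shuffle: the OUs originally named DrsOU2 and
+        # DrsOU2B have swapped names, with DrsOU2C used as the temporary name.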
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]), new_dn)
+
+ msg = ldb.Message()
+ msg.dn = self.ou2_dn
+ msg["description"] = ldb.MessageElement("OU2 Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc1.modify(msg)
+
+ msg = ldb.Message()
+ msg.dn = self.ou2b_dn
+ msg["description"] = ldb.MessageElement("OU2b Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc1.modify(msg)
+
+ ldb_res = self.ldb_dc1.search(base=self.ou2_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved_orig, is_deleted=False)
+ self.assertEqual(user_cur["parentGUID"][0], user_moved_orig["parentGUID"][0])
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ def test_ReplicateMoveInTree4(self):
+ """Verifies how an object is replicated between two DCs.
+ This test should verify that:
+ - an OU and user can be replicated correctly, even after a rename
+        - The creation and rename of the OU are combined with unrelated changes to the object.
+          The aim here is to send the objects out-of-order when sorted by usnChanged.
+        - That is, the OU will be sorted by usnChanged after the user that is within that OU.
+        - That will cause the client to need to get the OU first, by use of the GET_ANC flag
+ """
+        # work out a unique username to test with
+ username = self._make_username()
+
+ self.ldb_dc1.add(self.ou1)
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=False)
+
+ self.ldb_dc1.add(self.ou2)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+
+ msg = ldb.Message()
+ msg.dn = self.ou2_dn
+ msg["description"] = ldb.MessageElement("OU2 Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc1.modify(msg)
+
+ ldb_res = self.ldb_dc1.search(base=self.ou2_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be valid user
+ self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved_orig, is_deleted=False)
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ def test_ReplicateAddInOU(self):
+ """Verifies how an object is replicated between two DCs.
+ This test should verify that:
+ - an OU and user can be replicated correctly
+        - The creation of the OU is combined with unrelated changes to the object.
+          The aim here is to send the objects out-of-order when sorted by usnChanged.
+        - That is, the OU will be sorted by usnChanged after the user that is within that OU.
+        - That will cause the client to need to get the OU first, by use of the GET_ANC flag
+ """
+        # work out a unique username to test with
+ username = self._make_username()
+
+ self.ldb_dc1.add(self.ou1)
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+
+ msg = ldb.Message()
+ msg.dn = self.ou1_dn
+ msg["description"] = ldb.MessageElement("OU1 Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc1.modify(msg)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_orig, is_deleted=False)
+
+ self.assertEqual(user_cur["parentGUID"], user_orig["parentGUID"])
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ def test_ReplicateAddInMovedOU(self):
+ """Verifies how an object is replicated between two DCs.
+ This test should verify that:
+ - an OU and user can be replicated correctly
+        - The creation of the OU is combined with unrelated changes to the object.
+          The aim here is to send the objects out-of-order when sorted by usnChanged.
+        - That is, the OU will be sorted by usnChanged after the user that is within that OU.
+        - That will cause the client to need to get the OU first, by use of the GET_ANC flag
+ """
+        # work out a unique username to test with
+ username = self._make_username()
+
+ self.ldb_dc1.add(self.ou1)
+ self.ldb_dc1.add(self.ou2)
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+
+ self.ldb_dc1.rename(self.ou2_dn, self.ou2b_dn)
+
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_moved = ldb_res[0]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved, is_deleted=False)
+
+ self.assertEqual(user_cur["parentGUID"], user_moved["parentGUID"])
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ def test_ReplicateAddInConflictOU_time(self):
+ """Verifies how an object is replicated between two DCs, when created in an ambiguous location
+ This test should verify that:
+ - Without replication, two conflicting objects can be created
+        - force the conflict resolution algorithm so we know which copy will win
+          (by sleeping between creating the objects, thereby increasing the timestamp on 'name')
+ - confirm that the user object, created on DC1, ends up in the right place on DC2
+ - therefore confirm that the conflict algorithm worked correctly, and that parentGUID was used.
+
+ """
+        # work out a unique username to test with
+ username = self._make_username()
+
+ self.ldb_dc1.add(self.ou1)
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+        # Now create two conflicting objects. This gives the user
+        # object something to be under on both DCs.
+
+ # We sleep between the two adds so that DC1 adds second, and
+ # so wins the conflict resolution due to a later creation time
+ # (modification timestamp on the name attribute).
+ self.ldb_dc2.add(self.ou2)
+ time.sleep(1)
+ self.ldb_dc1.add(self.ou2)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+
+ # Now that we have renamed the user (and so bumped the
+ # usnChanged), bump the value on the OUs.
+ msg = ldb.Message()
+ msg.dn = self.ou2_dn
+ msg["description"] = ldb.MessageElement("OU2 Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc1.modify(msg)
+
+ msg = ldb.Message()
+ msg.dn = self.ou2_dn
+ msg["description"] = ldb.MessageElement("OU2 Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_moved = ldb_res[0]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be under the OU2 from DC1
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved, is_deleted=False)
+
+ self.assertEqual(user_cur["parentGUID"], user_moved["parentGUID"])
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ def test_ReplicateAddInConflictOU2(self):
+ """Verifies how an object is replicated between two DCs, when created in an ambiguous location
+ This test should verify that:
+ - Without replication, two conflicting objects can be created
+ - force the conflict resolution algorithm so we know which copy will win
+          (by changing the description twice, thereby increasing the version count)
+ - confirm that the user object, created on DC1, ends up in the right place on DC2
+ - therefore confirm that the conflict algorithm worked correctly, and that parentGUID was used.
+ """
+        # work out a unique username to test with
+ username = self._make_username()
+
+ self.ldb_dc1.add(self.ou1)
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+        # Now create two conflicting objects. This gives the user
+        # object something to be under on both DCs. We create it on
+        # DC1 one second later so that it will win the conflict resolution.
+
+ self.ldb_dc2.add(self.ou2)
+ time.sleep(1)
+ self.ldb_dc1.add(self.ou2)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+
+ # Now that we have renamed the user (and so bumped the
+ # usnChanged), bump the value on the OUs.
+ msg = ldb.Message()
+ msg.dn = self.ou2_dn
+ msg["description"] = ldb.MessageElement("OU2 Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc1.modify(msg)
+
+ msg = ldb.Message()
+ msg.dn = self.ou2_dn
+ msg["description"] = ldb.MessageElement("OU2 Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_moved = ldb_res[0]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be under the OU2 from DC1
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved, is_deleted=False)
+
+ self.assertEqual(user_cur["parentGUID"], user_moved["parentGUID"])
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
diff --git a/source4/torture/drs/python/repl_rodc.py b/source4/torture/drs/python/repl_rodc.py
new file mode 100644
index 0000000..ab3d6fa
--- /dev/null
+++ b/source4/torture/drs/python/repl_rodc.py
@@ -0,0 +1,735 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Test replication scenarios involving an RODC
+#
+# Copyright (C) Catalyst.Net Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc1_dns_name [this is unused for the test, but it'll still try to connect]
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN repl_rodc -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import drs_base
+import samba.tests
+import ldb
+
+from samba import WERRORError
+from samba.join import DCJoinContext
+from samba.dcerpc import drsuapi, misc, drsblobs, security
+from samba.ndr import ndr_unpack, ndr_pack
+from samba.samdb import dsdb_Dn
+from samba.credentials import Credentials
+
+import random
+import time
+
+
+def drs_get_rodc_partial_attribute_set(samdb, samdb1, exceptions=None):
+ '''get a list of attributes for RODC replication'''
+ if exceptions is None:
+ exceptions = []
+
+ partial_attribute_set = drsuapi.DsPartialAttributeSet()
+ partial_attribute_set.version = 1
+
+ attids = []
+
+ # the exact list of attids we send is quite critical. Note that
+ # we do ask for the secret attributes, but set SPECIAL_SECRET_PROCESSING
+ # to zero them out
+ schema_dn = samdb.get_schema_basedn()
+ res = samdb.search(base=schema_dn, scope=ldb.SCOPE_SUBTREE,
+ expression="objectClass=attributeSchema",
+ attrs=["lDAPDisplayName", "systemFlags",
+ "searchFlags"])
+
+ for r in res:
+ ldap_display_name = str(r["lDAPDisplayName"][0])
+ if "systemFlags" in r:
+ system_flags = str(r["systemFlags"][0])
+ if (int(system_flags) & (samba.dsdb.DS_FLAG_ATTR_NOT_REPLICATED |
+ samba.dsdb.DS_FLAG_ATTR_IS_CONSTRUCTED)):
+ continue
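+        # skip attributes in the RODC filtered attribute set - these are never
+        # replicated to an RODC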
+ if "searchFlags" in r:
+ search_flags = str(r["searchFlags"][0])
+ if (int(search_flags) & samba.dsdb.SEARCH_FLAG_RODC_ATTRIBUTE):
+ continue
+ try:
+ attid = samdb1.get_attid_from_lDAPDisplayName(ldap_display_name)
+ if attid not in exceptions:
+ attids.append(int(attid))
+ except:
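+            # the schema attribute may not be known to the temporary (join)
+            # samdb; if it cannot be mapped to an attid, just skip it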
+ pass
+
+ # the attids do need to be sorted, or windows doesn't return
+ # all the attributes we need
+ attids.sort()
+ partial_attribute_set.attids = attids
+ partial_attribute_set.num_attids = len(attids)
+ return partial_attribute_set
+
+
+class DrsRodcTestCase(drs_base.DrsBaseTestCase):
+ """Intended as a semi-black box test case for replication involving
+ an RODC."""
+
+ def setUp(self):
+ super(DrsRodcTestCase, self).setUp()
+ self.base_dn = self.ldb_dc1.get_default_basedn()
+
+ self.ou = samba.tests.create_test_ou(self.ldb_dc1, "test_drs_rodc")
+ self.allowed_group = "CN=Allowed RODC Password Replication Group,CN=Users,%s" % self.base_dn
+
+ self.site = self.ldb_dc1.server_site_name()
+ self.rodc_name = "TESTRODCDRS%s" % random.randint(1, 10000000)
+ self.rodc_pass = "password12#"
+ self.computer_dn = "CN=%s,OU=Domain Controllers,%s" % (self.rodc_name, self.base_dn)
+
+ self.rodc_ctx = DCJoinContext(server=self.ldb_dc1.host_dns_name(),
+ creds=self.get_credentials(),
+ lp=self.get_loadparm(), site=self.site,
+ netbios_name=self.rodc_name,
+ targetdir=None, domain=None,
+ machinepass=self.rodc_pass)
+ self._create_rodc(self.rodc_ctx)
+ self.rodc_ctx.create_tmp_samdb()
+ self.tmp_samdb = self.rodc_ctx.tmp_samdb
+
+ rodc_creds = Credentials()
+ rodc_creds.guess(self.rodc_ctx.lp)
+ rodc_creds.set_username(self.rodc_name + '$')
+ rodc_creds.set_password(self.rodc_pass)
+ self.rodc_creds = rodc_creds
+
+ (self.drs, self.drs_handle) = self._ds_bind(self.dnsname_dc1)
+ (self.rodc_drs, self.rodc_drs_handle) = self._ds_bind(self.dnsname_dc1, rodc_creds)
+
+ def tearDown(self):
+ self.rodc_ctx.cleanup_old_join()
+ super(DrsRodcTestCase, self).tearDown()
+
+ def test_admin_repl_secrets(self):
+ """
+ When a secret attribute is set to be replicated to an RODC with the
+ admin credentials, it should always replicate regardless of whether
+ or not it's in the Allowed RODC Password Replication Group.
+ """
+ rand = random.randint(1, 10000000)
+ expected_user_attributes = [drsuapi.DRSUAPI_ATTID_lmPwdHistory,
+ drsuapi.DRSUAPI_ATTID_supplementalCredentials,
+ drsuapi.DRSUAPI_ATTID_ntPwdHistory,
+ drsuapi.DRSUAPI_ATTID_unicodePwd,
+ drsuapi.DRSUAPI_ATTID_dBCSPwd]
+
+ user_name = "test_rodcA_%s" % rand
+ user_dn = "CN=%s,%s" % (user_name, self.ou)
+ self.ldb_dc1.add({
+ "dn": user_dn,
+ "objectclass": "user",
+ "sAMAccountName": user_name
+ })
+
+ # Store some secret on this user
+ self.ldb_dc1.setpassword("(sAMAccountName=%s)" % user_name, 'penguin12#', False, user_name)
+
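+        # Build a GetNCChanges request with EXOP_REPL_SECRET so the DC sends
+        # this user's secret attributes to the destination DSA (the RODC).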
+ req10 = self._getnc_req10(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=user_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ partial_attribute_set=drs_get_rodc_partial_attribute_set(self.ldb_dc1, self.tmp_samdb),
+ max_objects=133,
+ replica_flags=0)
+ (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 10, req10)
+
+ # Check that the user has been added to msDSRevealedUsers
+ self._assert_in_revealed_users(user_dn, expected_user_attributes)
+
+ def test_admin_repl_secrets_DummyDN_GUID(self):
+ """
+ When a secret attribute is set to be replicated to an RODC with the
+ admin credentials, it should always replicate regardless of whether
+ or not it's in the Allowed RODC Password Replication Group.
+ """
+ rand = random.randint(1, 10000000)
+ expected_user_attributes = [drsuapi.DRSUAPI_ATTID_lmPwdHistory,
+ drsuapi.DRSUAPI_ATTID_supplementalCredentials,
+ drsuapi.DRSUAPI_ATTID_ntPwdHistory,
+ drsuapi.DRSUAPI_ATTID_unicodePwd,
+ drsuapi.DRSUAPI_ATTID_dBCSPwd]
+
+ user_name = "test_rodcA_%s" % rand
+ user_dn = "CN=%s,%s" % (user_name, self.ou)
+ self.ldb_dc1.add({
+ "dn": user_dn,
+ "objectclass": "user",
+ "sAMAccountName": user_name
+ })
+
+ res = self.ldb_dc1.search(base=user_dn, scope=ldb.SCOPE_BASE,
+ attrs=["objectGUID"])
+
+ user_guid = misc.GUID(res[0]["objectGUID"][0])
+
+ # Store some secret on this user
+ self.ldb_dc1.setpassword("(sAMAccountName=%s)" % user_name, 'penguin12#', False, user_name)
+
+ req10 = self._getnc_req10(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str="DummyDN",
+ nc_guid=user_guid,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ partial_attribute_set=drs_get_rodc_partial_attribute_set(self.ldb_dc1, self.tmp_samdb),
+ max_objects=133,
+ replica_flags=0)
+ try:
+ (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 10, req10)
+ except WERRORError as e1:
+ (enum, estr) = e1.args
+ self.fail(f"DsGetNCChanges failed with {estr}")
+
+ # Check that the user has been added to msDSRevealedUsers
+ self._assert_in_revealed_users(user_dn, expected_user_attributes)
+
+ def test_rodc_repl_secrets(self):
+ """
+ When a secret attribute is set to be replicated to an RODC with
+        the RODC account credentials, it should not replicate unless the
+        user is in the Allowed RODC Password Replication Group. Retrying
+        with administrator credentials should succeed regardless.
+ """
+ rand = random.randint(1, 10000000)
+ expected_user_attributes = [drsuapi.DRSUAPI_ATTID_lmPwdHistory,
+ drsuapi.DRSUAPI_ATTID_supplementalCredentials,
+ drsuapi.DRSUAPI_ATTID_ntPwdHistory,
+ drsuapi.DRSUAPI_ATTID_unicodePwd,
+ drsuapi.DRSUAPI_ATTID_dBCSPwd]
+
+ user_name = "test_rodcB_%s" % rand
+ user_dn = "CN=%s,%s" % (user_name, self.ou)
+ self.ldb_dc1.add({
+ "dn": user_dn,
+ "objectclass": "user",
+ "sAMAccountName": user_name
+ })
+
+ # Store some secret on this user
+ self.ldb_dc1.setpassword("(sAMAccountName=%s)" % user_name, 'penguin12#', False, user_name)
+
+ req10 = self._getnc_req10(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=user_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ partial_attribute_set=drs_get_rodc_partial_attribute_set(self.ldb_dc1, self.tmp_samdb),
+ max_objects=133,
+ replica_flags=0)
+
+ try:
+ (level, ctr) = self.rodc_drs.DsGetNCChanges(self.rodc_drs_handle, 10, req10)
+ self.fail("Successfully replicated secrets to an RODC that shouldn't have been replicated.")
+ except WERRORError as e:
+ (enum, estr) = e.args
+ self.assertEqual(enum, 8630) # ERROR_DS_DRA_SECRETS_DENIED
+
+ # send the same request again and we should get the same response
+ try:
+ (level, ctr) = self.rodc_drs.DsGetNCChanges(self.rodc_drs_handle, 10, req10)
+ self.fail("Successfully replicated secrets to an RODC that shouldn't have been replicated.")
+ except WERRORError as e1:
+ (enum, estr) = e1.args
+ self.assertEqual(enum, 8630) # ERROR_DS_DRA_SECRETS_DENIED
+
+ # Retry with Administrator credentials, ignores password replication groups
+ (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 10, req10)
+
+ # Check that the user has been added to msDSRevealedUsers
+ self._assert_in_revealed_users(user_dn, expected_user_attributes)
+
+ def test_rodc_repl_secrets_follow_on_req(self):
+ """
+ Checks that an RODC can't subvert an existing (valid) GetNCChanges
+ request to reveal secrets it shouldn't have access to.
+ """
+
+ # send an acceptable request that will match as many GUIDs as possible.
+ # Here we set the SPECIAL_SECRET_PROCESSING flag so that the request gets accepted.
+ # (On the server, this builds up the getnc_state->guids array)
+ req8 = self._exop_req8(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=self.ldb_dc1.domain_dn(),
+ exop=drsuapi.DRSUAPI_EXOP_NONE,
+ max_objects=1,
+ replica_flags=drsuapi.DRSUAPI_DRS_SPECIAL_SECRET_PROCESSING)
+ (level, ctr) = self.rodc_drs.DsGetNCChanges(self.rodc_drs_handle, 8, req8)
+
+        # Get the next replication chunk, but set REPL_SECRET this time. This
+        # follows on from the previously accepted request, but we've changed
+        # the exop to now request secrets. This request should fail.
+ try:
+ req8 = self._exop_req8(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=self.ldb_dc1.domain_dn(),
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET)
+ req8.highwatermark = ctr.new_highwatermark
+
+ (level, ctr) = self.rodc_drs.DsGetNCChanges(self.rodc_drs_handle, 8, req8)
+
+ self.fail("Successfully replicated secrets to an RODC that shouldn't have been replicated.")
+ except RuntimeError as e2:
+ (enum, estr) = e2.args
+ pass
+
+ def test_msDSRevealedUsers_admin(self):
+ """
+ When a secret attribute is to be replicated to an RODC, the contents
+ of the attribute should be added to the msDSRevealedUsers attribute
+ of the computer object corresponding to the RODC.
+ """
+
+ rand = random.randint(1, 10000000)
+ expected_user_attributes = [drsuapi.DRSUAPI_ATTID_lmPwdHistory,
+ drsuapi.DRSUAPI_ATTID_supplementalCredentials,
+ drsuapi.DRSUAPI_ATTID_ntPwdHistory,
+ drsuapi.DRSUAPI_ATTID_unicodePwd,
+ drsuapi.DRSUAPI_ATTID_dBCSPwd]
+
+ # Add a user on DC1, add it to allowed password replication
+ # group, and replicate to RODC with EXOP_REPL_SECRETS
+ user_name = "test_rodcC_%s" % rand
+ password = "password12#"
+ user_dn = "CN=%s,%s" % (user_name, self.ou)
+ self.ldb_dc1.add({
+ "dn": user_dn,
+ "objectclass": "user",
+ "sAMAccountName": user_name
+ })
+
+ # Store some secret on this user
+ self.ldb_dc1.setpassword("(sAMAccountName=%s)" % user_name, password, False, user_name)
+
+ self.ldb_dc1.add_remove_group_members("Allowed RODC Password Replication Group",
+ [user_name],
+ add_members_operation=True)
+
+ req10 = self._getnc_req10(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=user_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ partial_attribute_set=drs_get_rodc_partial_attribute_set(self.ldb_dc1, self.tmp_samdb),
+ max_objects=133,
+ replica_flags=0)
+ (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 10, req10)
+
+ # Check that the user has been added to msDSRevealedUsers
+ (packed_attrs_1, unpacked_attrs_1) = self._assert_in_revealed_users(user_dn, expected_user_attributes)
+
+ # Change the user's password on DC1
+ self.ldb_dc1.setpassword("(sAMAccountName=%s)" % user_name, password + "1", False, user_name)
+
+ (packed_attrs_2, unpacked_attrs_2) = self._assert_in_revealed_users(user_dn, expected_user_attributes)
+ self._assert_attrlist_equals(unpacked_attrs_1, unpacked_attrs_2)
+
+ # Replicate to RODC again with EXOP_REPL_SECRETS
+ req10 = self._getnc_req10(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=user_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ partial_attribute_set=drs_get_rodc_partial_attribute_set(self.ldb_dc1, self.tmp_samdb),
+ max_objects=133,
+ replica_flags=0)
+ (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 10, req10)
+
+        # This sleep is important when running against Windows, because the
+        # entry won't have been updated in time otherwise. Even with this
+        # sleep, it only passes some of the time...
+ time.sleep(5)
+
+ # Check that the entry in msDSRevealedUsers has been updated
+ (packed_attrs_3, unpacked_attrs_3) = self._assert_in_revealed_users(user_dn, expected_user_attributes)
+ self._assert_attrlist_changed(unpacked_attrs_2, unpacked_attrs_3, expected_user_attributes)
+
+ # We should be able to delete the user
+ self.ldb_dc1.deleteuser(user_name)
+
+ res = self.ldb_dc1.search(scope=ldb.SCOPE_BASE, base=self.computer_dn,
+ attrs=["msDS-RevealedUsers"])
+ self.assertFalse("msDS-RevealedUsers" in res[0])
+
+ def test_msDSRevealedUsers(self):
+ """
+ When a secret attribute is to be replicated to an RODC, the contents
+ of the attribute should be added to the msDSRevealedUsers attribute
+ of the computer object corresponding to the RODC.
+ """
+
+ rand = random.randint(1, 10000000)
+ expected_user_attributes = [drsuapi.DRSUAPI_ATTID_lmPwdHistory,
+ drsuapi.DRSUAPI_ATTID_supplementalCredentials,
+ drsuapi.DRSUAPI_ATTID_ntPwdHistory,
+ drsuapi.DRSUAPI_ATTID_unicodePwd,
+ drsuapi.DRSUAPI_ATTID_dBCSPwd]
+
+ # Add a user on DC1, add it to allowed password replication
+ # group, and replicate to RODC with EXOP_REPL_SECRETS
+ user_name = "test_rodcD_%s" % rand
+ password = "password12#"
+ user_dn = "CN=%s,%s" % (user_name, self.ou)
+ self.ldb_dc1.add({
+ "dn": user_dn,
+ "objectclass": "user",
+ "sAMAccountName": user_name
+ })
+
+ # Store some secret on this user
+ self.ldb_dc1.setpassword("(sAMAccountName=%s)" % user_name, password, False, user_name)
+
+ self.ldb_dc1.add_remove_group_members("Allowed RODC Password Replication Group",
+ [user_name],
+ add_members_operation=True)
+
+ req10 = self._getnc_req10(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=user_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ partial_attribute_set=drs_get_rodc_partial_attribute_set(self.ldb_dc1, self.tmp_samdb),
+ max_objects=133,
+ replica_flags=0)
+ (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 10, req10)
+
+ # Check that the user has been added to msDSRevealedUsers
+ (packed_attrs_1, unpacked_attrs_1) = self._assert_in_revealed_users(user_dn, expected_user_attributes)
+
+ # Change the user's password on DC1
+ self.ldb_dc1.setpassword("(sAMAccountName=%s)" % user_name, password + "1", False, user_name)
+
+ (packed_attrs_2, unpacked_attrs_2) = self._assert_in_revealed_users(user_dn, expected_user_attributes)
+ self._assert_attrlist_equals(unpacked_attrs_1, unpacked_attrs_2)
+
+ # Replicate to RODC again with EXOP_REPL_SECRETS
+ req10 = self._getnc_req10(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=user_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ partial_attribute_set=drs_get_rodc_partial_attribute_set(self.ldb_dc1, self.tmp_samdb),
+ max_objects=133,
+ replica_flags=0)
+ (level, ctr) = self.rodc_drs.DsGetNCChanges(self.rodc_drs_handle, 10, req10)
+
+        # This sleep is important when running against Windows, because the
+        # entry won't have been updated in time otherwise. Even with this
+        # sleep, it only passes some of the time...
+ time.sleep(5)
+
+ # Check that the entry in msDSRevealedUsers has been updated
+ (packed_attrs_3, unpacked_attrs_3) = self._assert_in_revealed_users(user_dn, expected_user_attributes)
+ self._assert_attrlist_changed(unpacked_attrs_2, unpacked_attrs_3, expected_user_attributes)
+
+ # We should be able to delete the user
+ self.ldb_dc1.deleteuser(user_name)
+
+ res = self.ldb_dc1.search(scope=ldb.SCOPE_BASE, base=self.computer_dn,
+ attrs=["msDS-RevealedUsers"])
+ self.assertFalse("msDS-RevealedUsers" in res[0])
+
+ def test_msDSRevealedUsers_pas(self):
+ """
+        If we provide a Partial Attribute Set when replicating to an RODC,
+        the server should ignore it and replicate all of the secret attributes
+        anyway, recording them in the msDS-RevealedUsers attribute.
+ """
+ rand = random.randint(1, 10000000)
+ expected_user_attributes = [drsuapi.DRSUAPI_ATTID_lmPwdHistory,
+ drsuapi.DRSUAPI_ATTID_supplementalCredentials,
+ drsuapi.DRSUAPI_ATTID_ntPwdHistory,
+ drsuapi.DRSUAPI_ATTID_unicodePwd,
+ drsuapi.DRSUAPI_ATTID_dBCSPwd]
+ pas_exceptions = [drsuapi.DRSUAPI_ATTID_lmPwdHistory,
+ drsuapi.DRSUAPI_ATTID_supplementalCredentials,
+ drsuapi.DRSUAPI_ATTID_ntPwdHistory,
+ drsuapi.DRSUAPI_ATTID_dBCSPwd]
+
+ # Add a user on DC1, add it to allowed password replication
+ # group, and replicate to RODC with EXOP_REPL_SECRETS
+ user_name = "test_rodcE_%s" % rand
+ password = "password12#"
+ user_dn = "CN=%s,%s" % (user_name, self.ou)
+ self.ldb_dc1.add({
+ "dn": user_dn,
+ "objectclass": "user",
+ "sAMAccountName": user_name
+ })
+
+ # Store some secret on this user
+ self.ldb_dc1.setpassword("(sAMAccountName=%s)" % user_name, password, False, user_name)
+
+ self.ldb_dc1.add_remove_group_members("Allowed RODC Password Replication Group",
+ [user_name],
+ add_members_operation=True)
+
+ pas = drs_get_rodc_partial_attribute_set(self.ldb_dc1, self.tmp_samdb, exceptions=pas_exceptions)
+ req10 = self._getnc_req10(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=user_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ partial_attribute_set=pas,
+ max_objects=133,
+ replica_flags=0)
+ (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 10, req10)
+
+ # Make sure that we still replicate the secrets
+ for attribute in ctr.first_object.object.attribute_ctr.attributes:
+ if attribute.attid in pas_exceptions:
+ pas_exceptions.remove(attribute.attid)
+ for attribute in pas_exceptions:
+ self.fail("%d was not replicated even though the partial attribute set should be ignored."
+ % attribute)
+
+ # Check that the user has been added to msDSRevealedUsers
+ (packed_attrs_1, unpacked_attrs_1) = self._assert_in_revealed_users(user_dn, expected_user_attributes)
+
+ def test_msDSRevealedUsers_using_other_RODC(self):
+ """
+ Ensure that the machine account is tied to the destination DSA.
+ """
+ # Create a new identical RODC with just the first letter missing
+ other_rodc_name = self.rodc_name[1:]
+ other_rodc_ctx = DCJoinContext(server=self.ldb_dc1.host_dns_name(),
+ creds=self.get_credentials(),
+ lp=self.get_loadparm(), site=self.site,
+ netbios_name=other_rodc_name,
+ targetdir=None, domain=None,
+ machinepass=self.rodc_pass)
+ self._create_rodc(other_rodc_ctx)
+
+ other_rodc_creds = Credentials()
+ other_rodc_creds.guess(other_rodc_ctx.lp)
+ other_rodc_creds.set_username(other_rodc_name + '$')
+ other_rodc_creds.set_password(self.rodc_pass)
+
+ (other_rodc_drs, other_rodc_drs_handle) = self._ds_bind(self.dnsname_dc1, other_rodc_creds)
+
+ rand = random.randint(1, 10000000)
+
+ user_name = "test_rodcF_%s" % rand
+ user_dn = "CN=%s,%s" % (user_name, self.ou)
+ self.ldb_dc1.add({
+ "dn": user_dn,
+ "objectclass": "user",
+ "sAMAccountName": user_name
+ })
+
+ # Store some secret on this user
+ self.ldb_dc1.setpassword("(sAMAccountName=%s)" % user_name, 'penguin12#', False, user_name)
+ self.ldb_dc1.add_remove_group_members("Allowed RODC Password Replication Group",
+ [user_name],
+ add_members_operation=True)
+
+ req10 = self._getnc_req10(dest_dsa=str(other_rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=user_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ partial_attribute_set=drs_get_rodc_partial_attribute_set(self.ldb_dc1, self.tmp_samdb),
+ max_objects=133,
+ replica_flags=0)
+
+ try:
+ (level, ctr) = self.rodc_drs.DsGetNCChanges(self.rodc_drs_handle, 10, req10)
+ self.fail("Successfully replicated secrets to an RODC that shouldn't have been replicated.")
+ except WERRORError as e3:
+ (enum, estr) = e3.args
+ self.assertEqual(enum, 8630) # ERROR_DS_DRA_SECRETS_DENIED
+
+ req10 = self._getnc_req10(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=user_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ partial_attribute_set=drs_get_rodc_partial_attribute_set(self.ldb_dc1, self.tmp_samdb),
+ max_objects=133,
+ replica_flags=0)
+
+ try:
+ (level, ctr) = other_rodc_drs.DsGetNCChanges(other_rodc_drs_handle, 10, req10)
+ self.fail("Successfully replicated secrets to an RODC that shouldn't have been replicated.")
+ except WERRORError as e4:
+ (enum, estr) = e4.args
+ self.assertEqual(enum, 8630) # ERROR_DS_DRA_SECRETS_DENIED
+
+ def test_msDSRevealedUsers_local_deny_allow(self):
+ """
+ Ensure that the deny trumps allow, and we can modify these
+ attributes directly instead of the global groups.
+
+ This may fail on Windows due to tokenGroup calculation caching.
+ """
+ rand = random.randint(1, 10000000)
+ expected_user_attributes = [drsuapi.DRSUAPI_ATTID_lmPwdHistory,
+ drsuapi.DRSUAPI_ATTID_supplementalCredentials,
+ drsuapi.DRSUAPI_ATTID_ntPwdHistory,
+ drsuapi.DRSUAPI_ATTID_unicodePwd,
+ drsuapi.DRSUAPI_ATTID_dBCSPwd]
+
+ # Add a user on DC1, add it to allowed password replication
+ # group, and replicate to RODC with EXOP_REPL_SECRETS
+ user_name = "test_rodcF_%s" % rand
+ password = "password12#"
+ user_dn = "CN=%s,%s" % (user_name, self.ou)
+ self.ldb_dc1.add({
+ "dn": user_dn,
+ "objectclass": "user",
+ "sAMAccountName": user_name
+ })
+
+ # Store some secret on this user
+ self.ldb_dc1.setpassword("(sAMAccountName=%s)" % user_name, password, False, user_name)
+
+ req10 = self._getnc_req10(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=user_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ partial_attribute_set=drs_get_rodc_partial_attribute_set(self.ldb_dc1, self.tmp_samdb),
+ max_objects=133,
+ replica_flags=0)
+
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, self.computer_dn)
+
+ m["msDS-RevealOnDemandGroup"] = \
+ ldb.MessageElement(user_dn, ldb.FLAG_MOD_ADD,
+ "msDS-RevealOnDemandGroup")
+ self.ldb_dc1.modify(m)
+
+ # In local allow, should be success
+ try:
+ (level, ctr) = self.rodc_drs.DsGetNCChanges(self.rodc_drs_handle, 10, req10)
+ except:
+ self.fail("Should have succeeded when in local allow group")
+
+ self._assert_in_revealed_users(user_dn, expected_user_attributes)
+
+ (self.rodc_drs, self.rodc_drs_handle) = self._ds_bind(self.dnsname_dc1, self.rodc_creds)
+
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, self.computer_dn)
+
+ m["msDS-NeverRevealGroup"] = \
+ ldb.MessageElement(user_dn, ldb.FLAG_MOD_ADD,
+ "msDS-NeverRevealGroup")
+ self.ldb_dc1.modify(m)
+
+ # In local allow and deny, should be failure
+ try:
+ (level, ctr) = self.rodc_drs.DsGetNCChanges(self.rodc_drs_handle, 10, req10)
+ self.fail("Successfully replicated secrets to an RODC that shouldn't have been replicated.")
+ except WERRORError as e5:
+ (enum, estr) = e5.args
+ self.assertEqual(enum, 8630) # ERROR_DS_DRA_SECRETS_DENIED
+
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, self.computer_dn)
+
+ m["msDS-RevealOnDemandGroup"] = \
+ ldb.MessageElement(user_dn, ldb.FLAG_MOD_DELETE,
+ "msDS-RevealOnDemandGroup")
+ self.ldb_dc1.modify(m)
+
+ # In local deny, should be failure
+ (self.rodc_drs, self.rodc_drs_handle) = self._ds_bind(self.dnsname_dc1, self.rodc_creds)
+ try:
+ (level, ctr) = self.rodc_drs.DsGetNCChanges(self.rodc_drs_handle, 10, req10)
+ self.fail("Successfully replicated secrets to an RODC that shouldn't have been replicated.")
+ except WERRORError as e6:
+ (enum, estr) = e6.args
+ self.assertEqual(enum, 8630) # ERROR_DS_DRA_SECRETS_DENIED
+
+ def _assert_in_revealed_users(self, user_dn, attrlist):
+ res = self.ldb_dc1.search(scope=ldb.SCOPE_BASE, base=self.computer_dn,
+ attrs=["msDS-RevealedUsers"])
+ revealed_users = res[0]["msDS-RevealedUsers"]
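+        # Each msDS-RevealedUsers value is a binary DN whose binary portion is
+        # an NDR-packed replPropertyMetaData1 describing one revealed attribute.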
+ actual_attrids = []
+ packed_attrs = []
+ unpacked_attrs = []
+ for attribute in revealed_users:
+ attribute = attribute.decode('utf8')
+ dsdb_dn = dsdb_Dn(self.ldb_dc1, attribute)
+ metadata = ndr_unpack(drsblobs.replPropertyMetaData1, dsdb_dn.get_bytes())
+ if user_dn in attribute:
+ unpacked_attrs.append(metadata)
+ packed_attrs.append(dsdb_dn.get_bytes())
+ actual_attrids.append(metadata.attid)
+
+ self.assertEqual(sorted(actual_attrids), sorted(attrlist))
+
+ return (packed_attrs, unpacked_attrs)
+
+ def _assert_attrlist_equals(self, list_1, list_2):
+ return self._assert_attrlist_changed(list_1, list_2, [], num_changes=0, expected_new_usn=False)
+
+ def _assert_attrlist_changed(self, list_1, list_2, changed_attributes, num_changes=1, expected_new_usn=True):
+ for i in range(len(list_2)):
+ self.assertEqual(list_1[i].attid, list_2[i].attid)
+ self.assertEqual(list_1[i].originating_invocation_id, list_2[i].originating_invocation_id)
+ self.assertEqual(list_1[i].version + num_changes, list_2[i].version)
+
+ if expected_new_usn:
+ self.assertTrue(list_1[i].originating_usn < list_2[i].originating_usn)
+ self.assertTrue(list_1[i].local_usn < list_2[i].local_usn)
+ else:
+ self.assertEqual(list_1[i].originating_usn, list_2[i].originating_usn)
+ self.assertEqual(list_1[i].local_usn, list_2[i].local_usn)
+
+ if list_1[i].attid in changed_attributes:
+ # We do the changes too quickly, so unless we put sleeps
+ # in between calls, these remain the same. Checking the USNs
+ # is enough.
+ pass
+ #self.assertTrue(list_1[i].originating_change_time < list_2[i].originating_change_time)
+ else:
+ self.assertEqual(list_1[i].originating_change_time, list_2[i].originating_change_time)
+
+ def _create_rodc(self, ctx):
+ ctx.nc_list = [ctx.base_dn, ctx.config_dn, ctx.schema_dn]
+ ctx.full_nc_list = [ctx.base_dn, ctx.config_dn, ctx.schema_dn]
+ ctx.krbtgt_dn = "CN=krbtgt_%s,CN=Users,%s" % (ctx.myname, ctx.base_dn)
+
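+        # SIDs that must never / may be revealed to this RODC; the join uses
+        # these to populate msDS-NeverRevealGroup and msDS-RevealOnDemandGroup
+        # on the RODC's computer object.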
+ ctx.never_reveal_sid = ["<SID=%s-%s>" % (ctx.domsid, security.DOMAIN_RID_RODC_DENY),
+ "<SID=%s>" % security.SID_BUILTIN_ADMINISTRATORS,
+ "<SID=%s>" % security.SID_BUILTIN_SERVER_OPERATORS,
+ "<SID=%s>" % security.SID_BUILTIN_BACKUP_OPERATORS,
+ "<SID=%s>" % security.SID_BUILTIN_ACCOUNT_OPERATORS]
+ ctx.reveal_sid = "<SID=%s-%s>" % (ctx.domsid, security.DOMAIN_RID_RODC_ALLOW)
+
+ mysid = ctx.get_mysid()
+ admin_dn = "<SID=%s>" % mysid
+ ctx.managedby = admin_dn
+
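+        # UF_PARTIAL_SECRETS_ACCOUNT is what marks the machine account as an RODC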
+ ctx.userAccountControl = (samba.dsdb.UF_WORKSTATION_TRUST_ACCOUNT |
+ samba.dsdb.UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION |
+ samba.dsdb.UF_PARTIAL_SECRETS_ACCOUNT)
+
+ ctx.connection_dn = "CN=RODC Connection (FRS),%s" % ctx.ntds_dn
+ ctx.secure_channel_type = misc.SEC_CHAN_RODC
+ ctx.RODC = True
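+        # Replica flags for the join: pull everything like a normal RODC, but
+        # have the server zero out secret attributes (SPECIAL_SECRET_PROCESSING)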
+ ctx.replica_flags = (drsuapi.DRSUAPI_DRS_INIT_SYNC |
+ drsuapi.DRSUAPI_DRS_PER_SYNC |
+ drsuapi.DRSUAPI_DRS_GET_ANC |
+ drsuapi.DRSUAPI_DRS_NEVER_SYNCED |
+ drsuapi.DRSUAPI_DRS_SPECIAL_SECRET_PROCESSING)
+
+ ctx.join_add_objects()
diff --git a/source4/torture/drs/python/repl_schema.py b/source4/torture/drs/python/repl_schema.py
new file mode 100644
index 0000000..9c039a5
--- /dev/null
+++ b/source4/torture/drs/python/repl_schema.py
@@ -0,0 +1,444 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Tests various schema replication scenarios
+#
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2010
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc2_dns_name
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN repl_schema -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import time
+import random
+import ldb
+import drs_base
+
+from ldb import (
+ ERR_NO_SUCH_OBJECT,
+ LdbError,
+ SCOPE_BASE,
+ Message,
+ FLAG_MOD_ADD,
+ FLAG_MOD_REPLACE
+)
+from samba.dcerpc import drsuapi, misc
+from samba.drs_utils import drs_DsBind
+from samba import dsdb
+
+
+class DrsReplSchemaTestCase(drs_base.DrsBaseTestCase):
+
+ # prefix for all objects created
+ obj_prefix = None
+ # current Class or Attribute object id
+ obj_id = 0
+
+ def _exop_req8(self, dest_dsa, invocation_id, nc_dn_str, exop,
+ replica_flags=0, max_objects=0):
+ req8 = drsuapi.DsGetNCChangesRequest8()
+
+ req8.destination_dsa_guid = misc.GUID(dest_dsa) if dest_dsa else misc.GUID()
+ req8.source_dsa_invocation_id = misc.GUID(invocation_id)
+ req8.naming_context = drsuapi.DsReplicaObjectIdentifier()
+ req8.naming_context.dn = str(nc_dn_str)
+ req8.highwatermark = drsuapi.DsReplicaHighWaterMark()
+ req8.highwatermark.tmp_highest_usn = 0
+ req8.highwatermark.reserved_usn = 0
+ req8.highwatermark.highest_usn = 0
+ req8.uptodateness_vector = None
+ req8.replica_flags = replica_flags
+ req8.max_object_count = max_objects
+ req8.max_ndr_size = 402116
+ req8.extended_op = exop
+ req8.fsmo_info = 0
+ req8.partial_attribute_set = None
+ req8.partial_attribute_set_ex = None
+ req8.mapping_ctr.num_mappings = 0
+ req8.mapping_ctr.mappings = None
+
+ return req8
+
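+ # Illustrative use (a sketch mirroring the call later in this file):
+ #   req8 = self._exop_req8(dest_dsa=None,
+ #                          invocation_id=self.ldb_dc1.get_invocation_id(),
+ #                          nc_dn_str=obj_dn,
+ #                          exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ)
+ #   (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)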
+ def setUp(self):
+ super(DrsReplSchemaTestCase, self).setUp()
+
+ # temporarily disable automatic replication
+ self._disable_all_repl(self.dnsname_dc1)
+ self._disable_all_repl(self.dnsname_dc2)
+
+ # make sure DCs are synchronized before the test
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # initialize objects prefix if not done yet
+ if self.obj_prefix is None:
+ t = time.strftime("%s", time.gmtime())
+ DrsReplSchemaTestCase.obj_prefix = "DrsReplSchema-%s" % t
+
+ def tearDown(self):
+ self._enable_all_repl(self.dnsname_dc1)
+ self._enable_all_repl(self.dnsname_dc2)
+ super(DrsReplSchemaTestCase, self).tearDown()
+
+ def _make_obj_names(self, base_name):
+ '''Try to create a unique name for an object
+ that is to be added to schema'''
+ self.obj_id += 1
+ obj_name = "%s-%d-%s" % (self.obj_prefix, self.obj_id, base_name)
+ obj_ldn = obj_name.replace("-", "")
+ obj_dn = ldb.Dn(self.ldb_dc1, "CN=X")
+ obj_dn.add_base(ldb.Dn(self.ldb_dc1, self.schema_dn))
+ obj_dn.set_component(0, "CN", obj_name)
+ return (obj_dn, obj_name, obj_ldn)
+
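+ # For illustration, with obj_prefix "DrsReplSchema-1300000000" and
+ # base_name "cls-S" this produces roughly:
+ #   obj_name: DrsReplSchema-1300000000-1-cls-S
+ #   obj_ldn:  DrsReplSchema13000000001clsS
+ #   obj_dn:   CN=DrsReplSchema-1300000000-1-cls-S,<schema DN>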
+ def _schema_new_class(self, ldb_ctx, base_name, base_int, oc_cat=1, attrs=None):
+ (class_dn, class_name, class_ldn) = self._make_obj_names(base_name)
+ rec = {"dn": class_dn,
+ "objectClass": ["top", "classSchema"],
+ "cn": class_name,
+ "lDAPDisplayName": class_ldn,
+ "governsId": "1.3.6.1.4.1.7165.4.6.2.5."
+ + str((100000 * base_int) + random.randint(1, 100000)) + ".1.5.13",
+ "instanceType": "4",
+ "objectClassCategory": "%d" % oc_cat,
+ "subClassOf": "top",
+ "systemOnly": "FALSE"}
+ # allow overriding/adding attributes
+ if attrs is not None:
+ rec.update(attrs)
+ # add it to the Schema
+ try:
+ ldb_ctx.add(rec)
+ except LdbError as e:
+ (enum, estr) = e.args
+ self.fail("Adding record failed with %d/%s" % (enum, estr))
+
+ self._ldap_schemaUpdateNow(ldb_ctx)
+ return (rec["lDAPDisplayName"], rec["dn"])
+
+ def _schema_new_attr(self, ldb_ctx, base_name, base_int, attrs=None):
+ (attr_dn, attr_name, attr_ldn) = self._make_obj_names(base_name)
+ rec = {"dn": attr_dn,
+ "objectClass": ["top", "attributeSchema"],
+ "cn": attr_name,
+ "lDAPDisplayName": attr_ldn,
+ "attributeId": "1.3.6.1.4.1.7165.4.6.1.5."
+ + str((100000 * base_int) + random.randint(1, 100000)) + ".1.5.13",
+ "attributeSyntax": "2.5.5.12",
+ "omSyntax": "64",
+ "instanceType": "4",
+ "isSingleValued": "TRUE",
+ "systemOnly": "FALSE"}
+ # allow overriding/adding attributes
+ if attrs is not None:
+ rec.update(attrs)
+ # add it to the Schema
+ ldb_ctx.add(rec)
+ self._ldap_schemaUpdateNow(ldb_ctx)
+ return (rec["lDAPDisplayName"], rec["dn"])
+
+ def _check_object(self, obj_dn):
+ '''Check if object obj_dn exists on both DCs'''
+ res_dc1 = self.ldb_dc1.search(base=obj_dn,
+ scope=SCOPE_BASE,
+ attrs=["*"])
+ self.assertEqual(len(res_dc1), 1,
+ "%s doesn't exist on %s" % (obj_dn, self.dnsname_dc1))
+ try:
+ res_dc2 = self.ldb_dc2.search(base=obj_dn,
+ scope=SCOPE_BASE,
+ attrs=["*"])
+ except LdbError as e1:
+ (enum, estr) = e1.args
+ if enum == ERR_NO_SUCH_OBJECT:
+ self.fail("%s doesn't exist on %s" % (obj_dn, self.dnsname_dc2))
+ raise
+ self.assertEqual(len(res_dc2), 1,
+ "%s doesn't exist on %s" % (obj_dn, self.dnsname_dc2))
+
+ def test_class(self):
+ """Simple test for classSchema replication"""
+ # add new classSchema object
+ (c_ldn, c_dn) = self._schema_new_class(self.ldb_dc1, "cls-S", 0)
+ # force replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, nc_dn=self.schema_dn, forced=True)
+ # check object is replicated
+ self._check_object(c_dn)
+
+ def test_classInheritance(self):
+ """Test inheritance through subClassOf
+ I think 5 levels of inheritance is pretty decent for now."""
+ # add 5 levels deep hierarchy
+ c_dn_list = []
+ c_ldn_last = None
+ for i in range(1, 6):
+ base_name = "cls-I-%02d" % i
+ (c_ldn, c_dn) = self._schema_new_class(self.ldb_dc1, base_name, i)
+ c_dn_list.append(c_dn)
+ if c_ldn_last:
+ # inherit from last class added
+ m = Message.from_dict(self.ldb_dc1,
+ {"dn": c_dn,
+ "subClassOf": c_ldn_last},
+ FLAG_MOD_REPLACE)
+ self.ldb_dc1.modify(m)
+ # store last class ldapDisplayName
+ c_ldn_last = c_ldn
+ # force replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, nc_dn=self.schema_dn, forced=True)
+ # check objects are replicated
+ for c_dn in c_dn_list:
+ self._check_object(c_dn)
+
+ def test_classWithCustomAttribute(self):
+ """Create a new Attribute and a Class that has a value
+ for the newly created attribute.
+ This exercises the code path that searches for
+ AttributeID_id in the Schema cache"""
+ # add new attributeSchema object
+ (a_ldn, a_dn) = self._schema_new_attr(self.ldb_dc1, "attr-A", 7)
+ # add a base classSchema class so we can use our new
+ # attribute in class definition in a sibling class
+ (c_ldn, c_dn) = self._schema_new_class(self.ldb_dc1, "cls-A", 8,
+ 1,
+ {"systemMayContain": a_ldn,
+ "subClassOf": "classSchema"})
+ # add new classSchema object with value for the a_ldn attribute
+ (c_ldn, c_dn) = self._schema_new_class(self.ldb_dc1, "cls-B", 9,
+ 1,
+ {"objectClass": ["top", "classSchema", c_ldn],
+ a_ldn: "test_classWithCustomAttribute"})
+ # force replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, nc_dn=self.schema_dn, forced=True)
+ # check objects are replicated
+ self._check_object(c_dn)
+ self._check_object(a_dn)
+
+ def test_classWithCustomLinkAttribute(self):
+ """Create a new Attribute and a Class that has a value
+ for the newly created attribute.
+ This exercises the code path that searches for
+ AttributeID_id in the Schema cache"""
+ # add new attributeSchema object
+ (a_ldn, a_dn) = self._schema_new_attr(self.ldb_dc1, "attr-Link-X", 10,
+ attrs={'linkID': "1.2.840.113556.1.2.50",
+ "attributeSyntax": "2.5.5.1",
+ "omSyntax": "127"})
+ # add a base classSchema class so we can use our new
+ # attribute in class definition in a sibling class
+ (c_ldn, c_dn) = self._schema_new_class(self.ldb_dc1, "cls-Link-Y", 11,
+ 1,
+ {"systemMayContain": a_ldn,
+ "subClassOf": "classSchema"})
+ # add new classSchema object with value for the a_ldn attribute
+ (c_ldn, c_dn) = self._schema_new_class(self.ldb_dc1, "cls-Link-Z", 12,
+ 1,
+ {"objectClass": ["top", "classSchema", c_ldn],
+ a_ldn: self.schema_dn})
+ # force replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, nc_dn=self.schema_dn, forced=True)
+ # check objects are replicated
+ self._check_object(c_dn)
+ self._check_object(a_dn)
+
+ res = self.ldb_dc1.search(base="",
+ scope=SCOPE_BASE,
+ attrs=["domainFunctionality"])
+
+ if int(res[0]["domainFunctionality"][0]) > dsdb.DS_DOMAIN_FUNCTION_2000:
+ res = self.ldb_dc1.search(base=a_dn,
+ scope=SCOPE_BASE,
+ attrs=["msDS-IntId"])
+ self.assertEqual(1, len(res))
+ self.assertTrue("msDS-IntId" in res[0])
+ int_id = int(res[0]["msDS-IntId"][0])
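+ # The value may come back as a negative (signed 32-bit) number;
+ # normalise it to the unsigned form used for attids on the wire.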
+ if int_id < 0:
+ int_id += (1 << 32)
+
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=c_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ replica_flags=drsuapi.DRSUAPI_DRS_SYNC_FORCED)
+
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+
+ for link in ctr.linked_attributes:
+ self.assertTrue(link.attid != int_id,
+ 'Got %d for both' % link.attid)
+
+ def test_attribute(self):
+ """Simple test for attributeSchema replication"""
+ # add new attributeSchema object
+ (a_ldn, a_dn) = self._schema_new_attr(self.ldb_dc1, "attr-S", 13)
+ # force replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, nc_dn=self.schema_dn, forced=True)
+ # check object is replicated
+ self._check_object(a_dn)
+
+ def test_attribute_on_ou(self):
+ """Simple test that an OU with a custom attribute is replicated correctly
+
+ This ensures that the server correctly replicates an object that
+ carries a newly added schema attribute.
+ """
+
+ # add new attributeSchema object
+ (a_ldn, a_dn) = self._schema_new_attr(self.ldb_dc1, "attr-OU-S", 14)
+ (c_ldn, c_dn) = self._schema_new_class(self.ldb_dc1, "cls-OU-A", 15,
+ 3,
+ {"mayContain": a_ldn})
+ ou_dn = ldb.Dn(self.ldb_dc1, "ou=X")
+ ou_dn.add_base(self.ldb_dc1.get_default_basedn())
+ ou_dn.set_component(0, "OU", a_dn.get_component_value(0))
+ rec = {"dn": ou_dn,
+ "objectClass": ["top", "organizationalUnit", c_ldn],
+ "ou": ou_dn.get_component_value(0),
+ a_ldn: "test OU"}
+ self.ldb_dc1.add(rec)
+
+ # force replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, nc_dn=self.domain_dn, forced=True)
+ # check objects are replicated
+ self._check_object(c_dn)
+ self._check_object(a_dn)
+ self._check_object(ou_dn)
+ self.ldb_dc1.delete(ou_dn)
+
+ def test_all(self):
+ """Basic plan is to create a bunch of classSchema
+ and attributeSchema objects, replicate the Schema NC
+ and then check all objects are replicated correctly"""
+
+ # add new classSchema object
+ (c_ldn, c_dn) = self._schema_new_class(self.ldb_dc1, "cls-A", 16)
+ # add new attributeSchema object
+ (a_ldn, a_dn) = self._schema_new_attr(self.ldb_dc1, "attr-A", 17)
+
+ # add attribute to the class we have
+ m = Message.from_dict(self.ldb_dc1,
+ {"dn": c_dn,
+ "mayContain": a_ldn},
+ FLAG_MOD_ADD)
+ self.ldb_dc1.modify(m)
+
+ # force replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, nc_dn=self.schema_dn, forced=True)
+
+ # check objects are replicated
+ self._check_object(c_dn)
+ self._check_object(a_dn)
+
+ def test_classWithCustomBinaryDNLinkAttribute(self):
+ # Add a new attribute to the schema, which has binary DN syntax (2.5.5.7)
+ (bin_ldn, bin_dn) = self._schema_new_attr(self.ldb_dc1, "attr-Link-Bin", 18,
+ attrs={"linkID": "1.2.840.113556.1.2.50",
+ "attributeSyntax": "2.5.5.7",
+ "omSyntax": "127"})
+
+ (bin_ldn_b, bin_dn_b) = self._schema_new_attr(self.ldb_dc1, "attr-Link-Bin-Back", 19,
+ attrs={"linkID": bin_ldn,
+ "attributeSyntax": "2.5.5.1",
+ "omSyntax": "127"})
+
+ # Add a new class to the schema which can have the binary DN attribute
+ (c_ldn, c_dn) = self._schema_new_class(self.ldb_dc1, "cls-Link-Bin", 20,
+ 3,
+ {"mayContain": bin_ldn})
+ (c_ldn_b, c_dn_b) = self._schema_new_class(self.ldb_dc1, "cls-Link-Bin-Back", 21,
+ 3,
+ {"mayContain": bin_ldn_b})
+
+ link_end_dn = ldb.Dn(self.ldb_dc1, "ou=X")
+ link_end_dn.add_base(self.ldb_dc1.get_default_basedn())
+ link_end_dn.set_component(0, "OU", bin_dn_b.get_component_value(0))
+
+ ou_dn = ldb.Dn(self.ldb_dc1, "ou=X")
+ ou_dn.add_base(self.ldb_dc1.get_default_basedn())
+ ou_dn.set_component(0, "OU", bin_dn.get_component_value(0))
+
+ # Add an instance of the class to be pointed at
+ rec = {"dn": link_end_dn,
+ "objectClass": ["top", "organizationalUnit", c_ldn_b],
+ "ou": link_end_dn.get_component_value(0)}
+ self.ldb_dc1.add(rec)
+
+ # .. and one that does, and points to the first one
+ rec = {"dn": ou_dn,
+ "objectClass": ["top", "organizationalUnit", c_ldn],
+ "ou": ou_dn.get_component_value(0)}
+ self.ldb_dc1.add(rec)
+
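+ # DN-Binary value format is B:<hex-char-count>:<hex-data>:<dn>, so the
+ # value below carries 4 bytes (8 hex chars) of opaque data together
+ # with the link target DN.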
+ m = Message.from_dict(self.ldb_dc1,
+ {"dn": ou_dn,
+ bin_ldn: "B:8:1234ABCD:%s" % str(link_end_dn)},
+ FLAG_MOD_ADD)
+ self.ldb_dc1.modify(m)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+ nc_dn=self.schema_dn, forced=True)
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+ nc_dn=self.domain_dn, forced=True)
+
+ self._check_object(c_dn)
+ self._check_object(bin_dn)
+
+ # Make sure we can delete the backlink
+ self.ldb_dc1.delete(link_end_dn)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+ nc_dn=self.schema_dn, forced=True)
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+ nc_dn=self.domain_dn, forced=True)
+
+ def test_rename(self):
+ """Basic plan is to create a classSchema
+ object, rename it, replicate the Schema NC
+ and then check the renamed object is replicated correctly"""
+
+ # add new classSchema object
+ (c_ldn, c_dn) = self._schema_new_class(self.ldb_dc1, "cls-B", 20)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+ nc_dn=self.schema_dn, forced=True)
+
+ # check objects are replicated
+ self._check_object(c_dn)
+
+ # rename the Class CN
+ c_dn_new = ldb.Dn(self.ldb_dc1, str(c_dn))
+ c_dn_new.set_component(0,
+ "CN",
+ c_dn.get_component_value(0) + "-NEW")
+ try:
+ self.ldb_dc1.rename(c_dn, c_dn_new)
+ except LdbError as e2:
+ (num, _) = e2.args
+ self.fail("failed to change CN for %s: %s" % (c_dn, _))
+
+ # force replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+ nc_dn=self.schema_dn, forced=True)
+
+ # check objects are replicated
+ self._check_object(c_dn_new)
diff --git a/source4/torture/drs/python/repl_secdesc.py b/source4/torture/drs/python/repl_secdesc.py
new file mode 100644
index 0000000..38ae25a
--- /dev/null
+++ b/source4/torture/drs/python/repl_secdesc.py
@@ -0,0 +1,400 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Unix SMB/CIFS implementation.
+# Copyright (C) Catalyst.Net Ltd. 2017
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2019
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import drs_base
+import ldb
+import samba
+from samba import sd_utils
+from ldb import LdbError
+
+class ReplAclTestCase(drs_base.DrsBaseTestCase):
+
+ def setUp(self):
+ super(ReplAclTestCase, self).setUp()
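+ # SDDL shorthand used throughout: an ACE granting Generic All (GA) to
+ # SYSTEM (SY) with container/object inheritance. As the variable names
+ # suggest, the server stores it with normalised flags (plus IO), and
+ # children inherit it with the ID flag added.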
+ self.mod = "(A;CIOI;GA;;;SY)"
+ self.mod_becomes = "(A;OICIIO;GA;;;SY)"
+ self.mod_inherits_as = "(A;OICIIOID;GA;;;SY)"
+
+ self.sd_utils_dc1 = sd_utils.SDUtils(self.ldb_dc1)
+ self.sd_utils_dc2 = sd_utils.SDUtils(self.ldb_dc2)
+
+ self.ou = samba.tests.create_test_ou(self.ldb_dc1,
+ "test_acl_inherit")
+
+ # disable replication for the tests so we can control at what point
+ # the DCs try to replicate
+ self._disable_all_repl(self.dnsname_dc1)
+ self._disable_all_repl(self.dnsname_dc2)
+
+ # make sure DCs are synchronized before the test
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+
+ def tearDown(self):
+ self.ldb_dc1.delete(self.ou, ["tree_delete:1"])
+
+ # re-enable replication
+ self._enable_all_repl(self.dnsname_dc1)
+ self._enable_all_repl(self.dnsname_dc2)
+
+ super(ReplAclTestCase, self).tearDown()
+
+ def test_acl_inheirt_new_object_1_pass(self):
+ # Set the inherited ACL on the parent OU
+ self.sd_utils_dc1.dacl_add_ace(self.ou, self.mod)
+
+ # Assert ACL set stuck as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc1.get_sd_as_sddl(self.ou))
+
+ # Make a new object
+ dn = ldb.Dn(self.ldb_dc1, "OU=l2,%s" % self.ou)
+ self.ldb_dc1.add({"dn": dn, "objectclass": "organizationalUnit"})
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ # Assert ACL replicated as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc2.get_sd_as_sddl(self.ou))
+
+ # Confirm inherited ACLs are identical and were inherited
+
+ self.assertIn(self.mod_inherits_as,
+ self.sd_utils_dc1.get_sd_as_sddl(dn))
+ self.assertEqual(self.sd_utils_dc1.get_sd_as_sddl(dn),
+ self.sd_utils_dc2.get_sd_as_sddl(dn))
+
+ def test_acl_inheirt_new_object(self):
+ # Set the inherited ACL on the parent OU
+ self.sd_utils_dc1.dacl_add_ace(self.ou, self.mod)
+
+ # Assert ACL set stuck as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc1.get_sd_as_sddl(self.ou))
+
+ # Replicate to DC2
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ # Make a new object
+ dn = ldb.Dn(self.ldb_dc1, "OU=l2,%s" % self.ou)
+ self.ldb_dc1.add({"dn": dn, "objectclass": "organizationalUnit"})
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ # Assert ACL replicated as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc2.get_sd_as_sddl(self.ou))
+
+ # Confirm inherited ACLs are identical and were inherited
+
+ self.assertIn(self.mod_inherits_as,
+ self.sd_utils_dc1.get_sd_as_sddl(dn))
+ self.assertEqual(self.sd_utils_dc1.get_sd_as_sddl(dn),
+ self.sd_utils_dc2.get_sd_as_sddl(dn))
+
+ def test_acl_inherit_existing_object(self):
+ # Make a new object
+ dn = ldb.Dn(self.ldb_dc1, "OU=l2,%s" % self.ou)
+ self.ldb_dc1.add({"dn": dn, "objectclass": "organizationalUnit"})
+
+ try:
+ self.ldb_dc2.search(scope=ldb.SCOPE_BASE,
+ base=dn,
+ attrs=[])
+ self.fail()
+ except LdbError as err:
+ enum = err.args[0]
+ self.assertEqual(enum, ldb.ERR_NO_SUCH_OBJECT)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ # Confirm it is now replicated
+ self.ldb_dc2.search(scope=ldb.SCOPE_BASE,
+ base=dn,
+ attrs=[])
+
+ # Set the inherited ACL on the parent OU
+ self.sd_utils_dc1.dacl_add_ace(self.ou, self.mod)
+
+ # Assert ACL set stuck as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc1.get_sd_as_sddl(self.ou))
+
+ # Replicate to DC2
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ # Confirm inherited ACLs are identical and were inherited
+
+ # Assert ACL replicated as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc2.get_sd_as_sddl(self.ou))
+
+ self.assertIn(self.mod_inherits_as,
+ self.sd_utils_dc1.get_sd_as_sddl(dn))
+ self.assertEqual(self.sd_utils_dc1.get_sd_as_sddl(dn),
+ self.sd_utils_dc2.get_sd_as_sddl(dn))
+
+ def test_acl_inheirt_existing_object_1_pass(self):
+ # Make a new object
+ dn = ldb.Dn(self.ldb_dc1, "OU=l2,%s" % self.ou)
+ self.ldb_dc1.add({"dn": dn, "objectclass": "organizationalUnit"})
+
+ try:
+ self.ldb_dc2.search(scope=ldb.SCOPE_BASE,
+ base=dn,
+ attrs=[])
+ self.fail()
+ except LdbError as err:
+ enum = err.args[0]
+ self.assertEqual(enum, ldb.ERR_NO_SUCH_OBJECT)
+
+ # Set the inherited ACL on the parent OU
+ self.sd_utils_dc1.dacl_add_ace(self.ou, self.mod)
+
+ # Assert ACL set as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc1.get_sd_as_sddl(self.ou))
+
+ # Replicate to DC2
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ # Assert ACL replicated as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc2.get_sd_as_sddl(self.ou))
+
+ # Confirm inherited ACLs are identical and were inherited
+
+ self.assertIn(self.mod_inherits_as,
+ self.sd_utils_dc1.get_sd_as_sddl(dn))
+ self.assertEqual(self.sd_utils_dc1.get_sd_as_sddl(dn),
+ self.sd_utils_dc2.get_sd_as_sddl(dn))
+
+ def test_acl_inheirt_renamed_object(self):
+ # Make a new object
+ new_ou = samba.tests.create_test_ou(self.ldb_dc1,
+ "acl_test_l2")
+
+ sub_ou_dn = ldb.Dn(self.ldb_dc1, "OU=l2,%s" % self.ou)
+
+ try:
+ self.ldb_dc2.search(scope=ldb.SCOPE_BASE,
+ base=new_ou,
+ attrs=[])
+ self.fail()
+ except LdbError as err:
+ enum = err.args[0]
+ self.assertEqual(enum, ldb.ERR_NO_SUCH_OBJECT)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ # Confirm it is now replicated
+ self.ldb_dc2.search(scope=ldb.SCOPE_BASE,
+ base=new_ou,
+ attrs=[])
+
+ # Set the inherited ACL on the parent OU on DC1
+ self.sd_utils_dc1.dacl_add_ace(self.ou, self.mod)
+
+ # Assert ACL set as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc1.get_sd_as_sddl(self.ou))
+
+ # Replicate to DC2
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ # Assert ACL replicated as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc2.get_sd_as_sddl(self.ou))
+
+ # Rename to under self.ou
+
+ self.ldb_dc1.rename(new_ou, sub_ou_dn)
+
+ # Replicate to DC2
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ # Confirm inherited ACLs are identical and were inherited
+ self.assertIn(self.mod_inherits_as,
+ self.sd_utils_dc1.get_sd_as_sddl(sub_ou_dn))
+ self.assertEqual(self.sd_utils_dc1.get_sd_as_sddl(sub_ou_dn),
+ self.sd_utils_dc2.get_sd_as_sddl(sub_ou_dn))
+
+
+ def test_acl_inheirt_renamed_child_object(self):
+ # Make a new OU
+ new_ou = samba.tests.create_test_ou(self.ldb_dc1,
+ "acl_test_l2")
+
+ # Here is where the new OU will end up at the end.
+ sub2_ou_dn_final = ldb.Dn(self.ldb_dc1, "OU=l2,%s" % self.ou)
+
+ sub3_ou_dn = ldb.Dn(self.ldb_dc1, "OU=l3,%s" % new_ou)
+ sub3_ou_dn_final = ldb.Dn(self.ldb_dc1, "OU=l3,%s" % sub2_ou_dn_final)
+
+ self.ldb_dc1.add({"dn": sub3_ou_dn,
+ "objectclass": "organizationalUnit"})
+
+ sub4_ou_dn = ldb.Dn(self.ldb_dc1, "OU=l4,%s" % sub3_ou_dn)
+ sub4_ou_dn_final = ldb.Dn(self.ldb_dc1, "OU=l4,%s" % sub3_ou_dn_final)
+
+ self.ldb_dc1.add({"dn": sub4_ou_dn,
+ "objectclass": "organizationalUnit"})
+
+ try:
+ self.ldb_dc2.search(scope=ldb.SCOPE_BASE,
+ base=new_ou,
+ attrs=[])
+ self.fail()
+ except LdbError as err:
+ enum = err.args[0]
+ self.assertEqual(enum, ldb.ERR_NO_SUCH_OBJECT)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ # Confirm it is now replicated
+ self.ldb_dc2.search(scope=ldb.SCOPE_BASE,
+ base=new_ou,
+ attrs=[])
+
+ #
+ # Given a tree new_ou -> l3 -> l4
+ #
+
+ # Set the inherited ACL on the grandchild OU (l3) on DC1
+ self.sd_utils_dc1.dacl_add_ace(sub3_ou_dn, self.mod)
+
+ # Assert ACL set stuck as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc1.get_sd_as_sddl(sub3_ou_dn))
+
+ # Rename new_ou (l2) to under self.ou (this must happen second). If the
+ # inheritance between l3 and l4 is name-based, this could
+ # break.
+
+ # The tree is now self.ou -> l2 -> l3 -> l4
+
+ self.ldb_dc1.rename(new_ou, sub2_ou_dn_final)
+
+ # Assert ACL set remained as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc1.get_sd_as_sddl(sub3_ou_dn_final))
+
+ # Replicate to DC2
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ # Confirm the ACL set on l3 is present and identical on both DCs
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc2.get_sd_as_sddl(sub3_ou_dn_final))
+ self.assertEqual(self.sd_utils_dc1.get_sd_as_sddl(sub3_ou_dn_final),
+ self.sd_utils_dc2.get_sd_as_sddl(sub3_ou_dn_final))
+
+ # Confirm inherited ACLs (from l3 to l4) are identical
+ # and were inherited
+ self.assertIn(self.mod_inherits_as,
+ self.sd_utils_dc1.get_sd_as_sddl(sub4_ou_dn_final))
+ self.assertEqual(self.sd_utils_dc1.get_sd_as_sddl(sub4_ou_dn_final),
+ self.sd_utils_dc2.get_sd_as_sddl(sub4_ou_dn_final))
+
+
+ def test_acl_inheirt_renamed_object_in_conflict(self):
+ # Make a new object to be renamed under self.ou
+ new_ou = samba.tests.create_test_ou(self.ldb_dc1,
+ "acl_test_l2")
+
+ # Make a new OU under self.ou (on DC2)
+ sub_ou_dn = ldb.Dn(self.ldb_dc2, "OU=l2,%s" % self.ou)
+ self.ldb_dc2.add({"dn": sub_ou_dn,
+ "objectclass": "organizationalUnit"})
+
+ # Set the inherited ACL on the parent OU
+ self.sd_utils_dc1.dacl_add_ace(self.ou, self.mod)
+
+ # Assert ACL set stuck as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc1.get_sd_as_sddl(self.ou))
+
+ # Replicate to DC2
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ # Rename to under self.ou
+ self.ldb_dc1.rename(new_ou, sub_ou_dn)
+ self.assertIn(self.mod_inherits_as,
+ self.sd_utils_dc1.get_sd_as_sddl(sub_ou_dn))
+
+ # Replicate to DC2 (this will cause a conflict; DC1 wins because its
+ # object's version is higher, having been named twice)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ children = self.ldb_dc2.search(scope=ldb.SCOPE_ONELEVEL,
+ base=self.ou,
+ attrs=[])
+ for child in children:
+ self.assertIn(self.mod_inherits_as,
+ self.sd_utils_dc2.get_sd_as_sddl(child.dn))
+ self.assertEqual(self.sd_utils_dc1.get_sd_as_sddl(sub_ou_dn),
+ self.sd_utils_dc2.get_sd_as_sddl(child.dn))
+
+ # Replicate back
+ self._net_drs_replicate(DC=self.dnsname_dc1,
+ fromDC=self.dnsname_dc2,
+ forced=True)
+
+ self.assertIn(self.mod_inherits_as,
+ self.sd_utils_dc1.get_sd_as_sddl(sub_ou_dn))
+
+ for child in children:
+ self.assertIn(self.mod_inherits_as,
+ self.sd_utils_dc1.get_sd_as_sddl(child.dn))
+ self.assertEqual(self.sd_utils_dc1.get_sd_as_sddl(child.dn),
+ self.sd_utils_dc2.get_sd_as_sddl(child.dn))
diff --git a/source4/torture/drs/python/replica_sync.py b/source4/torture/drs/python/replica_sync.py
new file mode 100644
index 0000000..f40b16d
--- /dev/null
+++ b/source4/torture/drs/python/replica_sync.py
@@ -0,0 +1,747 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Tests various schema replication scenarios
+#
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc2_dns_name
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN replica_sync -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import drs_base
+import samba.tests
+import time
+import ldb
+
+from ldb import (
+ SCOPE_BASE, LdbError, ERR_NO_SUCH_OBJECT)
+
+
+class DrsReplicaSyncTestCase(drs_base.DrsBaseTestCase):
+ """Intended as a black-box test case for the DsReplicaSync
+ implementation. It tests the behaviour of DsReplicaSync
+ when inbound replication is disabled"""
+
+ def setUp(self):
+ super(DrsReplicaSyncTestCase, self).setUp()
+
+ # This OU avoids this test conflicting with anything
+ # that may already be in the DB
+ self.top_ou = samba.tests.create_test_ou(self.ldb_dc1,
+ "replica_sync")
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ self.ou1 = None
+ self.ou2 = None
+
+ def tearDown(self):
+ self._cleanup_object(self.ou1)
+ self._cleanup_object(self.ou2)
+ self._cleanup_dn(self.top_ou)
+
+ # re-enable replication
+ self._enable_inbound_repl(self.dnsname_dc1)
+ self._enable_inbound_repl(self.dnsname_dc2)
+
+ super(DrsReplicaSyncTestCase, self).tearDown()
+
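+ # Delete the DN (with tree_delete) on both DCs; a missing object is
+ # acceptable because a test may have created it on only one side.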
+ def _cleanup_dn(self, dn):
+ try:
+ self.ldb_dc2.delete(dn, ["tree_delete:1"])
+ except LdbError as e:
+ (num, _) = e.args
+ self.assertEqual(num, ERR_NO_SUCH_OBJECT)
+ try:
+ self.ldb_dc1.delete(dn, ["tree_delete:1"])
+ except LdbError as e1:
+ (num, _) = e1.args
+ self.assertEqual(num, ERR_NO_SUCH_OBJECT)
+
+ def _cleanup_object(self, guid):
+ """Cleans up a test object, if it still exists"""
+ if guid is not None:
+ self._cleanup_dn('<GUID=%s>' % guid)
+
+ def test_ReplEnabled(self):
+ """Tests we can replicate when replication is enabled"""
+ self._enable_inbound_repl(self.dnsname_dc1)
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=False)
+
+ def test_ReplDisabled(self):
+ """Tests we can't replicate when replication is disabled"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+
+ ccache_name = self.get_creds_ccache_name()
+
+ # Tunnel the command line credentials down to the
+ # subcommand to avoid a new kinit
+ cmdline_auth = "--use-krb5-ccache=%s" % ccache_name
+
+ # bin/samba-tool drs <drs_command> <cmdline_auth>
+ cmd_list = ["drs", "replicate", cmdline_auth]
+
+ nc_dn = self.domain_dn
+ # bin/samba-tool drs replicate <Dest_DC_NAME> <Src_DC_NAME> <Naming Context>
+ cmd_list += [self.dnsname_dc1, self.dnsname_dc2, nc_dn]
+
+ (result, out, err) = self.runsubcmd(*cmd_list)
+ self.assertCmdFail(result)
+ self.assertTrue('WERR_DS_DRA_SINK_DISABLED' in err)
+
+ def test_ReplDisabledForced(self):
+ """Tests we can force replicate when replication is disabled"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+
+ def test_ReplLocal(self):
+ """Tests we can replicate direct to the local db"""
+ self._enable_inbound_repl(self.dnsname_dc1)
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=False, local=True, full_sync=True)
+
+ def _create_ou(self, samdb, name):
+ ldif = """
+dn: %s,%s
+objectClass: organizationalUnit
+""" % (name, self.top_ou)
+ samdb.add_ldif(ldif)
+ res = samdb.search(base="%s,%s" % (name, self.top_ou),
+ scope=SCOPE_BASE, attrs=["objectGUID"])
+ return self._GUID_string(res[0]["objectGUID"][0])
+
+ def _check_deleted(self, sam_ldb, guid):
+ # search for the OU by GUID, as it may be deleted
+ res = sam_ldb.search(base='<GUID=%s>' % guid,
+ controls=["show_deleted:1"],
+ attrs=["isDeleted", "objectCategory", "ou"])
+ self.assertEqual(len(res), 1)
+ ou_cur = res[0]
+ # Deleted Objects base DN
+ dodn = self._deleted_objects_dn(sam_ldb)
+ # now check properties of the OU
+ name_cur = ou_cur["ou"][0]
+ self.assertEqual(ou_cur["isDeleted"][0], b"TRUE")
+ self.assertTrue(not("objectCategory" in ou_cur))
+ self.assertTrue(dodn in str(ou_cur["dn"]),
+ "OU %s is deleted but it is not located under %s!" % (name_cur, dodn))
+
+ def test_ReplConflictsFullSync(self):
+ """Tests that objects created in conflict become conflict DNs (honour full sync override)"""
+
+ # First confirm local replication (so when we test against Windows, this fails fast without creating objects)
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, local=True, forced=True, full_sync=True)
+
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # Create conflicting objects on DC1 and DC2, with DC1 object created first
+ self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Full Sync")
+ # We have to sleep to ensure that the two objects have different timestamps
+ time.sleep(1)
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Full Sync")
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, local=True, forced=True, full_sync=True)
+
+ # Check that DC2 got the DC1 object, and OU1 was made into a conflict
+ res1 = self.ldb_dc2.search(base="<GUID=%s>" % self.ou1,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % self.ou2,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0]["name"][0])
+ print(res2[0]["name"][0])
+ self.assertFalse('CNF:%s' % self.ou2 in str(res2[0]["name"][0]))
+ self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc2, self.domain_dn) not in str(res1[0].dn))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc2, self.domain_dn) not in str(res2[0].dn))
+ self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
+ self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
+
+ # Delete both objects by GUID on DC2
+
+ self.ldb_dc2.delete('<GUID=%s>' % self.ou1)
+ self.ldb_dc2.delete('<GUID=%s>' % self.ou2)
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=True)
+
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
+
+ def test_ReplConflictsRemoteWin(self):
+ """Tests that objects created in conflict become conflict DNs"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # Create conflicting objects on DC1 and DC2, with DC1 object created first
+ self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Remote Conflict")
+ # We have to sleep to ensure that the two objects have different timestamps
+ time.sleep(1)
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Remote Conflict")
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ # Check that DC1 got the DC2 object, and OU1 was made into a conflict
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0]["name"][0])
+ print(res2[0]["name"][0])
+ self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
+ self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
+ self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
+
+ # Delete both objects by GUID on DC1
+
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
+
+ def test_ReplConflictsLocalWin(self):
+ """Tests that objects created in conflict become conflict DNs"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # Create conflicting objects on DC1 and DC2, with DC2 object created first
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Local Conflict")
+ # We have to sleep to ensure that the two objects have different timestamps
+ time.sleep(1)
+ self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Local Conflict")
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ # Check that DC1 got the DC2 object, and OU2 was made into a conflict
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0]["name"][0])
+ print(res2[0]["name"][0])
+ self.assertTrue('CNF:%s' % self.ou2 in str(res2[0]["name"][0]), "Got %s for %s" % (str(res2[0]["name"][0]), self.ou2))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
+ self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
+ self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
+
+ # Delete both objects by GUID on DC1
+
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
+
+ def test_ReplConflictsRemoteWin_with_child(self):
+ """Tests that objects created in conflict become conflict DNs"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # Create conflicting objects on DC1 and DC2, with DC1 object created first
+ self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Parent Remote Conflict")
+ # We have to sleep to ensure that the two objects have different timestamps
+ time.sleep(1)
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Parent Remote Conflict")
+ # Create children on DC2
+ ou1_child = self._create_ou(self.ldb_dc1, "OU=Test Child,OU=Test Parent Remote Conflict")
+ ou2_child = self._create_ou(self.ldb_dc2, "OU=Test Child,OU=Test Parent Remote Conflict")
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ # Check that DC1 got the DC2 object, and self.ou1 was made into a conflict
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0]["name"][0])
+ print(res2[0]["name"][0])
+ self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
+ self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
+ self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
+
+ # Delete both objects by GUID on DC1
+
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1, ["tree_delete:1"])
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2, ["tree_delete:1"])
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
+
+ self._check_deleted(self.ldb_dc1, ou1_child)
+ self._check_deleted(self.ldb_dc1, ou2_child)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, ou1_child)
+ self._check_deleted(self.ldb_dc2, ou2_child)
+
+ def test_ReplConflictsRenamedVsNewRemoteWin(self):
+ """Tests resolving a DN conflict between a renamed object and a new object"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # Create an OU and rename it on DC1
+ self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Remote Rename Conflict orig")
+ self.ldb_dc1.rename("<GUID=%s>" % self.ou1, "OU=Test Remote Rename Conflict,%s" % self.top_ou)
+
+ # We have to sleep to ensure that the two objects have different timestamps
+ time.sleep(1)
+
+ # create a conflicting object with the same DN on DC2
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Remote Rename Conflict")
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ # Check that DC1 got the DC2 object, and self.ou1 was made into a conflict
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0]["name"][0])
+ print(res2[0]["name"][0])
+ self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
+ self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
+ self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
+
+ # Delete both objects by GUID on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
+
+ def test_ReplConflictsRenamedVsNewLocalWin(self):
+ """Tests resolving a DN conflict between a renamed object and a new object"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # Create conflicting objects on DC1 and DC2, where the DC2 object has been renamed
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Rename Local Conflict orig")
+ self.ldb_dc2.rename("<GUID=%s>" % self.ou2, "OU=Test Rename Local Conflict,%s" % self.top_ou)
+ # We have to sleep to ensure that the two objects have different timestamps
+ time.sleep(1)
+ self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Rename Local Conflict")
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ # Check that DC1 got the DC2 object, and OU2 was made into a conflict
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0]["name"][0])
+ print(res2[0]["name"][0])
+ self.assertTrue('CNF:%s' % self.ou2 in str(res2[0]["name"][0]))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
+ self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
+ self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
+
+ # Delete both objects by GUID on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
+
+ def test_ReplConflictsRenameRemoteWin(self):
+ """Tests that objects created in conflict become conflict DNs"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # Create conflicting objects on DC1 and DC2, with DC1 object created first
+ self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Remote Rename Conflict")
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Remote Rename Conflict 2")
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ self.ldb_dc1.rename("<GUID=%s>" % self.ou1, "OU=Test Remote Rename Conflict 3,%s" % self.top_ou)
+ # We have to sleep to ensure that the two objects have different timestamps
+ time.sleep(1)
+ self.ldb_dc2.rename("<GUID=%s>" % self.ou2, "OU=Test Remote Rename Conflict 3,%s" % self.top_ou)
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ # Check that DC1 got the DC2 object, and self.ou1 was made into a conflict
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0]["name"][0])
+ print(res2[0]["name"][0])
+ self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
+ self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
+ self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
+
+ # Delete both objects by GUID on DC1
+
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
+
+ def test_ReplConflictsRenameRemoteWin_with_child(self):
+ """Tests that objects created in conflict become conflict DNs"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # Create conflicting objects on DC1 and DC2, with DC1 object created first
+ self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Parent Remote Rename Conflict")
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Parent Remote Rename Conflict 2")
+ # Create children on DC2
+ ou1_child = self._create_ou(self.ldb_dc1, "OU=Test Child,OU=Test Parent Remote Rename Conflict")
+ ou2_child = self._create_ou(self.ldb_dc2, "OU=Test Child,OU=Test Parent Remote Rename Conflict 2")
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ self.ldb_dc1.rename("<GUID=%s>" % self.ou1, "OU=Test Parent Remote Rename Conflict 3,%s" % self.top_ou)
+ # We have to sleep to ensure that the two objects have different timestamps
+ time.sleep(1)
+ self.ldb_dc2.rename("<GUID=%s>" % self.ou2, "OU=Test Parent Remote Rename Conflict 3,%s" % self.top_ou)
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ # Check that DC1 got the DC2 object, and self.ou1 was made into a conflict
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0]["name"][0])
+ print(res2[0]["name"][0])
+ self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
+ self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
+ self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
+
+ # Delete both objects by GUID on DC1
+
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1, ["tree_delete:1"])
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2, ["tree_delete:1"])
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
+
+ self._check_deleted(self.ldb_dc1, ou1_child)
+ self._check_deleted(self.ldb_dc1, ou2_child)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, ou1_child)
+ self._check_deleted(self.ldb_dc2, ou2_child)
+
+ def test_ReplConflictsRenameLocalWin(self):
+ """Tests that objects created in conflict become conflict DNs"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # Create conflicting objects on DC1 and DC2, with DC1 object created first
+ self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Rename Local Conflict")
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Rename Local Conflict 2")
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ self.ldb_dc2.rename("<GUID=%s>" % self.ou2, "OU=Test Rename Local Conflict 3,%s" % self.top_ou)
+ # We have to sleep to ensure that the two objects have different timestamps
+ time.sleep(1)
+ self.ldb_dc1.rename("<GUID=%s>" % self.ou1, "OU=Test Rename Local Conflict 3,%s" % self.top_ou)
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ # Check that DC1 got the DC2 object, and OU2 was made into a conflict
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0]["name"][0])
+ print(res2[0]["name"][0])
+ self.assertTrue('CNF:%s' % self.ou2 in str(res2[0]["name"][0]))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
+ self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
+ self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
+
+ # Delete both objects by GUID on DC1
+
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
+
+ def test_ReplLostAndFound(self):
+ """Tests that objects created under an OU deleted elsewhere end up in lostAndFound"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # Create two OUs on DC2
+ self.ou1 = self._create_ou(self.ldb_dc2, "OU=Deleted parent")
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Deleted parent 2")
+
+ # replicate them from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ # Delete both objects by GUID on DC1
+
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+
+ # Create children on DC2
+ ou1_child = self._create_ou(self.ldb_dc2, "OU=Test Child,OU=Deleted parent")
+ ou2_child = self._create_ou(self.ldb_dc2, "OU=Test Child,OU=Deleted parent 2")
+
+ # Replicate from DC2
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ # Check the sub-OUs are now in lostAndFound and the first one is a conflict DN
+
+ # Check that DC1 got the DC2 objects, and one or other child was made into a conflict
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % ou1_child,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc1.search(base="<GUID=%s>" % ou2_child,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0]["name"][0])
+ print(res2[0]["name"][0])
+ self.assertTrue('CNF:%s' % ou1_child in str(res1[0]["name"][0]) or 'CNF:%s' % ou2_child in str(res2[0]["name"][0]))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) in str(res1[0].dn))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) in str(res2[0].dn))
+ self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
+ self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
+
+ # Delete all objects by GUID on DC1
+
+ self.ldb_dc1.delete('<GUID=%s>' % ou1_child)
+ self.ldb_dc1.delete('<GUID=%s>' % ou2_child)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ # Check all deleted on DC1
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ self._check_deleted(self.ldb_dc1, ou1_child)
+ self._check_deleted(self.ldb_dc1, ou2_child)
+ # Check all deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
+ self._check_deleted(self.ldb_dc2, ou1_child)
+ self._check_deleted(self.ldb_dc2, ou2_child)
+
+ def test_ReplRenames(self):
+ """Tests that a series of renames of parents and children replicates to the correct final DNs"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # Create two OUs on DC2
+ self.ou1 = self._create_ou(self.ldb_dc2, "OU=Original parent")
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Original parent 2")
+
+ # replicate them from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ # Create children on DC1
+ ou1_child = self._create_ou(self.ldb_dc1, "OU=Test Child,OU=Original parent")
+ ou2_child = self._create_ou(self.ldb_dc1, "OU=Test Child 2,OU=Original parent")
+ ou3_child = self._create_ou(self.ldb_dc1, "OU=Test Case Child,OU=Original parent")
+
+ # replicate them from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ self.ldb_dc1.rename("<GUID=%s>" % ou2_child, "OU=Test Child 3,OU=Original parent 2,%s" % self.top_ou)
+ self.ldb_dc1.rename("<GUID=%s>" % ou1_child, "OU=Test Child 2,OU=Original parent 2,%s" % self.top_ou)
+ self.ldb_dc1.rename("<GUID=%s>" % ou2_child, "OU=Test Child,OU=Original parent 2,%s" % self.top_ou)
+ self.ldb_dc1.rename("<GUID=%s>" % ou3_child, "OU=Test CASE Child,OU=Original parent,%s" % self.top_ou)
+ self.ldb_dc2.rename("<GUID=%s>" % self.ou2, "OU=Original parent 3,%s" % self.top_ou)
+ self.ldb_dc2.rename("<GUID=%s>" % self.ou1, "OU=Original parent 2,%s" % self.top_ou)
+
+ # replicate them from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ # Check the sub-OUs are now under Original Parent 3 (original
+ # parent 2 for Test CASE Child), and both have the right names
+
+ # Check that DC2 got the DC1 object, and the renames are all correct
+ res1 = self.ldb_dc2.search(base="<GUID=%s>" % ou1_child,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % ou2_child,
+ scope=SCOPE_BASE, attrs=["name"])
+ res3 = self.ldb_dc2.search(base="<GUID=%s>" % ou3_child,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0].dn)
+ print(res2[0].dn)
+ print(res3[0].dn)
+ self.assertEqual('Test Child 2', str(res1[0]["name"][0]))
+ self.assertEqual('Test Child', str(res2[0]["name"][0]))
+ self.assertEqual('Test CASE Child', str(res3[0]["name"][0]))
+ self.assertEqual(str(res1[0].dn), "OU=Test Child 2,OU=Original parent 3,%s" % self.top_ou)
+ self.assertEqual(str(res2[0].dn), "OU=Test Child,OU=Original parent 3,%s" % self.top_ou)
+ self.assertEqual(str(res3[0].dn), "OU=Test CASE Child,OU=Original parent 2,%s" % self.top_ou)
+
+ # replicate them from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ # Check that DC1 got the DC2 object, and the renames are all correct
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % ou1_child,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc1.search(base="<GUID=%s>" % ou2_child,
+ scope=SCOPE_BASE, attrs=["name"])
+ res3 = self.ldb_dc1.search(base="<GUID=%s>" % ou3_child,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0].dn)
+ print(res2[0].dn)
+ print(res3[0].dn)
+ self.assertEqual('Test Child 2', str(res1[0]["name"][0]))
+ self.assertEqual('Test Child', str(res2[0]["name"][0]))
+ self.assertEqual('Test CASE Child', str(res3[0]["name"][0]))
+ self.assertEqual(str(res1[0].dn), "OU=Test Child 2,OU=Original parent 3,%s" % self.top_ou)
+ self.assertEqual(str(res2[0].dn), "OU=Test Child,OU=Original parent 3,%s" % self.top_ou)
+ self.assertEqual(str(res3[0].dn), "OU=Test CASE Child,OU=Original parent 2,%s" % self.top_ou)
+
+ # Delete all objects by GUID on DC1
+
+ self.ldb_dc1.delete('<GUID=%s>' % ou1_child)
+ self.ldb_dc1.delete('<GUID=%s>' % ou2_child)
+ self.ldb_dc1.delete('<GUID=%s>' % ou3_child)
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ # Check all deleted on DC1
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ self._check_deleted(self.ldb_dc1, ou1_child)
+ self._check_deleted(self.ldb_dc1, ou2_child)
+ self._check_deleted(self.ldb_dc1, ou3_child)
+ # Check all deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
+ self._check_deleted(self.ldb_dc2, ou1_child)
+ self._check_deleted(self.ldb_dc2, ou2_child)
+ self._check_deleted(self.ldb_dc2, ou3_child)
+
+ def reanimate_object(self, samdb, guid, new_dn):
+ """Re-animates a deleted object"""
+ res = samdb.search(base="<GUID=%s>" % guid, attrs=["isDeleted"],
+ controls=['show_deleted:1'], scope=SCOPE_BASE)
+ if len(res) != 1:
+ return
+
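+ # Re-animation is a single modify against the tombstone (located via the
+ # show_deleted control): remove isDeleted and replace distinguishedName to
+ # move the object back out of the Deleted Objects container.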
+ msg = ldb.Message()
+ msg.dn = res[0].dn
+ msg["isDeleted"] = ldb.MessageElement([], ldb.FLAG_MOD_DELETE, "isDeleted")
+ msg["distinguishedName"] = ldb.MessageElement([new_dn], ldb.FLAG_MOD_REPLACE, "distinguishedName")
+ samdb.modify(msg, ["show_deleted:1"])
+
+ def test_ReplReanimationConflict(self):
+ """
+ Checks that if a reanimated object conflicts with a new object, then
+ the conflict is resolved correctly.
+ """
+
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # create an object, "accidentally" delete it, and replicate the changes to both DCs
+ self.ou1 = self._create_ou(self.ldb_dc2, "OU=Conflict object")
+ self.ldb_dc2.delete('<GUID=%s>' % self.ou1)
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ # Now pretend that the admin for one DC resolves the problem by
+ # re-animating the object...
+ self.reanimate_object(self.ldb_dc1, self.ou1, "OU=Conflict object,%s" % self.top_ou)
+
+ # ...whereas another admin just creates an OU with the same name
+ # again on a different DC
+ time.sleep(1)
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Conflict object")
+
+ # Now sync the DCs to resolve the conflict
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ # Check the latest change won and self.ou1 was made into a conflict
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0]["name"][0])
+ print(res2[0]["name"][0])
+ self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
+ self.assertFalse('CNF:%s' % self.ou2 in str(res2[0]["name"][0]))
+
+ # Delete both objects by GUID on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
diff --git a/source4/torture/drs/python/replica_sync_rodc.py b/source4/torture/drs/python/replica_sync_rodc.py
new file mode 100644
index 0000000..cbdcc12
--- /dev/null
+++ b/source4/torture/drs/python/replica_sync_rodc.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Test conflict scenarios on the RODC
+#
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
+# Copyright (C) Catalyst.NET Ltd 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc2_dns_name (RODC)
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN replica_sync_rodc -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import drs_base
+import samba.tests
+import time
+import ldb
+from samba.common import get_string
+
+from ldb import (
+ SCOPE_BASE, LdbError, ERR_NO_SUCH_OBJECT)
+
+
+class DrsReplicaSyncTestCase(drs_base.DrsBaseTestCase):
+ """Intended as a black box test case for DsReplicaSync
+ implementation. It should test the behavior in cases
+ where inbound replication is disabled."""
+
+ def setUp(self):
+ super(DrsReplicaSyncTestCase, self).setUp()
+ self._disable_all_repl(self.dnsname_dc1)
+ self.ou1 = None
+ self.ou2 = None
+
+ def tearDown(self):
+ # re-enable replication
+ self._enable_all_repl(self.dnsname_dc1)
+
+ super(DrsReplicaSyncTestCase, self).tearDown()
+
+ def _create_ou(self, samdb, name):
+ ldif = """
+dn: %s,%s
+objectClass: organizationalUnit
+""" % (name, self.domain_dn)
+ samdb.add_ldif(ldif)
+ res = samdb.search(base="%s,%s" % (name, self.domain_dn),
+ scope=SCOPE_BASE, attrs=["objectGUID"])
+ return get_string(self._GUID_string(res[0]["objectGUID"][0]))
+
+ def _check_deleted(self, sam_ldb, guid):
+ # search the OU by GUID as it may be deleted
+ res = sam_ldb.search(base='<GUID=%s>' % guid,
+ controls=["show_deleted:1"],
+ attrs=["isDeleted", "objectCategory", "ou"])
+ self.assertEqual(len(res), 1)
+ ou_cur = res[0]
+ # Deleted Object base DN
+ dodn = self._deleted_objects_dn(sam_ldb)
+ # now check properties of the OU
+ name_cur = ou_cur["ou"][0]
+ self.assertEqual(str(ou_cur["isDeleted"][0]), "TRUE")
+ self.assertTrue(not("objectCategory" in ou_cur))
+ self.assertTrue(dodn in str(ou_cur["dn"]),
+ "OU %s is deleted but it is not located under %s!" % (name_cur, dodn))
+
+ def test_ReplConflictsRODC(self):
+ """Tests that objects created in conflict become conflict DNs"""
+ # Replicate all objects to RODC beforehand
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ # Create conflicting objects on DC1 and DC2, with DC1 object created first
+ name = "OU=Test RODC Conflict"
+ self.ou1 = self._create_ou(self.ldb_dc1, name)
+
+ # Replicate single object
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+ nc_dn="%s,%s" % (name, self.domain_dn),
+ local=True, single=True, forced=True)
+
+ # Delete the object, so another can be added
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+
+ # Create a conflicting DN as it would appear to the RODC
+ self.ou2 = self._create_ou(self.ldb_dc1, name)
+
+ try:
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+ nc_dn="%s,%s" % (name, self.domain_dn),
+ local=True, single=True, forced=True)
+ except Exception:
+ # Cleanup the object
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+ return
+
+ # Replication must not succeed here, otherwise the HWM (high-water mark) would be updated incorrectly.
+ self.fail("DRS replicate should have failed.")
+
+ def test_ReplConflictsRODCRename(self):
+ """Tests that objects created in conflict become conflict DNs"""
+ # Replicate all objects to RODC beforehand
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ # Create conflicting objects on DC1 and DC2, with DC1 object created first
+ name = "OU=Test RODC Rename Conflict"
+ self.ou1 = self._create_ou(self.ldb_dc1, name)
+
+ # Replicate single object
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+ nc_dn="%s,%s" % (name, self.domain_dn),
+ local=True, single=True, forced=True)
+
+ # Create a non-conflicting DN to rename as conflicting
+ free_name = "OU=Test RODC Rename No Conflict"
+ self.ou2 = self._create_ou(self.ldb_dc1, free_name)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+ nc_dn="%s,%s" % (free_name, self.domain_dn),
+ local=True, single=True, forced=True)
+
+ # Delete the object, so we can rename freely
+ # DO NOT REPLICATE TO THE RODC
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+
+ # Collide the name from the RODC perspective
+ self.ldb_dc1.rename("<GUID=%s>" % self.ou2, "%s,%s" % (name, self.domain_dn))
+
+ try:
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+ nc_dn="%s,%s" % (name, self.domain_dn),
+ local=True, single=True, forced=True)
+ except Exception:
+ # Cleanup the object
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+ return
+
+ # Replication must not succeed here, otherwise the HWM (high-water mark) would be updated incorrectly.
+ self.fail("DRS replicate should have failed.")
diff --git a/source4/torture/drs/python/ridalloc_exop.py b/source4/torture/drs/python/ridalloc_exop.py
new file mode 100644
index 0000000..ecd5cec
--- /dev/null
+++ b/source4/torture/drs/python/ridalloc_exop.py
@@ -0,0 +1,802 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Tests various RID allocation scenarios
+#
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2016
+# Copyright (C) Catalyst IT Ltd. 2016
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc2_dns_name
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN ridalloc_exop -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import drs_base
+
+import ldb
+from ldb import SCOPE_BASE
+
+from samba.dcerpc import drsuapi, misc
+from samba.samdb import SamDB
+
+import shutil
+import os
+from samba.auth import system_session, admin_session
+from samba.dbchecker import dbcheck
+from samba.ndr import ndr_pack
+from samba.dcerpc import security
+from samba import drs_utils, dsdb
+
+
+class DrsReplicaSyncTestCase(drs_base.DrsBaseTestCase):
+ """Intended as a semi-black box test case for DsGetNCChanges
+ implementation for extended operations. It should be testing
+ how DsGetNCChanges handles different input params (mostly invalid).
+ The final goal is to make DsGetNCChanges as binary compatible with
+ the Windows implementation as possible"""
+
+ def setUp(self):
+ super(DrsReplicaSyncTestCase, self).setUp()
+
+ def tearDown(self):
+ super(DrsReplicaSyncTestCase, self).tearDown()
+
+ def _determine_fSMORoleOwner(self, fsmo_obj_dn):
+ """Returns (owner, not_owner) pair where:
+ owner: dns name for FSMO owner
+ not_owner: dns name for DC not owning the FSMO"""
+ # collect info to return later
+ fsmo_info_1 = {"dns_name": self.dnsname_dc1,
+ "invocation_id": self.ldb_dc1.get_invocation_id(),
+ "ntds_guid": self.ldb_dc1.get_ntds_GUID(),
+ "server_dn": self.ldb_dc1.get_serverName()}
+ fsmo_info_2 = {"dns_name": self.dnsname_dc2,
+ "invocation_id": self.ldb_dc2.get_invocation_id(),
+ "ntds_guid": self.ldb_dc2.get_ntds_GUID(),
+ "server_dn": self.ldb_dc2.get_serverName()}
+
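+ # The RID Set object is a child of each DC's machine (server reference)
+ # account, so build "CN=RID Set,<server account DN>" for both DCs.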
+ msgs = self.ldb_dc1.search(scope=ldb.SCOPE_BASE, base=fsmo_info_1["server_dn"], attrs=["serverReference"])
+ fsmo_info_1["server_acct_dn"] = ldb.Dn(self.ldb_dc1, msgs[0]["serverReference"][0].decode('utf8'))
+ fsmo_info_1["rid_set_dn"] = ldb.Dn(self.ldb_dc1, "CN=RID Set") + fsmo_info_1["server_acct_dn"]
+
+ msgs = self.ldb_dc2.search(scope=ldb.SCOPE_BASE, base=fsmo_info_2["server_dn"], attrs=["serverReference"])
+ fsmo_info_2["server_acct_dn"] = ldb.Dn(self.ldb_dc2, msgs[0]["serverReference"][0].decode('utf8'))
+ fsmo_info_2["rid_set_dn"] = ldb.Dn(self.ldb_dc2, "CN=RID Set") + fsmo_info_2["server_acct_dn"]
+
+ # determine the owner dc
+ res = self.ldb_dc1.search(fsmo_obj_dn,
+ scope=SCOPE_BASE, attrs=["fSMORoleOwner"])
+ assert len(res) == 1, "Only one fSMORoleOwner value expected for %s!" % fsmo_obj_dn
+ fsmo_owner = res[0]["fSMORoleOwner"][0]
+ if fsmo_owner == self.info_dc1["dsServiceName"][0]:
+ return (fsmo_info_1, fsmo_info_2)
+ return (fsmo_info_2, fsmo_info_1)
+
+ def _check_exop_failed(self, ctr6, expected_failure):
+ self.assertEqual(ctr6.extended_ret, expected_failure)
+ #self.assertEqual(ctr6.object_count, 0)
+ #self.assertEqual(ctr6.first_object, None)
+ self.assertEqual(ctr6.more_data, False)
+ self.assertEqual(ctr6.nc_object_count, 0)
+ self.assertEqual(ctr6.nc_linked_attributes_count, 0)
+ self.assertEqual(ctr6.linked_attributes_count, 0)
+ self.assertEqual(ctr6.linked_attributes, [])
+ self.assertEqual(ctr6.drs_error[0], 0)
+
+ def test_InvalidDestDSA_ridalloc(self):
+ """Test RID allocation with invalid destination DSA guid"""
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
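+ # Use an arbitrary GUID that does not belong to any DSA in the domain,
+ # so the server should refuse the request with EXOP_ERR_UNKNOWN_CALLER.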
+ req8 = self._exop_req8(dest_dsa="9c637462-5b8c-4467-aef2-bdb1f57bc4ef",
+ invocation_id=fsmo_owner["invocation_id"],
+ nc_dn_str=fsmo_dn,
+ exop=drsuapi.DRSUAPI_EXOP_FSMO_RID_ALLOC)
+
+ (drs, drs_handle) = self._ds_bind(fsmo_owner["dns_name"])
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ self.assertEqual(level, 6, "Expected level 6 response!")
+ self._check_exop_failed(ctr, drsuapi.DRSUAPI_EXOP_ERR_UNKNOWN_CALLER)
+ self.assertEqual(ctr.source_dsa_guid, misc.GUID(fsmo_owner["ntds_guid"]))
+ self.assertEqual(ctr.source_dsa_invocation_id, misc.GUID(fsmo_owner["invocation_id"]))
+
+ def test_do_ridalloc(self):
+ """Test doing a RID allocation with a valid destination DSA guid"""
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ req8 = self._exop_req8(dest_dsa=fsmo_not_owner["ntds_guid"],
+ invocation_id=fsmo_owner["invocation_id"],
+ nc_dn_str=fsmo_dn,
+ exop=drsuapi.DRSUAPI_EXOP_FSMO_RID_ALLOC)
+
+ (drs, drs_handle) = self._ds_bind(fsmo_owner["dns_name"])
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ self.assertEqual(level, 6, "Expected level 6 response!")
+ self.assertEqual(ctr.source_dsa_guid, misc.GUID(fsmo_owner["ntds_guid"]))
+ self.assertEqual(ctr.source_dsa_invocation_id, misc.GUID(fsmo_owner["invocation_id"]))
+ ctr6 = ctr
+ self.assertEqual(ctr6.extended_ret, drsuapi.DRSUAPI_EXOP_ERR_SUCCESS)
+ self.assertEqual(ctr6.object_count, 3)
+ self.assertNotEqual(ctr6.first_object, None)
+ self.assertEqual(ldb.Dn(self.ldb_dc1, ctr6.first_object.object.identifier.dn), fsmo_dn)
+ self.assertNotEqual(ctr6.first_object.next_object, None)
+ self.assertNotEqual(ctr6.first_object.next_object.next_object, None)
+ second_object = ctr6.first_object.next_object.object
+ self.assertEqual(ldb.Dn(self.ldb_dc1, second_object.identifier.dn), fsmo_not_owner["rid_set_dn"])
+ third_object = ctr6.first_object.next_object.next_object.object
+ self.assertEqual(ldb.Dn(self.ldb_dc1, third_object.identifier.dn), fsmo_not_owner["server_acct_dn"])
+
+ self.assertEqual(ctr6.more_data, False)
+ self.assertEqual(ctr6.nc_object_count, 0)
+ self.assertEqual(ctr6.nc_linked_attributes_count, 0)
+ self.assertEqual(ctr6.drs_error[0], 0)
+ # We don't check the linked_attributes_count as if the domain
+ # has an RODC, it can gain links on the server account object
+
+ def test_do_ridalloc_get_anc(self):
+ """Test doing a RID allocation with a valid destination DSA guid and GET_ANC flag"""
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ req8 = self._exop_req8(dest_dsa=fsmo_not_owner["ntds_guid"],
+ invocation_id=fsmo_owner["invocation_id"],
+ nc_dn_str=fsmo_dn,
+ exop=drsuapi.DRSUAPI_EXOP_FSMO_RID_ALLOC,
+ replica_flags=drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ (drs, drs_handle) = self._ds_bind(fsmo_owner["dns_name"])
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ self.assertEqual(level, 6, "Expected level 6 response!")
+ self.assertEqual(ctr.source_dsa_guid, misc.GUID(fsmo_owner["ntds_guid"]))
+ self.assertEqual(ctr.source_dsa_invocation_id, misc.GUID(fsmo_owner["invocation_id"]))
+ ctr6 = ctr
+ self.assertEqual(ctr6.extended_ret, drsuapi.DRSUAPI_EXOP_ERR_SUCCESS)
+ self.assertEqual(ctr6.object_count, 3)
+ self.assertNotEqual(ctr6.first_object, None)
+ self.assertEqual(ldb.Dn(self.ldb_dc1, ctr6.first_object.object.identifier.dn), fsmo_dn)
+ self.assertNotEqual(ctr6.first_object.next_object, None)
+ self.assertNotEqual(ctr6.first_object.next_object.next_object, None)
+ second_object = ctr6.first_object.next_object.object
+ self.assertEqual(ldb.Dn(self.ldb_dc1, second_object.identifier.dn), fsmo_not_owner["rid_set_dn"])
+ third_object = ctr6.first_object.next_object.next_object.object
+ self.assertEqual(ldb.Dn(self.ldb_dc1, third_object.identifier.dn), fsmo_not_owner["server_acct_dn"])
+ self.assertEqual(ctr6.more_data, False)
+ self.assertEqual(ctr6.nc_object_count, 0)
+ self.assertEqual(ctr6.nc_linked_attributes_count, 0)
+ self.assertEqual(ctr6.drs_error[0], 0)
+ # We don't check the linked_attributes_count as if the domain
+ # has an RODC, it can gain links on the server account object
+
+ def test_edit_rid_master(self):
+ """Test doing a RID allocation after changing the RID master from the original one.
+ This should set rIDNextRID to 0 on the new RID master."""
+ # 1. a. Transfer role to non-RID master
+ # b. Check that it succeeds correctly
+ #
+ # 2. a. Call the RID alloc against the former master.
+ # b. Check that it succeeds.
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ # 1. Swap RID master role
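+ # Writing becomeRidMaster to a DC's rootDSE asks that DC to take over
+ # the RID master role (an online role transfer rather than a seizure).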
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, "")
+ m["becomeRidMaster"] = ldb.MessageElement("1", ldb.FLAG_MOD_REPLACE,
+ "becomeRidMaster")
+
+ # Arrange ldb_dc1/ldb_dc2 so that ldb_dc1 is NOT the current RID master;
+ # applying becomeRidMaster to it below then transfers the role to it.
+
+ server_dn = str(ldb.Dn(self.ldb_dc1, self.ldb_dc1.get_dsServiceName()).parent())
+
+ if server_dn == fsmo_owner['server_dn']:
+ # self.ldb_dc1 is the current owner, so swap the two handles
+ ldb_dc1, ldb_dc2 = self.ldb_dc2, self.ldb_dc1
+ else:
+ # self.ldb_dc1 is already the non-owner; keep the order as-is
+ ldb_dc1, ldb_dc2 = self.ldb_dc1, self.ldb_dc2
+
+ try:
+ # Ask ldb_dc1 to become the RID master
+ ldb_dc1.modify(m)
+ except ldb.LdbError as e1:
+ (num, msg) = e1.args
+ self.fail("Failed to reassign RID Master " + msg)
+
+ try:
+ # 2. Perform a RID alloc
+ req8 = self._exop_req8(dest_dsa=fsmo_owner["ntds_guid"],
+ invocation_id=fsmo_not_owner["invocation_id"],
+ nc_dn_str=fsmo_dn,
+ exop=drsuapi.DRSUAPI_EXOP_FSMO_RID_ALLOC)
+
+ (drs, drs_handle) = self._ds_bind(fsmo_not_owner["dns_name"])
+ # 3. Make sure the allocation succeeds
+ try:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ except RuntimeError as e:
+ self.fail("RID allocation failed: " + str(e))
+
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+
+ self.assertEqual(level, 6, "Expected level 6 response!")
+ self.assertEqual(ctr.source_dsa_guid, misc.GUID(fsmo_not_owner["ntds_guid"]))
+ self.assertEqual(ctr.source_dsa_invocation_id, misc.GUID(fsmo_not_owner["invocation_id"]))
+ ctr6 = ctr
+ self.assertEqual(ctr6.extended_ret, drsuapi.DRSUAPI_EXOP_ERR_SUCCESS)
+ self.assertEqual(ctr6.object_count, 3)
+ self.assertNotEqual(ctr6.first_object, None)
+ self.assertEqual(ldb.Dn(ldb_dc2, ctr6.first_object.object.identifier.dn), fsmo_dn)
+ self.assertNotEqual(ctr6.first_object.next_object, None)
+ self.assertNotEqual(ctr6.first_object.next_object.next_object, None)
+ second_object = ctr6.first_object.next_object.object
+ self.assertEqual(ldb.Dn(self.ldb_dc1, second_object.identifier.dn), fsmo_owner["rid_set_dn"])
+ third_object = ctr6.first_object.next_object.next_object.object
+ self.assertEqual(ldb.Dn(self.ldb_dc1, third_object.identifier.dn), fsmo_owner["server_acct_dn"])
+ finally:
+ # Swap the RID master back for other tests
+ m = ldb.Message()
+ m.dn = ldb.Dn(ldb_dc2, "")
+ m["becomeRidMaster"] = ldb.MessageElement("1", ldb.FLAG_MOD_REPLACE, "becomeRidMaster")
+ try:
+ ldb_dc2.modify(m)
+ except ldb.LdbError as e:
+ (num, msg) = e.args
+ self.fail("Failed to restore RID Master " + msg)
+
+ def test_offline_samba_tool_seized_ridalloc(self):
+ """Perform a join against the non-RID manager and then seize the RID Manager role"""
+
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ targetdir = self._test_join(fsmo_not_owner['dns_name'], "RIDALLOCTEST1")
+ try:
+ # Connect to the database
+ ldb_url = "tdb://%s" % os.path.join(targetdir, "private/sam.ldb")
+ smbconf = os.path.join(targetdir, "etc/smb.conf")
+
+ lp = self.get_loadparm()
+ new_ldb = SamDB(ldb_url, credentials=self.get_credentials(),
+ session_info=system_session(lp), lp=lp)
+
+ # 1. Get server name
+ res = new_ldb.search(base=ldb.Dn(new_ldb, new_ldb.get_serverName()),
+ scope=ldb.SCOPE_BASE, attrs=["serverReference"])
+ # 2. Get server reference
+ server_ref_dn = ldb.Dn(new_ldb, res[0]['serverReference'][0].decode('utf8'))
+
+ # Assert that no RID Set has been set
+ res = new_ldb.search(base=server_ref_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences'])
+
+ self.assertFalse("rIDSetReferences" in res[0])
+
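+ # Seize the RID Manager role offline against the joined DC's local DB;
+ # the seize is expected to allocate a RID Set for this DC as a side effect.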
+ (result, out, err) = self.runsubcmd("fsmo", "seize", "--role", "rid", "-H", ldb_url, "--configfile=%s" % (smbconf), "--force")
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+
+ # 3. Assert we get the RID Set
+ res = new_ldb.search(base=server_ref_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences'])
+
+ self.assertTrue("rIDSetReferences" in res[0])
+ finally:
+ shutil.rmtree(targetdir, ignore_errors=True)
+ self._test_force_demote(fsmo_not_owner['dns_name'], "RIDALLOCTEST1")
+
+ def _test_join(self, server, netbios_name):
+ tmpdir = os.path.join(self.tempdir, "targetdir")
+ creds = self.get_credentials()
+ (result, out, err) = self.runsubcmd("domain", "join",
+ creds.get_realm(),
+ "dc", "-U%s%%%s" % (creds.get_username(),
+ creds.get_password()),
+ '--targetdir=%s' % tmpdir,
+ '--server=%s' % server,
+ "--option=netbios name = %s" % netbios_name)
+ self.assertCmdSuccess(result, out, err)
+ return tmpdir
+
+ def _test_force_demote(self, server, netbios_name):
+ creds = self.get_credentials()
+ (result, out, err) = self.runsubcmd("domain", "demote",
+ "-U%s%%%s" % (creds.get_username(),
+ creds.get_password()),
+ '--server=%s' % server,
+ "--remove-other-dead-server=%s" % netbios_name)
+ self.assertCmdSuccess(result, out, err)
+
+ def test_offline_manual_seized_ridalloc_with_dbcheck(self):
+ """Perform the same actions as test_offline_samba_tool_seized_ridalloc,
+ but do not create the RID set. Confirm that dbcheck correctly creates
+ the RID Set.
+ """
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ targetdir = self._test_join(fsmo_not_owner['dns_name'], "RIDALLOCTEST2")
+ try:
+ # Connect to the database
+ ldb_url = "tdb://%s" % os.path.join(targetdir, "private/sam.ldb")
+ lp = self.get_loadparm()
+
+ new_ldb = SamDB(ldb_url, credentials=self.get_credentials(),
+ session_info=system_session(lp), lp=lp)
+
+ serviceName = new_ldb.get_dsServiceName()
+ m = ldb.Message()
+ m.dn = fsmo_dn
+ m["fSMORoleOwner"] = ldb.MessageElement(serviceName,
+ ldb.FLAG_MOD_REPLACE,
+ "fSMORoleOwner")
+ new_ldb.modify(m)
+
+ # 1. Get server name
+ res = new_ldb.search(base=ldb.Dn(new_ldb, new_ldb.get_serverName()),
+ scope=ldb.SCOPE_BASE, attrs=["serverReference"])
+ # 2. Get server reference
+ server_ref_dn = ldb.Dn(new_ldb, res[0]['serverReference'][0].decode('utf8'))
+
+ # Assert that no RID Set has been set
+ res = new_ldb.search(base=server_ref_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences'])
+
+ self.assertFalse("rIDSetReferences" in res[0])
+
+ chk = dbcheck(new_ldb, verbose=False, fix=True, yes=True, quiet=True)
+
+ self.assertEqual(chk.check_database(DN=server_ref_dn, scope=ldb.SCOPE_BASE), 1, "Should have fixed one error (missing RID Set)")
+
+ # 3. Assert we get the RID Set
+ res = new_ldb.search(base=server_ref_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences'])
+
+ self.assertTrue("rIDSetReferences" in res[0])
+ finally:
+ self._test_force_demote(fsmo_not_owner['dns_name'], "RIDALLOCTEST2")
+ shutil.rmtree(targetdir, ignore_errors=True)
+
+ def test_offline_manual_seized_ridalloc_add_user(self):
+ """Perform the same actions as test_offline_samba_tool_seized_ridalloc,
+ but do not create the RID set. Confirm that user-add correctly creates
+ the RID Set."""
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ targetdir = self._test_join(fsmo_not_owner['dns_name'], "RIDALLOCTEST3")
+ try:
+ # Connect to the database
+ ldb_url = "tdb://%s" % os.path.join(targetdir, "private/sam.ldb")
+ lp = self.get_loadparm()
+
+ new_ldb = SamDB(ldb_url, credentials=self.get_credentials(),
+ session_info=system_session(lp), lp=lp)
+
+ serviceName = new_ldb.get_dsServiceName()
+ m = ldb.Message()
+ m.dn = fsmo_dn
+ m["fSMORoleOwner"] = ldb.MessageElement(serviceName,
+ ldb.FLAG_MOD_REPLACE,
+ "fSMORoleOwner")
+ new_ldb.modify(m)
+
+ # 1. Get server name
+ res = new_ldb.search(base=ldb.Dn(new_ldb, new_ldb.get_serverName()),
+ scope=ldb.SCOPE_BASE, attrs=["serverReference"])
+ # 2. Get server reference
+ server_ref_dn = ldb.Dn(new_ldb, res[0]['serverReference'][0].decode('utf8'))
+
+ # Assert that no RID Set has been set
+ res = new_ldb.search(base=server_ref_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences'])
+
+ self.assertFalse("rIDSetReferences" in res[0])
+
+ new_ldb.newuser("ridalloctestuser", "P@ssword!")
+
+ # 3. Assert we get the RID Set
+ res = new_ldb.search(base=server_ref_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences'])
+
+ self.assertTrue("rIDSetReferences" in res[0])
+
+ finally:
+ self._test_force_demote(fsmo_not_owner['dns_name'], "RIDALLOCTEST3")
+ shutil.rmtree(targetdir, ignore_errors=True)
+
+ def test_offline_manual_seized_ridalloc_add_user_as_admin(self):
+ """Perform the same actions as test_offline_samba_tool_seized_ridalloc,
+ but do not create the RID set. Confirm that user-add correctly creates
+ the RID Set."""
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ targetdir = self._test_join(fsmo_not_owner['dns_name'], "RIDALLOCTEST4")
+ try:
+ # Connect to the database
+ ldb_url = "tdb://%s" % os.path.join(targetdir, "private/sam.ldb")
+ lp = self.get_loadparm()
+
+ new_ldb = SamDB(ldb_url, credentials=self.get_credentials(),
+ session_info=admin_session(lp, self.ldb_dc1.get_domain_sid()), lp=lp)
+
+ serviceName = new_ldb.get_dsServiceName()
+ m = ldb.Message()
+ m.dn = fsmo_dn
+ m["fSMORoleOwner"] = ldb.MessageElement(serviceName,
+ ldb.FLAG_MOD_REPLACE,
+ "fSMORoleOwner")
+ new_ldb.modify(m)
+
+ # 1. Get server name
+ res = new_ldb.search(base=ldb.Dn(new_ldb, new_ldb.get_serverName()),
+ scope=ldb.SCOPE_BASE, attrs=["serverReference"])
+ # 2. Get server reference
+ server_ref_dn = ldb.Dn(new_ldb, res[0]['serverReference'][0].decode('utf8'))
+
+ # Assert that no RID Set has been set
+ res = new_ldb.search(base=server_ref_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences'])
+
+ self.assertFalse("rIDSetReferences" in res[0])
+
+ # Create a user to allocate a RID Set for itself (the RID master)
+ new_ldb.newuser("ridalloctestuser", "P@ssword!")
+
+ # 3. Assert we get the RID Set
+ res = new_ldb.search(base=server_ref_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences'])
+
+ self.assertTrue("rIDSetReferences" in res[0])
+
+ finally:
+ self._test_force_demote(fsmo_not_owner['dns_name'], "RIDALLOCTEST4")
+ shutil.rmtree(targetdir, ignore_errors=True)
+
+ def test_join_time_ridalloc(self):
+ """Perform a join against the RID manager and assert we have a RID Set"""
+
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ targetdir = self._test_join(fsmo_owner['dns_name'], "RIDALLOCTEST5")
+ try:
+ # Connect to the database
+ ldb_url = "tdb://%s" % os.path.join(targetdir, "private/sam.ldb")
+
+ lp = self.get_loadparm()
+ new_ldb = SamDB(ldb_url, credentials=self.get_credentials(),
+ session_info=system_session(lp), lp=lp)
+
+ # 1. Get server name
+ res = new_ldb.search(base=ldb.Dn(new_ldb, new_ldb.get_serverName()),
+ scope=ldb.SCOPE_BASE, attrs=["serverReference"])
+ # 2. Get server reference
+ server_ref_dn = ldb.Dn(new_ldb, res[0]['serverReference'][0].decode('utf8'))
+
+ # 3. Assert we get the RID Set
+ res = new_ldb.search(base=server_ref_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences'])
+
+ self.assertTrue("rIDSetReferences" in res[0])
+ finally:
+ self._test_force_demote(fsmo_owner['dns_name'], "RIDALLOCTEST5")
+ shutil.rmtree(targetdir, ignore_errors=True)
+
+ def test_rid_set_dbcheck(self):
+ """Perform a join against the RID manager and assert we have a RID Set.
+ Using dbcheck, we assert that we can detect out of range users."""
+
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ targetdir = self._test_join(fsmo_owner['dns_name'], "RIDALLOCTEST6")
+ try:
+ # Connect to the database
+ ldb_url = "tdb://%s" % os.path.join(targetdir, "private/sam.ldb")
+
+ lp = self.get_loadparm()
+ new_ldb = SamDB(ldb_url, credentials=self.get_credentials(),
+ session_info=system_session(lp), lp=lp)
+
+ # 1. Get server name
+ res = new_ldb.search(base=ldb.Dn(new_ldb, new_ldb.get_serverName()),
+ scope=ldb.SCOPE_BASE, attrs=["serverReference"])
+ # 2. Get server reference
+ server_ref_dn = ldb.Dn(new_ldb, res[0]['serverReference'][0].decode('utf8'))
+
+ # 3. Assert we get the RID Set
+ res = new_ldb.search(base=server_ref_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences'])
+
+ self.assertTrue("rIDSetReferences" in res[0])
+ rid_set_dn = ldb.Dn(new_ldb, res[0]["rIDSetReferences"][0].decode('utf8'))
+
+ # 4. Add a new user (triggers RID set work)
+ new_ldb.newuser("ridalloctestuser", "P@ssword!")
+
+ # 5. Now fetch the RID SET
+ rid_set_res = new_ldb.search(base=rid_set_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDNextRid',
+ 'rIDAllocationPool'])
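+ # rIDAllocationPool packs the allocated range into a 64-bit value: the
+ # low 32 bits hold the first RID and the high 32 bits the last RID, so
+ # shift down the high half to get the end of the current range.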
+ next_pool = int(rid_set_res[0]["rIDAllocationPool"][0])
+ last_rid = (0xFFFFFFFF00000000 & next_pool) >> 32
+
+ # 6. Add user above the ridNextRid and at mid-range.
+ #
+ # We can safely do this because this is an offline DB that will be
+ # destroyed.
+ m = ldb.Message()
+ m.dn = ldb.Dn(new_ldb, "CN=ridsettestuser1,CN=Users")
+ m.dn.add_base(new_ldb.get_default_basedn())
+ m['objectClass'] = ldb.MessageElement('user', ldb.FLAG_MOD_ADD, 'objectClass')
+ m['objectSid'] = ldb.MessageElement(ndr_pack(security.dom_sid(str(new_ldb.get_domain_sid()) + "-%d" % (last_rid - 10))),
+ ldb.FLAG_MOD_ADD,
+ 'objectSid')
+ new_ldb.add(m, controls=["relax:0"])
+
+ # 7. Check the RID Set
+ chk = dbcheck(new_ldb, verbose=False, fix=True, yes=True, quiet=True)
+
+ # Should have one error (wrong rIDNextRID)
+ self.assertEqual(chk.check_database(DN=rid_set_dn, scope=ldb.SCOPE_BASE), 1)
+
+ # 8. Assert that dbcheck didn't show any other errors
+ chk = dbcheck(new_ldb, verbose=False, fix=False, quiet=True)
+
+ rid_set_res = new_ldb.search(base=rid_set_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDNextRid',
+ 'rIDAllocationPool'])
+ last_allocated_rid = int(rid_set_res[0]["rIDNextRid"][0])
+ self.assertEqual(last_allocated_rid, last_rid - 10)
+
+ # 9. Assert that the range wasn't thrown away
+
+ next_pool = int(rid_set_res[0]["rIDAllocationPool"][0])
+ self.assertEqual(last_rid, (0xFFFFFFFF00000000 & next_pool) >> 32, "rid pool should not have changed")
+ finally:
+ self._test_force_demote(fsmo_owner['dns_name'], "RIDALLOCTEST6")
+ shutil.rmtree(targetdir, ignore_errors=True)
+
+ def test_rid_set_dbcheck_after_seize(self):
+ """Perform a join against the RID manager and assert we have a RID Set.
+ We seize the RID master role, then using dbcheck, we assert that we can
+ detect out of range users (and then bump the RID set as required)."""
+
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ targetdir = self._test_join(fsmo_owner['dns_name'], "RIDALLOCTEST7")
+ try:
+ # Connect to the database
+ ldb_url = "tdb://%s" % os.path.join(targetdir, "private/sam.ldb")
+ smbconf = os.path.join(targetdir, "etc/smb.conf")
+
+ lp = self.get_loadparm()
+ new_ldb = SamDB(ldb_url, credentials=self.get_credentials(),
+ session_info=system_session(lp), lp=lp)
+
+ # 1. Get server name
+ res = new_ldb.search(base=ldb.Dn(new_ldb, new_ldb.get_serverName()),
+ scope=ldb.SCOPE_BASE, attrs=["serverReference"])
+ # 2. Get server reference
+ server_ref_dn = ldb.Dn(new_ldb, res[0]['serverReference'][0].decode('utf8'))
+
+ # 3. Assert we get the RID Set
+ res = new_ldb.search(base=server_ref_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences'])
+
+ self.assertTrue("rIDSetReferences" in res[0])
+ rid_set_dn = ldb.Dn(new_ldb, res[0]["rIDSetReferences"][0].decode('utf8'))
+ # 4. Seize the RID Manager role
+ (result, out, err) = self.runsubcmd("fsmo", "seize", "--role", "rid", "-H", ldb_url, "--configfile=%s" % (smbconf), "--force")
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+
+ # 5. Add a new user (triggers RID set work)
+ new_ldb.newuser("ridalloctestuser", "P@ssword!")
+
+ # 6. Now fetch the RID SET
+ rid_set_res = new_ldb.search(base=rid_set_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDNextRid',
+ 'rIDAllocationPool'])
+ next_pool = int(rid_set_res[0]["rIDAllocationPool"][0])
+ last_rid = (0xFFFFFFFF00000000 & next_pool) >> 32
+
+ # 7. Add user above the ridNextRid and at almost the end of the range.
+ #
+ m = ldb.Message()
+ m.dn = ldb.Dn(new_ldb, "CN=ridsettestuser2,CN=Users")
+ m.dn.add_base(new_ldb.get_default_basedn())
+ m['objectClass'] = ldb.MessageElement('user', ldb.FLAG_MOD_ADD, 'objectClass')
+ m['objectSid'] = ldb.MessageElement(ndr_pack(security.dom_sid(str(new_ldb.get_domain_sid()) + "-%d" % (last_rid - 3))),
+ ldb.FLAG_MOD_ADD,
+ 'objectSid')
+ new_ldb.add(m, controls=["relax:0"])
+
+ # 8. Add user above the ridNextRid and at the end of the range
+ m = ldb.Message()
+ m.dn = ldb.Dn(new_ldb, "CN=ridsettestuser3,CN=Users")
+ m.dn.add_base(new_ldb.get_default_basedn())
+ m['objectClass'] = ldb.MessageElement('user', ldb.FLAG_MOD_ADD, 'objectClass')
+ m['objectSid'] = ldb.MessageElement(ndr_pack(security.dom_sid(str(new_ldb.get_domain_sid()) + "-%d" % last_rid)),
+ ldb.FLAG_MOD_ADD,
+ 'objectSid')
+ new_ldb.add(m, controls=["relax:0"])
+
+ chk = dbcheck(new_ldb, verbose=False, fix=True, yes=True, quiet=True)
+
+ # Should have fixed two errors (wrong ridNextRid)
+ self.assertEqual(chk.check_database(DN=rid_set_dn, scope=ldb.SCOPE_BASE), 2)
+
+ # 9. Assert that dbcheck didn't show any other errors
+ chk = dbcheck(new_ldb, verbose=False, fix=False, quiet=True)
+
+ # 10. Add another user (checks RID rollover)
+ # We have seized the role, so we can do that.
+ new_ldb.newuser("ridalloctestuser3", "P@ssword!")
+
+ rid_set_res = new_ldb.search(base=rid_set_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDNextRid',
+ 'rIDAllocationPool'])
+ next_pool = int(rid_set_res[0]["rIDAllocationPool"][0])
+ self.assertNotEqual(last_rid, (0xFFFFFFFF00000000 & next_pool) >> 32, "rid pool should have changed")
+ finally:
+ self._test_force_demote(fsmo_owner['dns_name'], "RIDALLOCTEST7")
+ shutil.rmtree(targetdir, ignore_errors=True)
+
+ def test_replicate_against_deleted_objects_transaction(self):
+ """Not related to RID allocation, but uses the infrastructure here.
+ Do a join, create a link between two objects remotely, but
+ remove the target locally. Show that we need to set a magic
+ opaque if there is an outer transaction.
+
+ """
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ test_user4 = "ridalloctestuser4"
+ test_group = "ridalloctestgroup1"
+
+ self.ldb_dc1.newuser(test_user4, "P@ssword!")
+
+ self.addCleanup(self.ldb_dc1.deleteuser, test_user4)
+
+ self.ldb_dc1.newgroup(test_group)
+ self.addCleanup(self.ldb_dc1.deletegroup, test_group)
+
+ targetdir = self._test_join(self.dnsname_dc1, "RIDALLOCTEST8")
+ try:
+ # Connect to the database
+ ldb_url = "tdb://%s" % os.path.join(targetdir, "private/sam.ldb")
+ lp = self.get_loadparm()
+
+ new_ldb = SamDB(ldb_url,
+ session_info=system_session(lp), lp=lp)
+
+ destination_dsa_guid = misc.GUID(new_ldb.get_ntds_GUID())
+
+ repl = drs_utils.drs_Replicate(f'ncacn_ip_tcp:{self.dnsname_dc1}[seal]',
+ lp,
+ self.get_credentials(),
+ new_ldb,
+ destination_dsa_guid)
+
+ source_dsa_invocation_id = misc.GUID(self.ldb_dc1.invocation_id)
+
+ # Add the link on the remote DC
+ self.ldb_dc1.add_remove_group_members(test_group, [test_user4])
+
+ # Starting an outer transaction currently overrides the logic
+ # inside repl.replicate() that retries with GET_TGT, which in
+ # turn would tell the repl_meta_data module that the most
+ # up-to-date info is already available
+ new_ldb.transaction_start()
+ repl.replicate(self.ldb_dc1.domain_dn(),
+ source_dsa_invocation_id,
+ destination_dsa_guid)
+
+ # Delete the user locally, before applying the links.
+ # This simulates getting the delete in the replication
+ # stream.
+ new_ldb.deleteuser(test_user4)
+
+ # This fails as the user has been deleted locally but a remote link is sent
+ self.assertRaises(ldb.LdbError, new_ldb.transaction_commit)
+
+ new_ldb.transaction_start()
+ repl.replicate(self.ldb_dc1.domain_dn(),
+ source_dsa_invocation_id,
+ destination_dsa_guid)
+
+ # Delete the user locally (the previous transaction
+ # doesn't apply), before applying the links. This
+ # simulates getting the delete in the replication stream.
+ new_ldb.deleteuser(test_user4)
+
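+ # Setting this opaque marks the full join as completed, which lets
+ # repl_meta_data handle the link to the locally deleted target instead
+ # of failing the outer transaction (see the docstring above).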
+ new_ldb.set_opaque_integer(dsdb.DSDB_FULL_JOIN_REPLICATION_COMPLETED_OPAQUE_NAME,
+ 1)
+
+ # This should now work
+ try:
+ new_ldb.transaction_commit()
+ except ldb.LdbError as e:
+ self.fail(f"Failed to replicate despite setting opaque with {e.args[1]}")
+
+ finally:
+ self._test_force_demote(self.dnsname_dc1, "RIDALLOCTEST8")
+ shutil.rmtree(targetdir, ignore_errors=True)
+
+ def test_replicate_against_deleted_objects_normal(self):
+ """Not related to RID allocation, but uses the infrastructure here.
+ Do a join, create a link between two objects remotely, but
+ remove the target locally. Show that replication copes with the
+ link to the locally deleted target.
+
+ """
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ test_user5 = "ridalloctestuser5"
+ test_group2 = "ridalloctestgroup2"
+
+ self.ldb_dc1.newuser(test_user5, "P@ssword!")
+ self.addCleanup(self.ldb_dc1.deleteuser, test_user5)
+
+ self.ldb_dc1.newgroup(test_group2)
+ self.addCleanup(self.ldb_dc1.deletegroup, test_group2)
+
+ targetdir = self._test_join(self.dnsname_dc1, "RIDALLOCTEST9")
+ try:
+ # Connect to the database
+ ldb_url = "tdb://%s" % os.path.join(targetdir, "private/sam.ldb")
+ lp = self.get_loadparm()
+
+ new_ldb = SamDB(ldb_url,
+ session_info=system_session(lp), lp=lp)
+
+ destination_dsa_guid = misc.GUID(new_ldb.get_ntds_GUID())
+
+ repl = drs_utils.drs_Replicate(f'ncacn_ip_tcp:{self.dnsname_dc1}[seal]',
+ lp,
+ self.get_credentials(),
+ new_ldb,
+ destination_dsa_guid)
+
+ source_dsa_invocation_id = misc.GUID(self.ldb_dc1.invocation_id)
+
+ # Add the link on the remote DC
+ self.ldb_dc1.add_remove_group_members(test_group2, [test_user5])
+
+ # Delete the user locally
+ new_ldb.deleteuser(test_user5)
+
+ # Confirm replication copes with a link to a locally deleted user
+ repl.replicate(self.ldb_dc1.domain_dn(),
+ source_dsa_invocation_id,
+ destination_dsa_guid)
+
+ finally:
+ self._test_force_demote(self.dnsname_dc1, "RIDALLOCTEST9")
+ shutil.rmtree(targetdir, ignore_errors=True)
diff --git a/source4/torture/drs/python/samba_tool_drs.py b/source4/torture/drs/python/samba_tool_drs.py
new file mode 100644
index 0000000..e622fe4
--- /dev/null
+++ b/source4/torture/drs/python/samba_tool_drs.py
@@ -0,0 +1,410 @@
+# Blackbox tests for "samba-tool drs" command
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Blackbox tests for samba-tool drs."""
+
+import samba.tests
+import os
+import ldb
+import drs_base
+
+
+class SambaToolDrsTests(drs_base.DrsBaseTestCase):
+ """Blackbox test case for samba-tool drs."""
+
+ def setUp(self):
+ super(SambaToolDrsTests, self).setUp()
+
+ self.dc1 = samba.tests.env_get_var_value("DC1")
+ self.dc2 = samba.tests.env_get_var_value("DC2")
+
+ creds = self.get_credentials()
+ self.cmdline_creds = "-U%s/%s%%%s" % (creds.get_domain(),
+ creds.get_username(), creds.get_password())
+
+ def tearDown(self):
+ self._enable_inbound_repl(self.dnsname_dc1)
+ self._enable_inbound_repl(self.dnsname_dc2)
+
+ self.rm_files('names.tdb', allow_missing=True)
+ self.rm_dirs('etc', 'msg.lock', 'private', 'state', 'bind-dns',
+ allow_missing=True)
+
+ super(SambaToolDrsTests, self).tearDown()
+
+ def _get_rootDSE(self, dc, ldap_only=True):
+ samdb = samba.tests.connect_samdb(dc, lp=self.get_loadparm(),
+ credentials=self.get_credentials(),
+ ldap_only=ldap_only)
+ return samdb.search(base="", scope=samba.tests.ldb.SCOPE_BASE)[0]
+
+ def test_samba_tool_bind(self):
+ """Tests 'samba-tool drs bind' command."""
+
+ # Output should be like:
+ # Extensions supported:
+ # <list-of-supported-extensions>
+ # Site GUID: <GUID>
+ # Repl epoch: 0
+ out = self.check_output("samba-tool drs bind %s %s" % (self.dc1,
+ self.cmdline_creds))
+ self.assertTrue("Site GUID:" in out.decode('utf8'))
+ self.assertTrue("Repl epoch:" in out.decode('utf8'))
+
+ def test_samba_tool_kcc(self):
+ """Tests 'samba-tool drs kcc' command."""
+
+ # Output should be like 'Consistency check on <DC> successful.'
+ out = self.check_output("samba-tool drs kcc %s %s" % (self.dc1,
+ self.cmdline_creds))
+ self.assertTrue(b"Consistency check on" in out)
+ self.assertTrue(b"successful" in out)
+
+ def test_samba_tool_options(self):
+ """Tests 'samba-tool drs options' command
+ """
+ # Output should be like 'Current DSA options: IS_GC <OTHER_FLAGS>'
+ out = self.check_output("samba-tool drs options %s %s" % (self.dc1,
+ self.cmdline_creds))
+ self.assertTrue(b"Current DSA options:" in out)
+
+ def test_samba_tool_replicate(self):
+ """Tests 'samba-tool drs replicate' command."""
+
+ # Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was successful.'
+ nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
+ out = self.check_output("samba-tool drs replicate %s %s %s %s" % (self.dc1,
+ self.dc2,
+ nc_name,
+ self.cmdline_creds))
+ self.assertTrue(b"Replicate from" in out)
+ self.assertTrue(b"was successful" in out)
+
+ def test_samba_tool_replicate_async(self):
+ """Tests 'samba-tool drs replicate --async-op' command."""
+
+ # Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was started.'
+ nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
+ out = self.check_output("samba-tool drs replicate --async-op %s %s %s %s" % (self.dc1,
+ self.dc2,
+ nc_name,
+ self.cmdline_creds))
+ self.assertTrue(b"Replicate from" in out)
+ self.assertTrue(b"was started" in out)
+
+ def test_samba_tool_replicate_local_online(self):
+ """Tests 'samba-tool drs replicate --local-online' command."""
+
+ # Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was successful.'
+ nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
+ out = self.check_output("samba-tool drs replicate --local-online %s %s %s" % (self.dc1,
+ self.dc2,
+ nc_name))
+ self.assertTrue(b"Replicate from" in out)
+ self.assertTrue(b"was successful" in out)
+
+ def test_samba_tool_replicate_local_online_async(self):
+ """Tests 'samba-tool drs replicate --local-online --async-op' command."""
+
+ # Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was started.'
+ nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
+ out = self.check_output("samba-tool drs replicate --local-online --async-op %s %s %s" % (self.dc1,
+ self.dc2,
+ nc_name))
+ self.assertTrue(b"Replicate from" in out)
+ self.assertTrue(b"was started" in out)
+
+ def test_samba_tool_replicate_local_machine_creds(self):
+ """Tests 'samba-tool drs replicate --local -P' command (uses machine creds)."""
+
+ # Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was successful.'
+ nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
+ out = self.check_output("samba-tool drs replicate -P --local %s %s %s" % (self.dc1,
+ self.dc2,
+ nc_name))
+ self.assertTrue(b"Incremental" in out)
+ self.assertTrue(b"was successful" in out)
+
+ def test_samba_tool_replicate_local(self):
+ """Tests 'samba-tool drs replicate --local' command (uses machine creds)."""
+
+ # Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was successful.'
+ nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
+
+ def get_num_obj_links(output):
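+ # Pull the first two integers out of the command output; for
+ # 'drs replicate --local' these are the object and link counts sent.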
+ num_objs = None
+ num_links = None
+ for word in output.decode('utf8').split(" "):
+ try:
+ int(word)
+ if num_objs is None:
+ num_objs = int(word)
+ elif num_links is None:
+ num_links = int(word)
+ except ValueError:
+ pass
+
+ return (num_objs, num_links)
+
+ out = self.check_output("samba-tool drs replicate --local --full-sync %s %s %s %s"
+ % (self.dc1, self.dc2, nc_name, self.cmdline_creds))
+ self.assertTrue(b"was successful" in out)
+ self.assertTrue(b"Full" in out)
+
+ (first_obj, _) = get_num_obj_links(out)
+
+ out = self.check_output("samba-tool drs replicate --local %s %s %s %s"
+ % (self.dc1, self.dc2, nc_name, self.cmdline_creds))
+ self.assertTrue(b"was successful" in out)
+ self.assertTrue(b"Incremental" in out)
+
+ (second_obj, _) = get_num_obj_links(out)
+
+ self.assertGreater(first_obj, second_obj)
+
+ server_rootdse = self._get_rootDSE(self.dc1)
+ server_ldap_service_name = str(server_rootdse["ldapServiceName"][0])
+ server_realm = server_ldap_service_name.split(":")[0]
+
+ # We have to give it a different NetBIOS name every time the test
+ # runs, otherwise the name collision causes strange issues.
+ # The name should also differ between test environments.
+ netbiosname = "test" + self.dc2
+ if len(netbiosname) > 15:
+ netbiosname = netbiosname[:15]
+
+ out = self.check_output("samba-tool domain join %s dc --server=%s %s --targetdir=%s --option=netbiosname=%s"
+ % (server_realm, self.dc1, self.cmdline_creds, self.tempdir, netbiosname))
+
+ new_dc_config_file = "%s/etc/smb.conf" % self.tempdir
+
+ self.check_output("samba-tool drs replicate --local %s %s %s %s --configfile=%s"
+ % ("invalid", self.dc1, nc_name,
+ self.cmdline_creds, new_dc_config_file))
+
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1)
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2)
+
+ # add an object with link on dc1
+ group_name = "group-repl-local-%s" % self.dc2
+ user_name = "user-repl-local-%s" % self.dc2
+
+ self.check_output("samba-tool group add %s %s -H ldap://%s"
+ % (group_name, self.cmdline_creds, self.dc1))
+ self.check_output("samba-tool user add %s %s --random-password -H ldap://%s"
+ % (user_name, self.cmdline_creds, self.dc1))
+ self.check_output("samba-tool group addmembers %s %s %s -H ldap://%s"
+ % (group_name, user_name, self.cmdline_creds, self.dc1))
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1)
+
+ # pull that change with --local into local db from dc1: should send link and some objects
+ out = self.check_output("samba-tool drs replicate --local %s %s %s %s --configfile=%s"
+ % ("invalid", self.dc1, nc_name,
+ self.cmdline_creds, new_dc_config_file))
+
+ (obj_1, link_1) = get_num_obj_links(out)
+
+ self.assertGreaterEqual(obj_1, 2)
+ self.assertEqual(link_1, 1)
+
+ # pull that change with --local into local db from dc2: shouldn't send link or object
+ # as we sent an up-to-dateness vector showing that we had already synced with DC1
+ out = self.check_output("samba-tool drs replicate --local %s %s %s %s --configfile=%s"
+ % ("invalid", self.dc2, nc_name,
+ self.cmdline_creds, new_dc_config_file))
+
+ (obj_2, link_2) = get_num_obj_links(out)
+
+ self.assertEqual(obj_2, 0)
+ self.assertEqual(link_2, 0)
+
+ self.check_output("samba-tool domain demote --remove-other-dead-server=%s -H ldap://%s %s --configfile=%s"
+ % (netbiosname, self.dc1, self.cmdline_creds, new_dc_config_file))
+
+ def test_samba_tool_replicate_machine_creds_P(self):
+ """Tests 'samba-tool drs replicate -P' command with machine creds."""
+
+ # Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was successful.'
+ nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
+ out = self.check_output("samba-tool drs replicate -P %s %s %s" % (self.dc1,
+ self.dc2,
+ nc_name))
+ self.assertTrue(b"Replicate from" in out)
+ self.assertTrue(b"was successful" in out)
+
+ def test_samba_tool_replicate_machine_creds(self):
+ """Tests 'samba-tool drs replicate' command with implicit machine creds."""
+
+ # Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was successful.'
+ nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
+ out = self.check_output("samba-tool drs replicate %s %s %s" % (self.dc1,
+ self.dc2,
+ nc_name))
+ self.assertTrue(b"Replicate from" in out)
+ self.assertTrue(b"was successful" in out)
+
+ def test_samba_tool_drs_clone_dc(self):
+ """Tests 'samba-tool drs clone-dc-database' command."""
+ server_rootdse = self._get_rootDSE(self.dc1)
+ server_nc_name = server_rootdse["defaultNamingContext"]
+ server_ds_name = server_rootdse["dsServiceName"]
+ server_ldap_service_name = str(server_rootdse["ldapServiceName"][0])
+ server_realm = server_ldap_service_name.split(":")[0]
+ self.check_output("samba-tool drs clone-dc-database %s --server=%s %s --targetdir=%s"
+ % (server_realm,
+ self.dc1,
+ self.cmdline_creds,
+ self.tempdir))
+ ldb_rootdse = self._get_rootDSE("ldb://" + os.path.join(self.tempdir, "private", "sam.ldb"), ldap_only=False)
+ nc_name = ldb_rootdse["defaultNamingContext"]
+ ds_name = ldb_rootdse["dsServiceName"]
+ ldap_service_name = str(server_rootdse["ldapServiceName"][0])
+ self.assertEqual(nc_name, server_nc_name)
+ # The clone should pretend to be the source server
+ self.assertEqual(ds_name, server_ds_name)
+ self.assertEqual(ldap_service_name, server_ldap_service_name)
+
+ samdb = samba.tests.connect_samdb("ldb://" + os.path.join(self.tempdir, "private", "sam.ldb"),
+ ldap_only=False, lp=self.get_loadparm())
+
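+ # The clone was made without --include-secrets, so secret attributes such
+ # as the krbtgt unicodePwd must not be present in the copied database.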
+ def get_krbtgt_pw():
+ samdb.searchone("unicodePwd", "cn=krbtgt,CN=users,%s" % nc_name)
+ self.assertRaises(KeyError, get_krbtgt_pw)
+
+ server_dn = samdb.searchone("serverReferenceBL", "cn=%s,ou=domain controllers,%s" % (self.dc2, server_nc_name)).decode('utf8')
+ ntds_guid = samdb.searchone("objectGUID", "cn=ntds settings,%s" % server_dn).decode('utf8')
+
+ res = samdb.search(base=str(server_nc_name),
+ expression="(&(objectclass=user)(cn=dns-%s))" % (self.dc2),
+ attrs=[], scope=ldb.SCOPE_SUBTREE)
+ if len(res) == 1:
+ dns_obj = res[0]
+ else:
+ dns_obj = None
+
+ # While we have this cloned, try demoting the other server on the clone, by GUID
+ self.check_output("samba-tool domain demote --remove-other-dead-server=%s -H %s/private/sam.ldb"
+ % (ntds_guid,
+ self.tempdir))
+
+ # Check some of the objects that should have been removed
+ def check_machine_obj():
+ samdb.searchone("CN", "cn=%s,ou=domain controllers,%s" % (self.dc2, server_nc_name))
+ self.assertRaises(ldb.LdbError, check_machine_obj)
+
+ def check_server_obj():
+ samdb.searchone("CN", server_dn)
+ self.assertRaises(ldb.LdbError, check_server_obj)
+
+ def check_ntds_guid():
+ samdb.searchone("CN", "<GUID=%s>" % ntds_guid)
+ self.assertRaises(ldb.LdbError, check_ntds_guid)
+
+ if dns_obj is not None:
+ # Check some of the objects that should have been removed
+ def check_dns_account_obj():
+ samdb.search(base=dns_obj.dn, scope=ldb.SCOPE_BASE,
+ attrs=[])
+ self.assertRaises(ldb.LdbError, check_dns_account_obj)
+
+ def test_samba_tool_drs_clone_dc_secrets(self):
+ """Tests 'samba-tool drs clone-dc-database --include-secrets' command ."""
+ server_rootdse = self._get_rootDSE(self.dc1)
+ server_nc_name = server_rootdse["defaultNamingContext"]
+ server_ds_name = server_rootdse["dsServiceName"]
+ server_ldap_service_name = str(server_rootdse["ldapServiceName"][0])
+ server_realm = server_ldap_service_name.split(":")[0]
+ self.check_output("samba-tool drs clone-dc-database %s --server=%s %s --targetdir=%s --include-secrets"
+ % (server_realm,
+ self.dc1,
+ self.cmdline_creds,
+ self.tempdir))
+ ldb_rootdse = self._get_rootDSE("ldb://" + os.path.join(self.tempdir, "private", "sam.ldb"), ldap_only=False)
+ nc_name = ldb_rootdse["defaultNamingContext"]
+ ds_name = ldb_rootdse["dsServiceName"]
+ ldap_service_name = str(server_rootdse["ldapServiceName"][0])
+
+ samdb = samba.tests.connect_samdb("ldb://" + os.path.join(self.tempdir, "private", "sam.ldb"),
+ ldap_only=False, lp=self.get_loadparm())
+ krbtgt_pw = samdb.searchone("unicodePwd", "cn=krbtgt,CN=users,%s" % nc_name)
+ self.assertIsNotNone(krbtgt_pw)
+
+ self.assertEqual(nc_name, server_nc_name)
+ # The clone should pretend to be the source server
+ self.assertEqual(ds_name, server_ds_name)
+ self.assertEqual(ldap_service_name, server_ldap_service_name)
+
+ server_dn = samdb.searchone("serverReferenceBL", "cn=%s,ou=domain controllers,%s" % (self.dc2, server_nc_name)).decode('utf8')
+ ntds_guid = samdb.searchone("objectGUID", "cn=ntds settings,%s" % server_dn).decode('utf8')
+
+ res = samdb.search(base=str(server_nc_name),
+ expression="(&(objectclass=user)(cn=dns-%s))" % (self.dc2),
+ attrs=[], scope=ldb.SCOPE_SUBTREE)
+ if len(res) == 1:
+ dns_obj = res[0]
+ else:
+ dns_obj = None
+
+ def demote_self():
+ # While we have this cloned, try demoting the server the clone pretends to be; this should fail
+ self.check_output("samba-tool domain demote --remove-other-dead-server=%s -H %s/private/sam.ldb"
+ % (self.dc1,
+ self.tempdir))
+ self.assertRaises(samba.tests.BlackboxProcessError, demote_self)
+
+ # While we have this cloned, try demoting the other server on the clone
+ self.check_output("samba-tool domain demote --remove-other-dead-server=%s -H ldb://%s/private/sam.ldb"
+ % (self.dc2,
+ self.tempdir))
+
+ # Check some of the objects that should have been removed
+ def check_machine_obj():
+ samdb.searchone("CN", "cn=%s,ou=domain controllers,%s" % (self.dc2, server_nc_name))
+ self.assertRaises(ldb.LdbError, check_machine_obj)
+
+ def check_server_obj():
+ samdb.searchone("CN", server_dn)
+ self.assertRaises(ldb.LdbError, check_server_obj)
+
+ def check_ntds_guid():
+ samdb.searchone("CN", "<GUID=%s>" % ntds_guid)
+ self.assertRaises(ldb.LdbError, check_ntds_guid)
+
+ if dns_obj is not None:
+ # Check some of the objects that should have been removed
+ def check_dns_account_obj():
+ samdb.search(base=dns_obj.dn, scope=ldb.SCOPE_BASE,
+ attrs=[])
+ self.assertRaises(ldb.LdbError, check_dns_account_obj)
+
+ def test_samba_tool_drs_clone_dc_secrets_without_targetdir(self):
+ """Tests 'samba-tool drs clone-dc-database' command without --targetdir."""
+ server_rootdse = self._get_rootDSE(self.dc1)
+ server_ldap_service_name = str(server_rootdse["ldapServiceName"][0])
+ server_realm = server_ldap_service_name.split(":")[0]
+
+ def attempt_clone():
+ self.check_output("samba-tool drs clone-dc-database %s --server=%s %s"
+ % (server_realm,
+ self.dc1,
+ self.cmdline_creds))
+ self.assertRaises(samba.tests.BlackboxProcessError, attempt_clone)
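+
+# Illustrative only (not executed by the tests above): the clone tests build
+# and run a command of roughly this shape, where REALM, DC and TARGETDIR are
+# placeholders taken from the test environment and --include-secrets is
+# optional:
+#
+#   samba-tool drs clone-dc-database REALM --server=DC \
+#       -UDOMAIN/username%password --targetdir=TARGETDIR [--include-secrets]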
diff --git a/source4/torture/drs/python/samba_tool_drs_critical.py b/source4/torture/drs/python/samba_tool_drs_critical.py
new file mode 100644
index 0000000..5260e15
--- /dev/null
+++ b/source4/torture/drs/python/samba_tool_drs_critical.py
@@ -0,0 +1,98 @@
+# Blackbox tests for "samba-tool drs" command
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Blackbox tests for samba-tool drs."""
+
+import samba.tests
+import os
+import ldb
+import drs_base
+import random
+
+class SambaToolDrsTests(drs_base.DrsBaseTestCase):
+ """Blackbox test case for samba-tool drs."""
+
+ def setUp(self):
+ super(SambaToolDrsTests, self).setUp()
+
+ self.dc1 = samba.tests.env_get_var_value("DC1")
+ self.dc2 = samba.tests.env_get_var_value("DC2")
+
+ creds = self.get_credentials()
+ self.cmdline_creds = "-U%s/%s%%%s" % (creds.get_domain(),
+ creds.get_username(), creds.get_password())
+
+ def tearDown(self):
+ self._enable_inbound_repl(self.dnsname_dc1)
+ self._enable_inbound_repl(self.dnsname_dc2)
+
+ self.rm_files('names.tdb', allow_missing=True)
+ self.rm_dirs('etc', 'msg.lock', 'private', 'state', 'bind-dns',
+ allow_missing=True)
+
+ super(SambaToolDrsTests, self).tearDown()
+
+    # This test is for the Samba 4.5 emulation servers (but runs
+    # against a normal server as well), which fail to correctly
+    # implement DRSUAPI_DRS_GET_ANC when DRSUAPI_DRS_CRITICAL_ONLY is
+    # set.  See the illustrative object chain sketched at the end of
+    # this file.
+ def test_samba_tool_drs_clone_dc_critical_object_chain(self):
+ """Tests 'samba-tool drs clone-dc-database' command with a Critical/non-critical/critical object chain."""
+
+ samdb = samba.tests.connect_samdb(self.dc1, lp=self.get_loadparm(),
+ credentials=self.get_credentials(),
+ ldap_only=True)
+ server_rootdse = samdb.search(base="",
+ scope=samba.tests.ldb.SCOPE_BASE)[0]
+ nc_name = server_rootdse["defaultNamingContext"][0]
+ server_ldap_service_name = str(server_rootdse["ldapServiceName"][0])
+ server_realm = server_ldap_service_name.split(":")[0]
+
+ not_critical_dn = f"OU=not-critical{random.randint(1, 10000000)},{nc_name}"
+ samdb.create_ou(not_critical_dn)
+ self.addCleanup(samdb.delete,
+ not_critical_dn)
+ domain_sid = samdb.get_domain_sid()
+ admin_sid = f"{domain_sid}-500"
+ samdb.rename(f"<SID={admin_sid}>",
+ f"cn=administrator,{not_critical_dn}")
+ self.addCleanup(samdb.rename,
+ f"<SID={admin_sid}>",
+ f"cn=administrator,cn=users,{nc_name}")
+
+ try:
+ self.check_output("samba-tool drs clone-dc-database %s --server=%s %s --targetdir=%s"
+ % (server_realm,
+ self.dc1,
+ self.cmdline_creds,
+ self.tempdir))
+ except samba.tests.BlackboxProcessError as e:
+ self.fail("Error calling samba-tool: %s" % e)
+
+ local_samdb = samba.tests.connect_samdb("ldb://" + os.path.join(self.tempdir, "private", "sam.ldb"),
+ ldap_only=False, lp=self.get_loadparm())
+
+ # Check administrator was replicated and is in the right place
+ res = local_samdb.search(base=str(nc_name),
+ expression="(&(objectclass=user)(cn=administrator))",
+ attrs=[], scope=ldb.SCOPE_SUBTREE)
+ self.assertEqual(len(res), 1)
+
+ admin_obj = res[0]
+
+ self.assertEqual(admin_obj.dn, ldb.Dn(samdb, f"cn=administrator,{not_critical_dn}"))
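+
+# Illustrative only: the object chain built in the test above looks roughly
+# like this (the OU name carries a random per-run suffix):
+#
+#   <nc_name>                                          critical (NC head)
+#     OU=not-critical<N>,<nc_name>                     NOT critical
+#       CN=administrator,OU=not-critical<N>,<nc_name>  critical (RID 500)
+#
+# A server that implements DRSUAPI_DRS_GET_ANC correctly while
+# DRSUAPI_DRS_CRITICAL_ONLY is set must still send the non-critical OU as an
+# ancestor, otherwise the critical administrator object cannot be applied to
+# the clone.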
diff --git a/source4/torture/drs/python/samba_tool_drs_no_dns.py b/source4/torture/drs/python/samba_tool_drs_no_dns.py
new file mode 100644
index 0000000..aad5966
--- /dev/null
+++ b/source4/torture/drs/python/samba_tool_drs_no_dns.py
@@ -0,0 +1,174 @@
+# Blackbox tests for "samba-tool drs" command
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+# Copyright (C) Catalyst.Net Ltd 2019
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""
+Blackbox tests for samba-tool drs with no DNS partitions
+
+Adapted from samba_tool_drs.py
+"""
+
+import samba.tests
+import os
+import ldb
+import drs_base
+
+from samba.tests import BlackboxProcessError
+from samba.common import get_string
+
+
+class SambaToolDrsNoDnsTests(drs_base.DrsBaseTestCase):
+ """Blackbox test case for samba-tool drs."""
+
+ def setUp(self):
+ super(SambaToolDrsNoDnsTests, self).setUp()
+
+ self.dc1 = samba.tests.env_get_var_value("DC1")
+
+ creds = self.get_credentials()
+ self.cmdline_creds = "-U%s/%s%%%s" % (creds.get_domain(),
+ creds.get_username(), creds.get_password())
+
+ def tearDown(self):
+ self._enable_inbound_repl(self.dnsname_dc1)
+ self.rm_files('names.tdb', allow_missing=True)
+ self.rm_dirs('etc', 'msg.lock', 'private', 'state', 'bind-dns',
+ allow_missing=True)
+
+ super(SambaToolDrsNoDnsTests, self).tearDown()
+
+ def _get_rootDSE(self, dc, ldap_only=True):
+ samdb = samba.tests.connect_samdb(dc, lp=self.get_loadparm(),
+ credentials=self.get_credentials(),
+ ldap_only=ldap_only)
+ return samdb.search(base="", scope=samba.tests.ldb.SCOPE_BASE)[0], samdb
+
+ def test_samba_tool_replicate_local_no_dns_tdb(self):
+ self.backend = 'tdb'
+ self._test_samba_tool_replicate_local_no_dns()
+
+ def test_samba_tool_replicate_local_no_dns_mdb(self):
+ self.backend = 'mdb'
+ self._test_samba_tool_replicate_local_no_dns()
+
+ def _test_samba_tool_replicate_local_no_dns(self):
+ """Check we can provision a database without DNS partitions
+ (and then add them afterwards)."""
+
+ server_rootdse, _ = self._get_rootDSE(self.dc1)
+ nc_name = server_rootdse["defaultNamingContext"]
+ server_ldap_service_name = str(server_rootdse["ldapServiceName"][0])
+ server_realm = server_ldap_service_name.split(":")[0]
+
+        # We have to give it a different netbiosname every time it runs,
+        # otherwise the name collision causes strange issues.  The name
+        # should also differ between test environments.
+ netbiosname = "dns" + self.backend + self.dc1
+ if len(netbiosname) > 15:
+ netbiosname = netbiosname[:15]
+
+ self.check_output("samba-tool domain join %s dc --server=%s %s --targetdir=%s --option=netbiosname=%s %s --backend-store=%s"
+ % (server_realm, self.dc1, self.cmdline_creds,
+ self.tempdir, netbiosname,
+ "--dns-backend=NONE",
+ self.backend))
+
+ new_dc_config_file = os.path.join(self.tempdir, "etc", "smb.conf")
+ new_dc_sam = os.path.join(self.tempdir, "private", "sam.ldb")
+
+ forestdns_dn = ldb.binary_encode('DC=ForestDNSZones,' + str(nc_name))
+ domaindns_dn = ldb.binary_encode('DC=DomainDNSZones,' + str(nc_name))
+
+ self.check_output("samba-tool drs replicate --local %s %s %s %s --configfile=%s --full-sync"
+ % ("invalid", self.dc1, forestdns_dn,
+ self.cmdline_creds, new_dc_config_file))
+
+ self.check_output("samba-tool drs replicate --local %s %s %s %s --configfile=%s --full-sync"
+ % ("invalid", self.dc1, domaindns_dn,
+ self.cmdline_creds, new_dc_config_file))
+
+ server_rootdse, samdb = self._get_rootDSE("ldb://" + new_dc_sam, ldap_only=False)
+ server_ds_name = ldb.binary_encode(server_rootdse["dsServiceName"][0].decode('utf-8'))
+
+ # Show that Has-Master-NCs is fixed by samba_upgradedns
+ res = samdb.search(base=server_ds_name,
+ expression="(msds-hasmasterncs=%s)" % forestdns_dn)
+ self.assertEqual(len(res), 0)
+ res = samdb.search(base=server_ds_name,
+ expression="(msds-hasmasterncs=%s)" % domaindns_dn)
+ self.assertEqual(len(res), 0)
+
+ self.check_output("samba_upgradedns --configfile=%s" % (new_dc_config_file))
+
+ res = samdb.search(base=server_ds_name,
+ expression="(msds-hasmasterncs=%s)" % forestdns_dn)
+ self.assertEqual(len(res), 1)
+ res = samdb.search(base=server_ds_name,
+ expression="(msds-hasmasterncs=%s)" % domaindns_dn)
+ self.assertEqual(len(res), 1)
+
+        # Show that the replica locations are fixed by dbcheck
+ res = samdb.search(controls=["search_options:1:2"],
+ expression="(&(msds-nc-replica-locations=%s)(ncname=%s))"
+ % (server_ds_name, forestdns_dn))
+ self.assertEqual(len(res), 0)
+ res = samdb.search(controls=["search_options:1:2"],
+ expression="(&(msds-nc-replica-locations=%s)(ncname=%s))"
+ % (server_ds_name, domaindns_dn))
+ self.assertEqual(len(res), 0)
+
+ try:
+ # This fixes any forward-link-backward-link issues with the tools
+ self.check_output("samba-tool dbcheck --configfile=%s --cross-ncs --fix --yes" % (new_dc_config_file))
+ except BlackboxProcessError as e:
+ self.assertTrue("Checked " in get_string(e.stdout))
+
+ self.check_output("samba-tool dbcheck --configfile=%s --cross-ncs" % (new_dc_config_file))
+
+ # Compare the two directories
+ self.check_output("samba-tool ldapcmp ldap://%s ldb://%s %s --filter=%s" %
+ (self.dc1, new_dc_sam, self.cmdline_creds,
+ "msDs-masteredBy,msDS-NC-Replica-Locations,msDS-hasMasterNCs"))
+
+ # Check all ForestDNS connections and backlinks
+ res = samdb.search(base=server_ds_name,
+ expression="(msds-hasmasterncs=%s)" % forestdns_dn)
+ self.assertEqual(len(res), 1)
+ res = samdb.search(base=forestdns_dn,
+ expression="(msds-masteredby=%s)" % server_ds_name)
+ self.assertEqual(len(res), 1)
+ res = samdb.search(controls=["search_options:1:2"],
+ expression="(&(msds-nc-replica-locations=%s)(ncname=%s))"
+ % (server_ds_name, forestdns_dn))
+ self.assertEqual(len(res), 1)
+
+ # Check all DomainDNS connections and backlinks
+ res = samdb.search(base=server_ds_name,
+ expression="(msds-hasmasterncs=%s)" % domaindns_dn)
+ self.assertEqual(len(res), 1)
+ res = samdb.search(base=domaindns_dn,
+ expression="(msds-masteredby=%s)" % server_ds_name)
+ self.assertEqual(len(res), 1)
+ res = samdb.search(controls=["search_options:1:2"],
+ expression="(&(msds-nc-replica-locations=%s)(ncname=%s))"
+ % (server_ds_name, domaindns_dn))
+ self.assertEqual(len(res), 1)
+
+ # Demote the DC we created in the test
+ self.check_output("samba-tool domain demote --remove-other-dead-server=%s -H ldap://%s %s --configfile=%s"
+ % (netbiosname, self.dc1, self.cmdline_creds, new_dc_config_file))
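+
+# Illustrative only: for a domain whose default NC is
+# DC=samdom,DC=example,DC=com, the two DNS application partitions replicated
+# above would be
+#
+#   DC=ForestDNSZones,DC=samdom,DC=example,DC=com
+#   DC=DomainDNSZones,DC=samdom,DC=example,DC=com
+#
+# and, as asserted in the test, samba_upgradedns then repairs
+# msDS-hasMasterNCs while dbcheck repairs msDS-NC-Replica-Locations for them.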
diff --git a/source4/torture/drs/python/samba_tool_drs_showrepl.py b/source4/torture/drs/python/samba_tool_drs_showrepl.py
new file mode 100644
index 0000000..0f0ed86
--- /dev/null
+++ b/source4/torture/drs/python/samba_tool_drs_showrepl.py
@@ -0,0 +1,377 @@
+# Blackbox tests for "samba-tool drs" command
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Blackbox tests for samba-tool drs showrepl."""
+import samba.tests
+import drs_base
+from samba.dcerpc import drsuapi
+from samba import drs_utils
+import os
+import json
+import ldb
+import random
+from samba.common import get_string
+
+GUID_RE = r'[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}'
+HEX8_RE = r'0x[\da-f]{8}'
+DN_RE = r'(?:(?:CN|DC)=[\\:\w -]+,)+DC=com'
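+
+# Illustrative only -- hypothetical strings these patterns are expected to
+# match:
+#   GUID_RE: 4a0f39dc-5c11-44a4-9d3a-2b1c8a7e6f10
+#   HEX8_RE: 0x00000070
+#   DN_RE:   CN=Configuration,DC=samba,DC=example,DC=com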
+
+
+class SambaToolDrsShowReplTests(drs_base.DrsBaseTestCase):
+ """Blackbox test case for samba-tool drs."""
+
+ def setUp(self):
+ super(SambaToolDrsShowReplTests, self).setUp()
+
+ self.dc1 = samba.tests.env_get_var_value("DC1")
+ self.dc2 = samba.tests.env_get_var_value("DC2")
+
+ creds = self.get_credentials()
+ self.cmdline_creds = "-U%s/%s%%%s" % (creds.get_domain(),
+ creds.get_username(),
+ creds.get_password())
+
+ def test_samba_tool_showrepl(self):
+ """Tests 'samba-tool drs showrepl' command.
+ """
+ nc_list = [self.config_dn, self.domain_dn, self.schema_dn]
+ dns_name = self.ldb_dc1.domain_dns_name()
+
+ # Manually run kcc to create a "Connection" object, so we can find
+ # this for the expected output below.
+ kcc_out = self.check_output("samba-tool drs kcc %s %s" % (self.dc1,
+ self.cmdline_creds))
+ self.assertIn(b"successful", kcc_out)
+
+        # Run replicate to ensure that incoming and outgoing partners
+        # exist, so we can expect these in the output below.
+ for nc in nc_list:
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, nc_dn=nc, forced=True)
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, nc_dn=nc, forced=True)
+
+ # Output should be like:
+ # <site-name>/<domain-name>
+ # DSA Options: <hex-options>
+ # DSA object GUID: <DSA-object-GUID>
+ # DSA invocationId: <DSA-invocationId>
+ # <Inbound-connections-list>
+ # <Outbound-connections-list>
+ # <KCC-objects>
+ # ...
+        # TODO: Perhaps we should check at least for the
+        # DSA's objectGUID and invocationId
+ out = self.check_output("samba-tool drs showrepl "
+ "%s %s" % (self.dc1, self.cmdline_creds))
+
+ out = get_string(out)
+ # We want to assert that we are getting the same results, but
+ # dates and GUIDs change randomly.
+ #
+        # There are sections with headers like ==== THIS ====
+ (header,
+ _inbound, inbound,
+ _outbound, outbound,
+ _conn, conn) = out.split("====")
+
+ self.assertEqual(_inbound, ' INBOUND NEIGHBORS ')
+ self.assertEqual(_outbound, ' OUTBOUND NEIGHBORS ')
+ self.assertEqual(_conn, ' KCC CONNECTION OBJECTS ')
+
+ self.assertRegex(header,
+ r'^Default-First-Site-Name\\%s\s+'
+ r"DSA Options: %s\s+"
+ r"DSA object GUID: %s\s+"
+ r"DSA invocationId: %s" %
+ (self.dc1.upper(), HEX8_RE, GUID_RE, GUID_RE))
+
+ # We don't assert the DomainDnsZones and ForestDnsZones are
+ # there because we don't know that they have been set up yet.
+
+ for p in nc_list:
+ self.assertRegex(
+ inbound,
+ r'%s\n'
+ r'\tDefault-First-Site-Name\\[A-Z0-9]+ via RPC\n'
+ r'\t\tDSA object GUID: %s\n'
+ r'\t\tLast attempt @ [^\n]+\n'
+ r'\t\t\d+ consecutive failure\(s\).\n'
+ r'\t\tLast success @ [^\n]+\n'
+ r'\n' % (p, GUID_RE),
+ msg="%s inbound missing" % p)
+
+ self.assertRegex(
+ outbound,
+ r'%s\n'
+ r'\tDefault-First-Site-Name\\[A-Z0-9]+ via RPC\n'
+ r'\t\tDSA object GUID: %s\n'
+ r'\t\tLast attempt @ [^\n]+\n'
+ r'\t\t\d+ consecutive failure\(s\).\n'
+ r'\t\tLast success @ [^\n]+\n'
+ r'\n' % (p, GUID_RE),
+ msg="%s outbound missing" % p)
+
+ self.assertRegex(conn,
+ r'Connection --\n'
+ r'\tConnection name: %s\n'
+ r'\tEnabled : TRUE\n'
+ r'\tServer DNS name : \w+.%s\n'
+ r'\tServer DN name : %s'
+ r'\n' % (GUID_RE, dns_name, DN_RE))
+
+ def test_samba_tool_showrepl_json(self):
+ """Tests 'samba-tool drs showrepl --json' command.
+ """
+ dns_name = self.ldb_dc1.domain_dns_name()
+ out = self.check_output("samba-tool drs showrepl %s %s --json" %
+ (self.dc1, self.cmdline_creds))
+ d = json.loads(get_string(out))
+ self.assertEqual(set(d), set(['repsFrom',
+ 'repsTo',
+ "NTDSConnections",
+ "dsa"]))
+
+ # dsa
+ for k in ["objectGUID", "invocationId"]:
+ self.assertRegex(d['dsa'][k], '^%s$' % GUID_RE)
+ self.assertTrue(isinstance(d['dsa']["options"], int))
+
+ # repsfrom and repsto
+ for reps in (d['repsFrom'], d['repsTo']):
+ for r in reps:
+ for k in ('NC dn', "NTDS DN"):
+ self.assertRegex(r[k], '^%s$' % DN_RE)
+ for k in ("last attempt time",
+ "last attempt message",
+ "last success"):
+ self.assertTrue(isinstance(r[k], str))
+ self.assertRegex(r["DSA objectGUID"], '^%s$' % GUID_RE)
+ self.assertTrue(isinstance(r["consecutive failures"], int))
+
+ # ntdsconnection
+ for n in d["NTDSConnections"]:
+ self.assertTrue(n["dns name"].endswith(dns_name))
+ self.assertRegex(n["name"], "^%s$" % GUID_RE)
+ self.assertTrue(isinstance(n['enabled'], bool))
+ self.assertTrue(isinstance(n['options'], int))
+ self.assertTrue(isinstance(n['replicates NC'], list))
+ self.assertRegex(n["remote DN"], "^%s$" % DN_RE)
+
+ def _force_all_reps(self, samdb, dc, direction):
+ if direction == 'inbound':
+ info_type = drsuapi.DRSUAPI_DS_REPLICA_INFO_NEIGHBORS
+ elif direction == 'outbound':
+ info_type = drsuapi.DRSUAPI_DS_REPLICA_INFO_REPSTO
+ else:
+ raise ValueError("expected 'inbound' or 'outbound'")
+
+ self._enable_all_repl(dc)
+ lp = self.get_loadparm()
+ creds = self.get_credentials()
+ drsuapi_conn, drsuapi_handle, _ = drs_utils.drsuapi_connect(dc, lp, creds)
+ req1 = drsuapi.DsReplicaGetInfoRequest1()
+ req1.info_type = info_type
+ _, info = drsuapi_conn.DsReplicaGetInfo(drsuapi_handle, 1, req1)
+ for x in info.array:
+            # x.source_dsa_address is not what we need here; we use
+            # x.source_dsa_obj_dn instead, and we need to filter out
+            # RODCs and deleted DCs.
+
+ res = []
+ try:
+ res = samdb.search(base=x.source_dsa_obj_dn,
+ scope=ldb.SCOPE_BASE,
+ attrs=['msDS-isRODC', 'isDeleted'],
+ controls=['show_deleted:0'])
+ except ldb.LdbError as e:
+ if e.args[0] != ldb.ERR_NO_SUCH_OBJECT:
+ raise
+
+ if (len(res) == 0 or
+ len(res[0].get('msDS-isRODC', '')) > 0 or
+ res[0]['isDeleted'] == 'TRUE'):
+ continue
+
+ dsa_dn = str(ldb.Dn(samdb, x.source_dsa_obj_dn).parent())
+ try:
+ res = samdb.search(base=dsa_dn,
+ scope=ldb.SCOPE_BASE,
+ attrs=['dNSHostName'])
+ except ldb.LdbError as e:
+ if e.args[0] != ldb.ERR_NO_SUCH_OBJECT:
+ raise
+ continue
+
+ if len(res) == 0:
+ print("server %s has no dNSHostName" % dsa_dn)
+ continue
+
+ remote = res[0].get('dNSHostName', [''])[0]
+ if remote:
+ self._enable_all_repl(remote)
+
+ if direction == 'inbound':
+ src, dest = remote, dc
+ else:
+ src, dest = dc, remote
+ self._net_drs_replicate(dest, src, forced=True)
+
+ def test_samba_tool_showrepl_pull_summary_all_good(self):
+ """Tests 'samba-tool drs showrepl --pull-summary' command."""
+ # To be sure that all is good we need to force replication
+ # with everyone (because others might have it turned off), and
+ # turn replication on for them in case they suddenly decide to
+ # try again.
+ #
+ # We don't restore them to the non-auto-replication state.
+ samdb1 = self.getSamDB("-H", "ldap://%s" % self.dc1,
+ self.cmdline_creds)
+ self._enable_all_repl(self.dc1)
+ self._force_all_reps(samdb1, self.dc1, 'inbound')
+ self._force_all_reps(samdb1, self.dc1, 'outbound')
+ old_no_color = os.environ.get('NO_COLOR')
+ all_good_green = "\033[1;32m[ALL GOOD]\033[0m\n"
+ all_good = "[ALL GOOD]\n"
+
+ try:
+ out = self.check_output(
+ "samba-tool drs showrepl --pull-summary %s %s" %
+ (self.dc1, self.cmdline_creds))
+ out = get_string(out)
+ self.assertStringsEqual(out, all_good)
+
+ out = self.check_output("samba-tool drs showrepl --pull-summary "
+ "--color=yes %s %s" %
+ (self.dc1, self.cmdline_creds))
+ out = get_string(out)
+ self.assertStringsEqual(out, all_good_green)
+
+ # --verbose output is still quiet when all is good.
+ out = self.check_output(
+ "samba-tool drs showrepl --pull-summary -v %s %s" %
+ (self.dc1, self.cmdline_creds))
+ out = get_string(out)
+ self.assertStringsEqual(out, all_good)
+
+ out = self.check_output("samba-tool drs showrepl --pull-summary -v "
+ "--color=always %s %s" %
+ (self.dc1, self.cmdline_creds))
+ out = get_string(out)
+ self.assertStringsEqual(out, all_good_green)
+
+ out = self.check_output("samba-tool drs showrepl --pull-summary -v "
+ "--color=never %s %s" %
+ (self.dc1, self.cmdline_creds))
+ out = get_string(out)
+ self.assertStringsEqual(out, all_good)
+
+ os.environ['NO_COLOR'] = 'bean'
+
+ out = self.check_output("samba-tool drs showrepl --pull-summary -v "
+ "--color=auto %s %s" %
+ (self.dc1, self.cmdline_creds))
+ out = get_string(out)
+ self.assertStringsEqual(out, all_good)
+
+ os.environ['NO_COLOR'] = ''
+
+ out = self.check_output("samba-tool drs showrepl --pull-summary -v "
+ "--color=auto %s %s" %
+ (self.dc1, self.cmdline_creds))
+ out = get_string(out)
+ self.assertStringsEqual(out, all_good_green)
+
+ except samba.tests.BlackboxProcessError as e:
+ self.fail(str(e))
+ finally:
+ if old_no_color is None:
+ os.environ.pop('NO_COLOR', None)
+ else:
+ os.environ['NO_COLOR'] = old_no_color
+
+ def test_samba_tool_showrepl_summary_forced_failure(self):
+ """Tests 'samba-tool drs showrepl --summary' command when we break the
+ network on purpose.
+ """
+ self.addCleanup(self._enable_all_repl, self.dc1)
+ self._disable_all_repl(self.dc1)
+
+ samdb1 = self.getSamDB("-H", "ldap://%s" % self.dc1,
+ self.cmdline_creds)
+ samdb2 = self.getSamDB("-H", "ldap://%s" % self.dc2,
+ self.cmdline_creds)
+ domain_dn = samdb1.domain_dn()
+
+ # Add some things to NOT replicate
+ ou1 = "OU=dc1.%x,%s" % (random.randrange(1 << 64), domain_dn)
+ ou2 = "OU=dc2.%x,%s" % (random.randrange(1 << 64), domain_dn)
+ samdb1.add({
+ "dn": ou1,
+ "objectclass": "organizationalUnit"
+ })
+ self.addCleanup(samdb1.delete, ou1, ['tree_delete:1'])
+ samdb2.add({
+ "dn": ou2,
+ "objectclass": "organizationalUnit"
+ })
+ self.addCleanup(samdb2.delete, ou2, ['tree_delete:1'])
+
+ dn1 = 'cn=u1.%%d,%s' % (ou1)
+ dn2 = 'cn=u2.%%d,%s' % (ou2)
+
+ try:
+ for i in range(100):
+ samdb1.add({
+ "dn": dn1 % i,
+ "objectclass": "user"
+ })
+ samdb2.add({
+ "dn": dn2 % i,
+ "objectclass": "user"
+ })
+ out = self.check_output("samba-tool drs showrepl --summary -v "
+ "%s %s" %
+ (self.dc1, self.cmdline_creds))
+ out = get_string(out)
+ self.assertStringsEqual('[ALL GOOD]', out, strip=True)
+ out = self.check_output("samba-tool drs showrepl --summary -v "
+ "--color=yes %s %s" %
+ (self.dc2, self.cmdline_creds))
+ out = get_string(out)
+ self.assertIn('[ALL GOOD]', out)
+
+ except samba.tests.BlackboxProcessError as e:
+ e_stdout = get_string(e.stdout)
+ e_stderr = get_string(e.stderr)
+ print("Good, failed as expected after %d rounds: %r" % (i, e.cmd))
+ self.assertIn('There are failing connections', e_stdout,
+ msg=('stdout: %r\nstderr: %r\nretcode: %s'
+ '\nmessage: %r\ncmd: %r') % (e_stdout,
+ e_stderr,
+ e.returncode,
+ e.msg,
+ e.cmd))
+ self.assertRegex(
+ e_stdout,
+ r'result 845[67] '
+ r'\(WERR_DS_DRA_(SINK|SOURCE)_DISABLED\)',
+ msg=("The process should have failed "
+ "because replication was forced off, "
+ "but it failed for some other reason."))
+ self.assertIn('consecutive failure(s).', e_stdout)
+ else:
+ self.fail("No DRS failure noticed after 100 rounds of trying")
diff --git a/source4/torture/drs/rpc/dssync.c b/source4/torture/drs/rpc/dssync.c
new file mode 100644
index 0000000..64d0498
--- /dev/null
+++ b/source4/torture/drs/rpc/dssync.c
@@ -0,0 +1,1072 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ DsGetNCChanges replication test
+
+ Copyright (C) Stefan (metze) Metzmacher 2005
+ Copyright (C) Brad Henry 2005
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "includes.h"
+#include "lib/cmdline/cmdline.h"
+#include "librpc/gen_ndr/ndr_drsuapi_c.h"
+#include "librpc/gen_ndr/ndr_drsblobs.h"
+#include "libcli/cldap/cldap.h"
+#include "torture/torture.h"
+#include "../libcli/drsuapi/drsuapi.h"
+#include "auth/gensec/gensec.h"
+#include "param/param.h"
+#include "dsdb/samdb/samdb.h"
+#include "torture/rpc/torture_rpc.h"
+#include "torture/drs/proto.h"
+#include "lib/tsocket/tsocket.h"
+#include "libcli/resolve/resolve.h"
+#include "lib/util/util_paths.h"
+
+#undef strcasecmp
+
+struct DsSyncBindInfo {
+ struct dcerpc_pipe *drs_pipe;
+ struct dcerpc_binding_handle *drs_handle;
+ struct drsuapi_DsBind req;
+ struct GUID bind_guid;
+ struct drsuapi_DsBindInfoCtr our_bind_info_ctr;
+ struct drsuapi_DsBindInfo28 our_bind_info28;
+ struct drsuapi_DsBindInfo28 peer_bind_info28;
+ struct policy_handle bind_handle;
+};
+
+struct DsSyncLDAPInfo {
+ struct ldb_context *ldb;
+};
+
+struct DsSyncTest {
+ struct dcerpc_binding *drsuapi_binding;
+
+ const char *ldap_url;
+ const char *dest_address;
+ const char *domain_dn;
+ const char *config_dn;
+ const char *schema_dn;
+
+ /* what we need to do as 'Administrator' */
+ struct {
+ struct cli_credentials *credentials;
+ struct DsSyncBindInfo drsuapi;
+ struct DsSyncLDAPInfo ldap;
+ } admin;
+
+ /* what we need to do as the new dc machine account */
+ struct {
+ struct cli_credentials *credentials;
+ struct DsSyncBindInfo drsuapi;
+ struct drsuapi_DsGetDCInfo2 dc_info2;
+ struct GUID invocation_id;
+ struct GUID object_guid;
+ } new_dc;
+
+ /* info about the old dc */
+ struct {
+ struct drsuapi_DsGetDomainControllerInfo dc_info;
+ } old_dc;
+};
+
+static struct DsSyncTest *test_create_context(struct torture_context *tctx)
+{
+ NTSTATUS status;
+ struct DsSyncTest *ctx;
+ struct drsuapi_DsBindInfo28 *our_bind_info28;
+ struct drsuapi_DsBindInfoCtr *our_bind_info_ctr;
+ const char *binding = torture_setting_string(tctx, "binding", NULL);
+ const char *host;
+ struct nbt_name name;
+
+ ctx = talloc_zero(tctx, struct DsSyncTest);
+ if (!ctx) return NULL;
+
+ status = dcerpc_parse_binding(ctx, binding, &ctx->drsuapi_binding);
+ if (!NT_STATUS_IS_OK(status)) {
+ printf("Bad binding string %s\n", binding);
+ return NULL;
+ }
+ status = dcerpc_binding_set_flags(ctx->drsuapi_binding,
+ DCERPC_SIGN | DCERPC_SEAL, 0);
+ if (!NT_STATUS_IS_OK(status)) {
+ printf("dcerpc_binding_set_flags - %s\n", nt_errstr(status));
+ return NULL;
+ }
+
+ host = dcerpc_binding_get_string_option(ctx->drsuapi_binding, "host");
+
+ ctx->ldap_url = talloc_asprintf(ctx, "ldap://%s", host);
+
+ make_nbt_name_server(&name, host);
+
+ /* do an initial name resolution to find its IP */
+ status = resolve_name_ex(lpcfg_resolve_context(tctx->lp_ctx),
+ 0, 0, &name, tctx,
+ &ctx->dest_address, tctx->ev);
+ if (!NT_STATUS_IS_OK(status)) {
+ printf("Failed to resolve %s - %s\n",
+ name.name, nt_errstr(status));
+ return NULL;
+ }
+
+ /* ctx->admin ...*/
+ ctx->admin.credentials = samba_cmdline_get_creds();
+
+ our_bind_info28 = &ctx->admin.drsuapi.our_bind_info28;
+ our_bind_info28->supported_extensions = 0xFFFFFFFF;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_ADDENTRYREPLY_V3;
+ our_bind_info28->site_guid = GUID_zero();
+ our_bind_info28->pid = 0;
+ our_bind_info28->repl_epoch = 1;
+
+ our_bind_info_ctr = &ctx->admin.drsuapi.our_bind_info_ctr;
+ our_bind_info_ctr->length = 28;
+ our_bind_info_ctr->info.info28 = *our_bind_info28;
+
+ GUID_from_string(DRSUAPI_DS_BIND_GUID, &ctx->admin.drsuapi.bind_guid);
+
+ ctx->admin.drsuapi.req.in.bind_guid = &ctx->admin.drsuapi.bind_guid;
+ ctx->admin.drsuapi.req.in.bind_info = our_bind_info_ctr;
+ ctx->admin.drsuapi.req.out.bind_handle = &ctx->admin.drsuapi.bind_handle;
+
+ /* ctx->new_dc ...*/
+ ctx->new_dc.credentials = samba_cmdline_get_creds();
+
+ our_bind_info28 = &ctx->new_dc.drsuapi.our_bind_info28;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_BASE;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_ASYNC_REPLICATION;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_REMOVEAPI;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_MOVEREQ_V2;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_GETCHG_COMPRESS;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V1;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_RESTORE_USN_OPTIMIZATION;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_KCC_EXECUTE;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_ADDENTRY_V2;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_LINKED_VALUE_REPLICATION;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V2;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_INSTANCE_TYPE_NOT_REQ_ON_MOD;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_CRYPTO_BIND;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_GET_REPL_INFO;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_STRONG_ENCRYPTION;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V01;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_TRANSITIVE_MEMBERSHIP;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_ADD_SID_HISTORY;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_POST_BETA3;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_GET_MEMBERSHIPS2;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V6;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_NONDOMAIN_NCS;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V8;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V5;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V6;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_ADDENTRYREPLY_V3;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V7;
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_VERIFY_OBJECT;
+ if (lpcfg_parm_bool(tctx->lp_ctx, NULL, "dssync", "xpress", false)) {
+ our_bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_XPRESS_COMPRESS;
+ }
+ our_bind_info28->site_guid = GUID_zero();
+ our_bind_info28->pid = 0;
+ our_bind_info28->repl_epoch = 0;
+
+ our_bind_info_ctr = &ctx->new_dc.drsuapi.our_bind_info_ctr;
+ our_bind_info_ctr->length = 28;
+ our_bind_info_ctr->info.info28 = *our_bind_info28;
+
+ GUID_from_string(DRSUAPI_DS_BIND_GUID_W2K3, &ctx->new_dc.drsuapi.bind_guid);
+
+ ctx->new_dc.drsuapi.req.in.bind_guid = &ctx->new_dc.drsuapi.bind_guid;
+ ctx->new_dc.drsuapi.req.in.bind_info = our_bind_info_ctr;
+ ctx->new_dc.drsuapi.req.out.bind_handle = &ctx->new_dc.drsuapi.bind_handle;
+
+ ctx->new_dc.invocation_id = ctx->new_dc.drsuapi.bind_guid;
+
+ /* ctx->old_dc ...*/
+
+ return ctx;
+}
+
+static bool _test_DsBind(struct torture_context *tctx,
+ struct DsSyncTest *ctx, struct cli_credentials *credentials, struct DsSyncBindInfo *b)
+{
+ NTSTATUS status;
+ bool ret = true;
+
+ status = dcerpc_pipe_connect_b(ctx,
+ &b->drs_pipe, ctx->drsuapi_binding,
+ &ndr_table_drsuapi,
+ credentials, tctx->ev, tctx->lp_ctx);
+
+ if (!NT_STATUS_IS_OK(status)) {
+ printf("Failed to connect to server as a BDC: %s\n", nt_errstr(status));
+ return false;
+ }
+ b->drs_handle = b->drs_pipe->binding_handle;
+
+ status = dcerpc_drsuapi_DsBind_r(b->drs_handle, ctx, &b->req);
+ if (!NT_STATUS_IS_OK(status)) {
+ const char *errstr = nt_errstr(status);
+ printf("dcerpc_drsuapi_DsBind failed - %s\n", errstr);
+ ret = false;
+ } else if (!W_ERROR_IS_OK(b->req.out.result)) {
+ printf("DsBind failed - %s\n", win_errstr(b->req.out.result));
+ ret = false;
+ }
+
+ ZERO_STRUCT(b->peer_bind_info28);
+ if (b->req.out.bind_info) {
+ switch (b->req.out.bind_info->length) {
+ case 24: {
+ struct drsuapi_DsBindInfo24 *info24;
+ info24 = &b->req.out.bind_info->info.info24;
+ b->peer_bind_info28.supported_extensions= info24->supported_extensions;
+ b->peer_bind_info28.site_guid = info24->site_guid;
+ b->peer_bind_info28.pid = info24->pid;
+ b->peer_bind_info28.repl_epoch = 0;
+ break;
+ }
+ case 28: {
+ b->peer_bind_info28 = b->req.out.bind_info->info.info28;
+ break;
+ }
+ case 32: {
+ struct drsuapi_DsBindInfo32 *info32;
+ info32 = &b->req.out.bind_info->info.info32;
+ b->peer_bind_info28.supported_extensions= info32->supported_extensions;
+ b->peer_bind_info28.site_guid = info32->site_guid;
+ b->peer_bind_info28.pid = info32->pid;
+ b->peer_bind_info28.repl_epoch = info32->repl_epoch;
+ break;
+ }
+ case 48: {
+ struct drsuapi_DsBindInfo48 *info48;
+ info48 = &b->req.out.bind_info->info.info48;
+ b->peer_bind_info28.supported_extensions= info48->supported_extensions;
+ b->peer_bind_info28.site_guid = info48->site_guid;
+ b->peer_bind_info28.pid = info48->pid;
+ b->peer_bind_info28.repl_epoch = info48->repl_epoch;
+ break;
+ }
+ case 52: {
+ struct drsuapi_DsBindInfo52 *info52;
+ info52 = &b->req.out.bind_info->info.info52;
+ b->peer_bind_info28.supported_extensions= info52->supported_extensions;
+ b->peer_bind_info28.site_guid = info52->site_guid;
+ b->peer_bind_info28.pid = info52->pid;
+ b->peer_bind_info28.repl_epoch = info52->repl_epoch;
+ break;
+ }
+ default:
+ printf("DsBind - warning: unknown BindInfo length: %u\n",
+ b->req.out.bind_info->length);
+ }
+ }
+
+ return ret;
+}
+
+static bool test_LDAPBind(struct torture_context *tctx, struct DsSyncTest *ctx,
+ struct cli_credentials *credentials, struct DsSyncLDAPInfo *l)
+{
+	int ret;
+
+ struct ldb_context *ldb;
+
+ const char *modules_option[] = { "modules:paged_searches", NULL };
+ ctx->admin.ldap.ldb = ldb = ldb_init(ctx, tctx->ev);
+ if (ldb == NULL) {
+ return false;
+ }
+
+ /* Despite us loading the schema from the AD server, we need
+ * the samba handlers to get the extended DN syntax stuff */
+ ret = ldb_register_samba_handlers(ldb);
+ if (ret != LDB_SUCCESS) {
+ talloc_free(ldb);
+		return false;
+ }
+
+ ldb_set_modules_dir(ldb, modules_path(ldb, "ldb"));
+
+ if (ldb_set_opaque(ldb, "credentials", credentials)) {
+ talloc_free(ldb);
+		return false;
+ }
+
+ if (ldb_set_opaque(ldb, "loadparm", tctx->lp_ctx)) {
+ talloc_free(ldb);
+		return false;
+ }
+
+ ret = ldb_connect(ldb, ctx->ldap_url, 0, modules_option);
+ if (ret != LDB_SUCCESS) {
+ talloc_free(ldb);
+ torture_assert_int_equal(tctx, ret, LDB_SUCCESS, "Failed to make LDB connection to target");
+ }
+
+ printf("connected to LDAP: %s\n", ctx->ldap_url);
+
+ return true;
+}
+
+static bool test_GetInfo(struct torture_context *tctx, struct DsSyncTest *ctx)
+{
+ struct ldb_context *ldb = ctx->admin.ldap.ldb;
+
+ /* We must have LDB connection ready by this time */
+ SMB_ASSERT(ldb != NULL);
+
+ ctx->domain_dn = ldb_dn_get_linearized(ldb_get_default_basedn(ldb));
+ torture_assert(tctx, ctx->domain_dn != NULL, "Failed to get Domain DN");
+
+ ctx->config_dn = ldb_dn_get_linearized(ldb_get_config_basedn(ldb));
+	torture_assert(tctx, ctx->config_dn != NULL, "Failed to get Config DN");
+
+ ctx->schema_dn = ldb_dn_get_linearized(ldb_get_schema_basedn(ldb));
+	torture_assert(tctx, ctx->schema_dn != NULL, "Failed to get Schema DN");
+
+ return true;
+}
+
+static bool test_analyse_objects(struct torture_context *tctx,
+ struct DsSyncTest *ctx,
+ const char *partition,
+ const struct drsuapi_DsReplicaOIDMapping_Ctr *mapping_ctr,
+ uint32_t object_count,
+ const struct drsuapi_DsReplicaObjectListItemEx *first_object,
+ const DATA_BLOB *gensec_skey)
+{
+ static uint32_t object_id;
+ const char *save_values_dir;
+ const struct drsuapi_DsReplicaObjectListItemEx *cur;
+ struct ldb_context *ldb = ctx->admin.ldap.ldb;
+ struct ldb_dn *deleted_dn;
+ WERROR status;
+ int i, j, ret;
+ struct dsdb_extended_replicated_objects *objs;
+ struct ldb_extended_dn_control *extended_dn_ctrl;
+ struct dsdb_schema *ldap_schema;
+ struct ldb_dn *partition_dn = ldb_dn_new(tctx, ldb, partition);
+
+	torture_assert_not_null(tctx, partition_dn, "Failed to parse partition DN as a DN");
+
+ /* load dsdb_schema using remote prefixMap */
+ torture_assert(tctx,
+ drs_util_dsdb_schema_load_ldb(tctx, ldb, mapping_ctr, false),
+ "drs_util_dsdb_schema_load_ldb() failed");
+ ldap_schema = dsdb_get_schema(ldb, NULL);
+
+ status = dsdb_replicated_objects_convert(ldb,
+ ldap_schema,
+ partition_dn,
+ mapping_ctr,
+ object_count,
+ first_object,
+ 0, NULL,
+ NULL, NULL,
+ gensec_skey,
+ 0,
+ ctx, &objs);
+ torture_assert_werr_ok(tctx, status, "dsdb_extended_replicated_objects_convert() failed!");
+
+ extended_dn_ctrl = talloc(objs, struct ldb_extended_dn_control);
+ extended_dn_ctrl->type = 1;
+
+ deleted_dn = ldb_dn_new(objs, ldb, partition);
+ ldb_dn_add_child_fmt(deleted_dn, "CN=Deleted Objects");
+
+ for (i=0; i < objs->num_objects; i++) {
+ struct ldb_request *search_req;
+ struct ldb_result *res;
+ struct ldb_message *new_msg, *drs_msg, *ldap_msg;
+ size_t num_attrs = objs->objects[i].msg->num_elements+1;
+ const char **attrs = talloc_array(objs, const char *, num_attrs);
+ for (j=0; j < objs->objects[i].msg->num_elements; j++) {
+ attrs[j] = objs->objects[i].msg->elements[j].name;
+ }
+ attrs[j] = NULL;
+ res = talloc_zero(objs, struct ldb_result);
+ if (!res) {
+			return false;
+ }
+ ret = ldb_build_search_req(&search_req, ldb, objs,
+ objs->objects[i].msg->dn,
+ LDB_SCOPE_BASE,
+ NULL,
+ attrs,
+ NULL,
+ res,
+ ldb_search_default_callback,
+ NULL);
+ if (ret != LDB_SUCCESS) {
+ return false;
+ }
+ talloc_steal(search_req, res);
+ ret = ldb_request_add_control(search_req, LDB_CONTROL_SHOW_DELETED_OID, true, NULL);
+ if (ret != LDB_SUCCESS) {
+ return false;
+ }
+
+ ret = ldb_request_add_control(search_req, LDB_CONTROL_EXTENDED_DN_OID, true, extended_dn_ctrl);
+ if (ret != LDB_SUCCESS) {
+ return false;
+ }
+
+ ret = ldb_request(ldb, search_req);
+ if (ret == LDB_SUCCESS) {
+ ret = ldb_wait(search_req->handle, LDB_WAIT_ALL);
+ }
+
+ torture_assert_int_equal(tctx, ret, LDB_SUCCESS,
+ talloc_asprintf(tctx,
+ "Could not re-fetch object just delivered over DRS: %s",
+ ldb_errstring(ldb)));
+ torture_assert_int_equal(tctx, res->count, 1, "Could not re-fetch object just delivered over DRS");
+ ldap_msg = res->msgs[0];
+ for (j=0; j < ldap_msg->num_elements; j++) {
+ ldap_msg->elements[j].flags = LDB_FLAG_MOD_ADD;
+			/* For unknown reasons, there is no nTSecurityDescriptor on CN=Deleted Objects over LDAP, but there is over DRS! Skip it on both transports for now so the objects compare equal. */
+ if ((ldb_attr_cmp(ldap_msg->elements[j].name, "nTSecurityDescriptor") == 0) &&
+ (ldb_dn_compare(ldap_msg->dn, deleted_dn) == 0)) {
+ ldb_msg_remove_element(ldap_msg, &ldap_msg->elements[j]);
+ /* Don't skip one */
+ j--;
+ }
+ }
+
+ ret = ldb_msg_normalize(ldb, search_req,
+ objs->objects[i].msg, &drs_msg);
+ torture_assert(tctx, ret == LDB_SUCCESS,
+ "ldb_msg_normalize() has failed");
+
+ for (j=0; j < drs_msg->num_elements; j++) {
+ if (drs_msg->elements[j].num_values == 0) {
+ ldb_msg_remove_element(drs_msg, &drs_msg->elements[j]);
+ /* Don't skip one */
+ j--;
+
+ /* For unknown reasons, there is no nTSecurityDescriptor on cn=deleted objects over LDAP, but there is over DRS! */
+ } else if ((ldb_attr_cmp(drs_msg->elements[j].name, "nTSecurityDescriptor") == 0) &&
+ (ldb_dn_compare(drs_msg->dn, deleted_dn) == 0)) {
+ ldb_msg_remove_element(drs_msg, &drs_msg->elements[j]);
+ /* Don't skip one */
+ j--;
+ } else if (ldb_attr_cmp(drs_msg->elements[j].name, "unicodePwd") == 0 ||
+ ldb_attr_cmp(drs_msg->elements[j].name, "dBCSPwd") == 0 ||
+ ldb_attr_cmp(drs_msg->elements[j].name, "userPassword") == 0 ||
+ ldb_attr_cmp(drs_msg->elements[j].name, "ntPwdHistory") == 0 ||
+ ldb_attr_cmp(drs_msg->elements[j].name, "lmPwdHistory") == 0 ||
+ ldb_attr_cmp(drs_msg->elements[j].name, "supplementalCredentials") == 0 ||
+ ldb_attr_cmp(drs_msg->elements[j].name, "priorValue") == 0 ||
+ ldb_attr_cmp(drs_msg->elements[j].name, "currentValue") == 0 ||
+ ldb_attr_cmp(drs_msg->elements[j].name, "trustAuthOutgoing") == 0 ||
+ ldb_attr_cmp(drs_msg->elements[j].name, "trustAuthIncoming") == 0 ||
+ ldb_attr_cmp(drs_msg->elements[j].name, "initialAuthOutgoing") == 0 ||
+ ldb_attr_cmp(drs_msg->elements[j].name, "initialAuthIncoming") == 0) {
+
+ /* These are not shown over LDAP, so we need to skip them for the comparison */
+ ldb_msg_remove_element(drs_msg, &drs_msg->elements[j]);
+ /* Don't skip one */
+ j--;
+ } else {
+ drs_msg->elements[j].flags = LDB_FLAG_MOD_ADD;
+ }
+ }
+
+
+ ret = ldb_msg_difference(ldb, search_req,
+ drs_msg, ldap_msg, &new_msg);
+ torture_assert(tctx, ret == LDB_SUCCESS, "ldb_msg_difference() has failed");
+ if (new_msg->num_elements != 0) {
+ char *s;
+ bool is_warning = true;
+ unsigned int idx;
+ struct ldb_message_element *el;
+ const struct dsdb_attribute * a;
+ struct ldb_ldif ldif;
+ ldif.changetype = LDB_CHANGETYPE_MODIFY;
+ ldif.msg = new_msg;
+ s = ldb_ldif_write_string(ldb, new_msg, &ldif);
+ s = talloc_asprintf(tctx, "\n# Difference in between DRS and LDAP objects: \n%s", s);
+
+ ret = ldb_msg_difference(ldb, search_req,
+ ldap_msg, drs_msg, &ldif.msg);
+ torture_assert(tctx, ret == LDB_SUCCESS, "ldb_msg_difference() has failed");
+ s = talloc_asprintf_append(s,
+ "\n# Difference in between LDAP and DRS objects: \n%s",
+ ldb_ldif_write_string(ldb, new_msg, &ldif));
+
+ s = talloc_asprintf_append(s,
+ "# Should have no objects in 'difference' message. Diff elements: %d",
+ new_msg->num_elements);
+
+ /*
+ * In case differences in messages are:
+ * 1. Attributes with different values, i.e. 'replace'
+ * 2. Those attributes are forward-link attributes
+ * then we just warn about those differences.
+ * It turns out windows doesn't send all of those values
+ * in replicated_object but in linked_attributes.
+ */
+ for (idx = 0; idx < new_msg->num_elements && is_warning; idx++) {
+ el = &new_msg->elements[idx];
+ a = dsdb_attribute_by_lDAPDisplayName(ldap_schema,
+ el->name);
+ if (LDB_FLAG_MOD_TYPE(el->flags) != LDB_FLAG_MOD_ADD &&
+ LDB_FLAG_MOD_TYPE(el->flags) != LDB_FLAG_MOD_REPLACE)
+ {
+ /* DRS only value */
+ is_warning = false;
+ } else if (a->linkID & 1) {
+ is_warning = false;
+ }
+ }
+ if (is_warning) {
+ torture_warning(tctx, "%s", s);
+ } else {
+ torture_fail(tctx, s);
+ }
+ }
+
+ /* search_req is used as a tmp talloc context in the above */
+ talloc_free(search_req);
+ }
+
+ if (!lpcfg_parm_bool(tctx->lp_ctx, NULL, "dssync", "print_pwd_blobs", false)) {
+ talloc_free(objs);
+ return true;
+ }
+
+ save_values_dir = lpcfg_parm_string(tctx->lp_ctx, NULL, "dssync", "save_pwd_blobs_dir");
+
+ for (cur = first_object; cur; cur = cur->next_object) {
+ const char *dn;
+ bool dn_printed = false;
+
+ if (!cur->object.identifier) continue;
+
+ dn = cur->object.identifier->dn;
+
+ for (i=0; i < cur->object.attribute_ctr.num_attributes; i++) {
+ const char *name = NULL;
+ DATA_BLOB plain_data;
+ struct drsuapi_DsReplicaAttribute *attr;
+ ndr_pull_flags_fn_t pull_fn = NULL;
+ ndr_print_fn_t print_fn = NULL;
+ void *ptr = NULL;
+ attr = &cur->object.attribute_ctr.attributes[i];
+
+ switch (attr->attid) {
+ case DRSUAPI_ATTID_dBCSPwd:
+ name = "dBCSPwd";
+ break;
+ case DRSUAPI_ATTID_unicodePwd:
+ name = "unicodePwd";
+ break;
+ case DRSUAPI_ATTID_ntPwdHistory:
+ name = "ntPwdHistory";
+ break;
+ case DRSUAPI_ATTID_lmPwdHistory:
+ name = "lmPwdHistory";
+ break;
+ case DRSUAPI_ATTID_supplementalCredentials:
+ name = "supplementalCredentials";
+ pull_fn = (ndr_pull_flags_fn_t)ndr_pull_supplementalCredentialsBlob;
+ print_fn = (ndr_print_fn_t)ndr_print_supplementalCredentialsBlob;
+ ptr = talloc(ctx, struct supplementalCredentialsBlob);
+ break;
+ case DRSUAPI_ATTID_priorValue:
+ name = "priorValue";
+ break;
+ case DRSUAPI_ATTID_currentValue:
+ name = "currentValue";
+ break;
+ case DRSUAPI_ATTID_trustAuthOutgoing:
+ name = "trustAuthOutgoing";
+ pull_fn = (ndr_pull_flags_fn_t)ndr_pull_trustAuthInOutBlob;
+ print_fn = (ndr_print_fn_t)ndr_print_trustAuthInOutBlob;
+ ptr = talloc(ctx, struct trustAuthInOutBlob);
+ break;
+ case DRSUAPI_ATTID_trustAuthIncoming:
+ name = "trustAuthIncoming";
+ pull_fn = (ndr_pull_flags_fn_t)ndr_pull_trustAuthInOutBlob;
+ print_fn = (ndr_print_fn_t)ndr_print_trustAuthInOutBlob;
+ ptr = talloc(ctx, struct trustAuthInOutBlob);
+ break;
+ case DRSUAPI_ATTID_initialAuthOutgoing:
+ name = "initialAuthOutgoing";
+ break;
+ case DRSUAPI_ATTID_initialAuthIncoming:
+ name = "initialAuthIncoming";
+ break;
+ default:
+ continue;
+ }
+
+ if (attr->value_ctr.num_values != 1) continue;
+
+ if (!attr->value_ctr.values[0].blob) continue;
+
+ plain_data = *attr->value_ctr.values[0].blob;
+
+ if (!dn_printed) {
+ object_id++;
+ DEBUG(0,("DN[%u] %s\n", object_id, dn));
+ dn_printed = true;
+ }
+ DEBUGADD(0,("ATTR: %s plain.length=%lu\n",
+ name, (long)plain_data.length));
+ if (plain_data.length) {
+ enum ndr_err_code ndr_err;
+ dump_data(0, plain_data.data, plain_data.length);
+ if (save_values_dir) {
+ char *fname;
+ fname = talloc_asprintf(ctx, "%s/%s%02d",
+ save_values_dir,
+ name, object_id);
+ if (fname) {
+ bool ok;
+ ok = file_save(fname, plain_data.data, plain_data.length);
+ if (!ok) {
+ DEBUGADD(0,("Failed to save '%s'\n", fname));
+ }
+ }
+ talloc_free(fname);
+ }
+
+ if (pull_fn) {
+ /* Can't use '_all' because of PIDL bugs with relative pointers */
+ ndr_err = ndr_pull_struct_blob(&plain_data, ptr,
+ ptr, pull_fn);
+ if (NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
+ (void)ndr_print_debug(1, print_fn, name, ptr, __location__, __func__);
+ } else {
+ DEBUG(0, ("Failed to decode %s\n", name));
+ }
+ }
+ }
+ talloc_free(ptr);
+ }
+ }
+ talloc_free(objs);
+ return true;
+}
+
+static bool test_GetNCChanges(struct torture_context *tctx,
+ struct DsSyncTest *ctx,
+ const char *nc_dn_str)
+{
+ NTSTATUS status;
+ bool ret = true;
+ int i, y = 0;
+ uint64_t highest_usn = 0;
+ struct drsuapi_DsGetNCChanges r;
+ union drsuapi_DsGetNCChangesRequest req;
+ struct drsuapi_DsReplicaObjectIdentifier nc;
+ struct drsuapi_DsGetNCChangesCtr1 *ctr1 = NULL;
+ struct drsuapi_DsGetNCChangesCtr6 *ctr6 = NULL;
+ uint32_t out_level = 0;
+ struct dom_sid null_sid;
+ DATA_BLOB gensec_skey;
+ struct {
+ uint32_t level;
+ } array[] = {
+/* {
+ 5
+ },
+*/ {
+ 8
+ }
+ };
+
+ ZERO_STRUCT(null_sid);
+
+ highest_usn = lpcfg_parm_int(tctx->lp_ctx, NULL, "dssync", "highest_usn", 0);
+
+ array[0].level = lpcfg_parm_int(tctx->lp_ctx, NULL, "dssync", "get_nc_changes_level", array[0].level);
+
+ if (lpcfg_parm_bool(tctx->lp_ctx, NULL, "dssync", "print_pwd_blobs", false)) {
+ const struct samr_Password *nthash;
+ nthash = cli_credentials_get_nt_hash(ctx->new_dc.credentials, ctx);
+ if (nthash) {
+ dump_data_pw("CREDENTIALS nthash:", nthash->hash, sizeof(nthash->hash));
+ }
+ }
+ status = gensec_session_key(ctx->new_dc.drsuapi.drs_pipe->conn->security_state.generic_state,
+ ctx, &gensec_skey);
+ if (!NT_STATUS_IS_OK(status)) {
+ printf("failed to get gensec session key: %s\n", nt_errstr(status));
+ return false;
+ }
+
+ for (i=0; i < ARRAY_SIZE(array); i++) {
+ printf("Testing DsGetNCChanges level %d\n",
+ array[i].level);
+
+ r.in.bind_handle = &ctx->new_dc.drsuapi.bind_handle;
+ r.in.level = array[i].level;
+
+ switch (r.in.level) {
+ case 5:
+ nc.guid = GUID_zero();
+ nc.sid = null_sid;
+ nc.dn = nc_dn_str;
+
+ r.in.req = &req;
+ r.in.req->req5.destination_dsa_guid = ctx->new_dc.invocation_id;
+ r.in.req->req5.source_dsa_invocation_id = GUID_zero();
+ r.in.req->req5.naming_context = &nc;
+ r.in.req->req5.highwatermark.tmp_highest_usn = highest_usn;
+ r.in.req->req5.highwatermark.reserved_usn = 0;
+ r.in.req->req5.highwatermark.highest_usn = highest_usn;
+ r.in.req->req5.uptodateness_vector = NULL;
+ r.in.req->req5.replica_flags = 0;
+ if (lpcfg_parm_bool(tctx->lp_ctx, NULL, "dssync", "compression", false)) {
+ r.in.req->req5.replica_flags |= DRSUAPI_DRS_USE_COMPRESSION;
+ }
+ if (lpcfg_parm_bool(tctx->lp_ctx, NULL, "dssync", "neighbour_writeable", true)) {
+ r.in.req->req5.replica_flags |= DRSUAPI_DRS_WRIT_REP;
+ }
+ r.in.req->req5.replica_flags |= DRSUAPI_DRS_INIT_SYNC
+ | DRSUAPI_DRS_PER_SYNC
+ | DRSUAPI_DRS_GET_ANC
+ | DRSUAPI_DRS_NEVER_SYNCED
+ ;
+ r.in.req->req5.max_object_count = 133;
+ r.in.req->req5.max_ndr_size = 1336770;
+ r.in.req->req5.extended_op = DRSUAPI_EXOP_NONE;
+ r.in.req->req5.fsmo_info = 0;
+
+ break;
+ case 8:
+ nc.guid = GUID_zero();
+ nc.sid = null_sid;
+ nc.dn = nc_dn_str;
+ /* nc.dn can be set to any other ad partition */
+
+ r.in.req = &req;
+ r.in.req->req8.destination_dsa_guid = ctx->new_dc.invocation_id;
+ r.in.req->req8.source_dsa_invocation_id = GUID_zero();
+ r.in.req->req8.naming_context = &nc;
+ r.in.req->req8.highwatermark.tmp_highest_usn = highest_usn;
+ r.in.req->req8.highwatermark.reserved_usn = 0;
+ r.in.req->req8.highwatermark.highest_usn = highest_usn;
+ r.in.req->req8.uptodateness_vector = NULL;
+ r.in.req->req8.replica_flags = 0;
+ if (lpcfg_parm_bool(tctx->lp_ctx, NULL, "dssync", "compression", false)) {
+ r.in.req->req8.replica_flags |= DRSUAPI_DRS_USE_COMPRESSION;
+ }
+ if (lpcfg_parm_bool(tctx->lp_ctx, NULL, "dssync", "neighbour_writeable", true)) {
+ r.in.req->req8.replica_flags |= DRSUAPI_DRS_WRIT_REP;
+ }
+ r.in.req->req8.replica_flags |= DRSUAPI_DRS_INIT_SYNC
+ | DRSUAPI_DRS_PER_SYNC
+ | DRSUAPI_DRS_GET_ANC
+ | DRSUAPI_DRS_NEVER_SYNCED
+ ;
+ r.in.req->req8.max_object_count = 402;
+ r.in.req->req8.max_ndr_size = 402116;
+
+ r.in.req->req8.extended_op = DRSUAPI_EXOP_NONE;
+ r.in.req->req8.fsmo_info = 0;
+ r.in.req->req8.partial_attribute_set = NULL;
+ r.in.req->req8.partial_attribute_set_ex = NULL;
+ r.in.req->req8.mapping_ctr.num_mappings = 0;
+ r.in.req->req8.mapping_ctr.mappings = NULL;
+
+ break;
+ }
+
+ for (y=0; ;y++) {
+ uint32_t _level = 0;
+ union drsuapi_DsGetNCChangesCtr ctr;
+
+ ZERO_STRUCT(r.out);
+
+ r.out.level_out = &_level;
+ r.out.ctr = &ctr;
+
+ if (r.in.level == 5) {
+ torture_comment(tctx,
+					"start[%d] tmp_highest_usn: %llu , highest_usn: %llu\n",
+ y,
+ (unsigned long long) r.in.req->req5.highwatermark.tmp_highest_usn,
+ (unsigned long long) r.in.req->req5.highwatermark.highest_usn);
+ }
+
+ if (r.in.level == 8) {
+ torture_comment(tctx,
+					"start[%d] tmp_highest_usn: %llu , highest_usn: %llu\n",
+ y,
+ (unsigned long long) r.in.req->req8.highwatermark.tmp_highest_usn,
+ (unsigned long long) r.in.req->req8.highwatermark.highest_usn);
+ }
+
+ status = dcerpc_drsuapi_DsGetNCChanges_r(ctx->new_dc.drsuapi.drs_handle, ctx, &r);
+ torture_drsuapi_assert_call(tctx, ctx->new_dc.drsuapi.drs_pipe, status,
+ &r, "dcerpc_drsuapi_DsGetNCChanges");
+
+ if (ret == true && *r.out.level_out == 1) {
+ out_level = 1;
+ ctr1 = &r.out.ctr->ctr1;
+ } else if (ret == true && *r.out.level_out == 2 &&
+ r.out.ctr->ctr2.mszip1.ts) {
+ out_level = 1;
+ ctr1 = &r.out.ctr->ctr2.mszip1.ts->ctr1;
+ }
+
+ if (out_level == 1) {
+ torture_comment(tctx,
+ "end[%d] tmp_highest_usn: %llu , highest_usn: %llu\n",
+ y,
+ (unsigned long long) ctr1->new_highwatermark.tmp_highest_usn,
+ (unsigned long long) ctr1->new_highwatermark.highest_usn);
+
+ if (!test_analyse_objects(tctx, ctx, nc_dn_str, &ctr1->mapping_ctr, ctr1->object_count,
+ ctr1->first_object, &gensec_skey)) {
+ return false;
+ }
+
+ if (ctr1->more_data) {
+ r.in.req->req5.highwatermark = ctr1->new_highwatermark;
+ continue;
+ }
+ }
+
+ if (ret == true && *r.out.level_out == 6) {
+ out_level = 6;
+ ctr6 = &r.out.ctr->ctr6;
+ } else if (ret == true && *r.out.level_out == 7
+ && r.out.ctr->ctr7.level == 6
+ && r.out.ctr->ctr7.type == DRSUAPI_COMPRESSION_TYPE_MSZIP
+ && r.out.ctr->ctr7.ctr.mszip6.ts) {
+ out_level = 6;
+ ctr6 = &r.out.ctr->ctr7.ctr.mszip6.ts->ctr6;
+ } else if (ret == true && *r.out.level_out == 7
+ && r.out.ctr->ctr7.level == 6
+ && r.out.ctr->ctr7.type == DRSUAPI_COMPRESSION_TYPE_WIN2K3_LZ77_DIRECT2
+ && r.out.ctr->ctr7.ctr.xpress6.ts) {
+ out_level = 6;
+ ctr6 = &r.out.ctr->ctr7.ctr.xpress6.ts->ctr6;
+ }
+
+ if (out_level == 6) {
+ torture_comment(tctx,
+ "end[%d] tmp_highest_usn: %llu , highest_usn: %llu\n",
+ y,
+ (unsigned long long) ctr6->new_highwatermark.tmp_highest_usn,
+ (unsigned long long) ctr6->new_highwatermark.highest_usn);
+
+ if (!test_analyse_objects(tctx, ctx, nc_dn_str, &ctr6->mapping_ctr, ctr6->object_count,
+ ctr6->first_object, &gensec_skey)) {
+ return false;
+ }
+
+ if (ctr6->more_data) {
+ r.in.req->req8.highwatermark = ctr6->new_highwatermark;
+ continue;
+ }
+ }
+
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * Test DsGetNCChanges() DRSUAPI call against one
+ * or more Naming Contexts.
+ * A specific NC to test against may be supplied
+ * via the lp_ctx configuration. If no NC is specified,
+ * DsGetNCChanges() is tested against all NCs on the remote DC.
+ */
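+/*
+ * Illustrative invocation (assumed smbtorture syntax; adjust the host and
+ * partition DN for the target environment):
+ *
+ *   smbtorture ncacn_ip_tcp:DC1[seal] drs.rpc.dssync \
+ *       --option='dssync:partition=CN=Configuration,DC=samba,DC=example,DC=com'
+ *
+ * Without dssync:partition the domain, configuration and schema NCs are used.
+ */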
+static bool test_FetchData(struct torture_context *tctx, struct DsSyncTest *ctx)
+{
+ bool ret = true;
+ size_t i, count;
+ const char *nc_dn_str;
+ const char **nc_list;
+
+ nc_list = const_str_list(str_list_make_empty(ctx));
+ torture_assert(tctx, nc_list, "Not enough memory!");
+
+ /* make a list of partitions to test with */
+ nc_dn_str = lpcfg_parm_string(tctx->lp_ctx, NULL, "dssync", "partition");
+ if (nc_dn_str == NULL) {
+ nc_list = str_list_add_const(nc_list, ctx->domain_dn);
+ nc_list = str_list_add_const(nc_list, ctx->config_dn);
+ nc_list = str_list_add_const(nc_list, ctx->schema_dn);
+ } else {
+ nc_list = str_list_add_const(nc_list, nc_dn_str);
+ }
+
+ count = str_list_length(nc_list);
+ for (i = 0; i < count && ret; i++) {
+ torture_comment(tctx, "\nNaming Context: %s\n", nc_list[i]);
+ ret = test_GetNCChanges(tctx, ctx, nc_list[i]);
+ }
+
+ talloc_free(nc_list);
+ return ret;
+}
+
+
+static bool test_FetchNT4Data(struct torture_context *tctx,
+ struct DsSyncTest *ctx)
+{
+ NTSTATUS status;
+ struct drsuapi_DsGetNT4ChangeLog r;
+ union drsuapi_DsGetNT4ChangeLogRequest req;
+ union drsuapi_DsGetNT4ChangeLogInfo info;
+ uint32_t level_out = 0;
+ DATA_BLOB cookie;
+
+ ZERO_STRUCT(cookie);
+
+ ZERO_STRUCT(r);
+ r.in.bind_handle = &ctx->new_dc.drsuapi.bind_handle;
+ r.in.level = 1;
+ r.out.info = &info;
+ r.out.level_out = &level_out;
+
+ req.req1.flags = lpcfg_parm_int(tctx->lp_ctx, NULL,
+ "dssync", "nt4changelog_flags",
+ DRSUAPI_NT4_CHANGELOG_GET_CHANGELOG |
+ DRSUAPI_NT4_CHANGELOG_GET_SERIAL_NUMBERS);
+ req.req1.preferred_maximum_length = lpcfg_parm_int(tctx->lp_ctx, NULL,
+ "dssync", "nt4changelog_preferred_len",
+ 0x00004000);
+
+ while (1) {
+ req.req1.restart_length = cookie.length;
+ req.req1.restart_data = cookie.data;
+
+ r.in.req = &req;
+
+ status = dcerpc_drsuapi_DsGetNT4ChangeLog_r(ctx->new_dc.drsuapi.drs_handle, ctx, &r);
+ if (NT_STATUS_EQUAL(status, NT_STATUS_NOT_IMPLEMENTED)) {
+ torture_skip(tctx,
+ "DsGetNT4ChangeLog not supported: NT_STATUS_NOT_IMPLEMENTED");
+ } else if (!NT_STATUS_IS_OK(status)) {
+ const char *errstr = nt_errstr(status);
+ if (NT_STATUS_EQUAL(status, NT_STATUS_RPC_PROCNUM_OUT_OF_RANGE)) {
+ torture_skip(tctx,
+ "DsGetNT4ChangeLog not supported: NT_STATUS_RPC_PROCNUM_OUT_OF_RANGE");
+ }
+ torture_fail(tctx,
+ talloc_asprintf(tctx, "dcerpc_drsuapi_DsGetNT4ChangeLog failed - %s\n",
+ errstr));
+ } else if (W_ERROR_EQUAL(r.out.result, WERR_INVALID_DOMAIN_ROLE)) {
+ torture_skip(tctx,
+ "DsGetNT4ChangeLog not supported: WERR_INVALID_DOMAIN_ROLE");
+ } else if (!W_ERROR_IS_OK(r.out.result)) {
+ torture_fail(tctx,
+ talloc_asprintf(tctx, "DsGetNT4ChangeLog failed - %s\n",
+ win_errstr(r.out.result)));
+ } else if (*r.out.level_out != 1) {
+ torture_fail(tctx,
+ talloc_asprintf(tctx, "DsGetNT4ChangeLog unknown level - %u\n",
+ *r.out.level_out));
+ } else if (NT_STATUS_IS_OK(r.out.info->info1.status)) {
+ } else if (NT_STATUS_EQUAL(r.out.info->info1.status, STATUS_MORE_ENTRIES)) {
+ cookie.length = r.out.info->info1.restart_length;
+ cookie.data = r.out.info->info1.restart_data;
+ continue;
+ } else {
+ torture_fail(tctx,
+ talloc_asprintf(tctx, "DsGetNT4ChangeLog failed - %s\n",
+ nt_errstr(r.out.info->info1.status)));
+ }
+
+ break;
+ }
+
+ return true;
+}
+
+/**
+ * DSSYNC test case setup
+ */
+static bool torture_dssync_tcase_setup(struct torture_context *tctx, void **data)
+{
+ bool bret;
+ struct DsSyncTest *ctx;
+
+ *data = ctx = test_create_context(tctx);
+ torture_assert(tctx, ctx, "test_create_context() failed");
+
+ bret = _test_DsBind(tctx, ctx, ctx->admin.credentials, &ctx->admin.drsuapi);
+ torture_assert(tctx, bret, "_test_DsBind() failed");
+
+ bret = test_LDAPBind(tctx, ctx, ctx->admin.credentials, &ctx->admin.ldap);
+ torture_assert(tctx, bret, "test_LDAPBind() failed");
+
+ bret = test_GetInfo(tctx, ctx);
+ torture_assert(tctx, bret, "test_GetInfo() failed");
+
+ bret = _test_DsBind(tctx, ctx, ctx->new_dc.credentials, &ctx->new_dc.drsuapi);
+ torture_assert(tctx, bret, "_test_DsBind() failed");
+
+ return true;
+}
+
+/**
+ * DSSYNC test case cleanup
+ */
+static bool torture_dssync_tcase_teardown(struct torture_context *tctx, void *data)
+{
+ struct DsSyncTest *ctx;
+ struct drsuapi_DsUnbind r;
+ struct policy_handle bind_handle;
+
+ ctx = talloc_get_type(data, struct DsSyncTest);
+
+ ZERO_STRUCT(r);
+ r.out.bind_handle = &bind_handle;
+
+ /* Unbind the admin handle */
+ r.in.bind_handle = &ctx->admin.drsuapi.bind_handle;
+ dcerpc_drsuapi_DsUnbind_r(ctx->admin.drsuapi.drs_handle, ctx, &r);
+
+ /* Unbind the new_dc handle */
+ r.in.bind_handle = &ctx->new_dc.drsuapi.bind_handle;
+ dcerpc_drsuapi_DsUnbind_r(ctx->new_dc.drsuapi.drs_handle, ctx, &r);
+
+ talloc_free(ctx);
+
+ return true;
+}
+
+/**
+ * DSSYNC test case implementation
+ */
+void torture_drs_rpc_dssync_tcase(struct torture_suite *suite)
+{
+ typedef bool (*run_func) (struct torture_context *test, void *tcase_data);
+ struct torture_tcase *tcase = torture_suite_add_tcase(suite, "dssync");
+
+ torture_tcase_set_fixture(tcase,
+ torture_dssync_tcase_setup,
+ torture_dssync_tcase_teardown);
+
+ torture_tcase_add_simple_test(tcase, "DC_FetchData", (run_func)test_FetchData);
+ torture_tcase_add_simple_test(tcase, "FetchNT4Data", (run_func)test_FetchNT4Data);
+}
+
diff --git a/source4/torture/drs/rpc/msds_intid.c b/source4/torture/drs/rpc/msds_intid.c
new file mode 100644
index 0000000..1bc5c32
--- /dev/null
+++ b/source4/torture/drs/rpc/msds_intid.c
@@ -0,0 +1,792 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ msDS-IntId attribute replication test.
+
+ Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2010
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "includes.h"
+#include "lib/cmdline/cmdline.h"
+#include "librpc/gen_ndr/ndr_drsuapi_c.h"
+#include "librpc/gen_ndr/ndr_drsblobs.h"
+#include "libcli/cldap/cldap.h"
+#include "torture/torture.h"
+#include "../libcli/drsuapi/drsuapi.h"
+#include "auth/gensec/gensec.h"
+#include "param/param.h"
+#include "dsdb/samdb/samdb.h"
+#include "torture/rpc/torture_rpc.h"
+#include "torture/drs/proto.h"
+#include "lib/tsocket/tsocket.h"
+#include "libcli/resolve/resolve.h"
+#include "lib/util/util_paths.h"
+
+struct DsSyncBindInfo {
+ struct dcerpc_pipe *drs_pipe;
+ struct dcerpc_binding_handle *drs_handle;
+ struct drsuapi_DsBind req;
+ struct GUID bind_guid;
+ struct drsuapi_DsBindInfoCtr our_bind_info_ctr;
+ struct drsuapi_DsBindInfo28 our_bind_info28;
+ struct drsuapi_DsBindInfo28 peer_bind_info28;
+ struct policy_handle bind_handle;
+};
+
+struct DsaBindInfo {
+ struct dcerpc_binding *server_binding;
+
+ struct dcerpc_pipe *drs_pipe;
+ struct dcerpc_binding_handle *drs_handle;
+
+ DATA_BLOB gensec_skey;
+ struct drsuapi_DsBindInfo48 srv_info48;
+ struct policy_handle rpc_handle;
+};
+
+struct DsIntIdTestCtx {
+ const char *ldap_url;
+ const char *domain_dn;
+ const char *config_dn;
+ const char *schema_dn;
+
+ /* what we need to do as 'Administrator' */
+ struct cli_credentials *creds;
+ struct DsaBindInfo dsa_bind;
+ struct ldb_context *ldb;
+
+};
+
+/* Format string to create provision LDIF with */
+#define PROVISION_LDIF_FMT \
+ "###########################################################\n" \
+ "# Format string with positional params:\n" \
+ "# 1 - (int) Unique ID between 1 and 2^16\n" \
+ "# 2 - (string) Domain DN\n" \
+ "###########################################################\n" \
+ "\n" \
+ "###########################################################\n" \
+ "# Update schema\n" \
+ "###########################################################\n" \
+ "dn: CN=msds-intid-%1$d,CN=Schema,CN=Configuration,%2$s\n" \
+ "changetype: add\n" \
+ "objectClass: top\n" \
+ "objectClass: attributeSchema\n" \
+ "cn: msds-intid-%1$d\n" \
+ "attributeID: 1.3.6.1.4.1.7165.4.6.1.%1$d.1.5.9940\n" \
+ "attributeSyntax: 2.5.5.10\n" \
+ "omSyntax: 4\n" \
+ "instanceType: 4\n" \
+ "isSingleValued: TRUE\n" \
+ "systemOnly: FALSE\n" \
+ "\n" \
+ "# schemaUpdateNow\n" \
+ "DN:\n" \
+ "changeType: modify\n" \
+ "add: schemaUpdateNow\n" \
+ "schemaUpdateNow: 1\n" \
+ "-\n" \
+ "\n" \
+ "###########################################################\n" \
+ "# Update schema (with linked attribute)\n" \
+ "###########################################################\n" \
+ "dn: CN=msds-intid-link-%1$d,CN=Schema,CN=Configuration,%2$s\n" \
+ "changetype: add\n" \
+ "objectClass: top\n" \
+ "objectClass: attributeSchema\n" \
+ "cn: msds-intid-link-%1$d\n" \
+ "attributeID: 1.3.6.1.4.1.7165.4.6.1.%1$d.1.5.9941\n" \
+ "attributeSyntax: 2.5.5.1\n" \
+ "omSyntax: 127\n" \
+ "instanceType: 4\n" \
+ "isSingleValued: TRUE\n" \
+ "systemOnly: FALSE\n" \
+ "linkID: 1.2.840.113556.1.2.50\n" \
+ "\n" \
+ "# schemaUpdateNow\n" \
+ "DN:\n" \
+ "changeType: modify\n" \
+ "add: schemaUpdateNow\n" \
+ "schemaUpdateNow: 1\n" \
+ "-\n" \
+ "\n" \
+ "###########################################################\n" \
+ "# Update User class\n" \
+ "###########################################################\n" \
+ "dn: CN=User,CN=Schema,CN=Configuration,%2$s\n" \
+ "changetype: modify\n" \
+ "add: mayContain\n" \
+ "mayContain: msdsIntid%1$d\n" \
+ "mayContain: msdsIntidLink%1$d\n" \
+ "-\n" \
+ "\n" \
+ "# schemaUpdateNow\n" \
+ "DN:\n" \
+ "changeType: modify\n" \
+ "add: schemaUpdateNow\n" \
+ "schemaUpdateNow: 1\n" \
+ "-\n" \
+ "\n" \
+ "###########################################################\n" \
+ "# create user to test with\n" \
+ "###########################################################\n" \
+ "dn: CN=dsIntId_usr_%1$d,CN=Users,%2$s\n" \
+ "changetype: add\n" \
+ "objectClass: user\n" \
+ "cn: dsIntId_usr_%1$d\n" \
+ "name: dsIntId_usr_%1$d\n" \
+ "displayName: dsIntId_usr_%1$d\n" \
+ "sAMAccountName: dsIntId_usr_%1$d\n" \
+ "msdsIntid%1$d: msDS-IntId-%1$d attribute value\n" \
+ "msdsIntidLink%1$d: %2$s\n" \
+ "\n"
+
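+/*
+ * The random %1$d component makes the OIDs and names above unique per
+ * test run, so the DC creates brand new schema attributes (one plain,
+ * one linked) and is expected to assign them msDS-IntId values that the
+ * replication tests below can verify against.
+ */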
+
+static struct DsIntIdTestCtx *_dsintid_create_context(struct torture_context *tctx)
+{
+ NTSTATUS status;
+ struct DsIntIdTestCtx *ctx;
+ struct dcerpc_binding *server_binding;
+ const char *binding = torture_setting_string(tctx, "binding", NULL);
+
+ /* Create test suite context */
+ ctx = talloc_zero(tctx, struct DsIntIdTestCtx);
+ if (!ctx) {
+ torture_result(tctx, TORTURE_FAIL, "Not enough memory!");
+ return NULL;
+ }
+
+ /* parse binding object */
+ status = dcerpc_parse_binding(ctx, binding, &server_binding);
+ if (!NT_STATUS_IS_OK(status)) {
+ torture_result(tctx, TORTURE_FAIL,
+ "Bad binding string '%s': %s", binding, nt_errstr(status));
+ return NULL;
+ }
+
+ status = dcerpc_binding_set_flags(server_binding,
+ DCERPC_SIGN | DCERPC_SEAL, 0);
+ if (!NT_STATUS_IS_OK(status)) {
+ torture_result(tctx, TORTURE_FAIL,
+ "dcerpc_binding_set_flags: %s", nt_errstr(status));
+ return NULL;
+ }
+
+ /* populate test suite context */
+ ctx->creds = samba_cmdline_get_creds();
+ ctx->dsa_bind.server_binding = server_binding;
+
+ ctx->ldap_url = talloc_asprintf(ctx, "ldap://%s",
+ dcerpc_binding_get_string_option(server_binding, "host"));
+
+ return ctx;
+}
+
+static bool _test_DsaBind(struct torture_context *tctx,
+ TALLOC_CTX *mem_ctx,
+ struct cli_credentials *credentials,
+ uint32_t req_extensions,
+ struct DsaBindInfo *bi)
+{
+ NTSTATUS status;
+ struct GUID bind_guid;
+ struct drsuapi_DsBind r;
+ struct drsuapi_DsBindInfoCtr bind_info_ctr;
+ uint32_t supported_extensions;
+
+ /* make DCE RPC connection */
+ status = dcerpc_pipe_connect_b(mem_ctx,
+ &bi->drs_pipe,
+ bi->server_binding,
+ &ndr_table_drsuapi,
+ credentials, tctx->ev, tctx->lp_ctx);
+ torture_assert_ntstatus_ok(tctx, status, "Failed to connect to server");
+
+ bi->drs_handle = bi->drs_pipe->binding_handle;
+
+ status = gensec_session_key(bi->drs_pipe->conn->security_state.generic_state,
+ mem_ctx, &bi->gensec_skey);
+ torture_assert_ntstatus_ok(tctx, status, "failed to get gensec session key");
+
+ /* Bind to DRSUAPI interface */
+ GUID_from_string(DRSUAPI_DS_BIND_GUID_W2K3, &bind_guid);
+
+ /*
+ * Add the flags that MS docs say should be set.
+ * It turns out DRSUAPI_SUPPORTED_EXTENSION_POST_BETA3
+ * is actually required for GetNCChanges() to return the
+ * schemaInfo entry in the prefixMap it sends back.
+ * Use DRSUAPI_SUPPORTED_EXTENSION_STRONG_ENCRYPTION so
+ * we are able to fetch sensitive data.
+ */
+ supported_extensions = req_extensions
+ | DRSUAPI_SUPPORTED_EXTENSION_BASE
+ | DRSUAPI_SUPPORTED_EXTENSION_RESTORE_USN_OPTIMIZATION
+ | DRSUAPI_SUPPORTED_EXTENSION_INSTANCE_TYPE_NOT_REQ_ON_MOD
+ | DRSUAPI_SUPPORTED_EXTENSION_POST_BETA3
+ | DRSUAPI_SUPPORTED_EXTENSION_STRONG_ENCRYPTION;
+
+ ZERO_STRUCT(bind_info_ctr);
+ bind_info_ctr.length = 28;
+ bind_info_ctr.info.info28.supported_extensions = supported_extensions;
+
+ r.in.bind_guid = &bind_guid;
+ r.in.bind_info = &bind_info_ctr;
+ r.out.bind_handle = &bi->rpc_handle;
+
+ status = dcerpc_drsuapi_DsBind_r(bi->drs_handle, mem_ctx, &r);
+ torture_drsuapi_assert_call(tctx, bi->drs_pipe, status,
+ &r, "dcerpc_drsuapi_DsBind_r");
+
+
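+ /*
+ * The server may reply with any of the known DsBindInfo layouts;
+ * normalize whatever came back into srv_info48 so the extension
+ * check below can work on a single structure.
+ */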
+ switch (r.out.bind_info->length) {
+ case 24: {
+ struct drsuapi_DsBindInfo24 *info24;
+ info24 = &r.out.bind_info->info.info24;
+ bi->srv_info48.supported_extensions = info24->supported_extensions;
+ bi->srv_info48.site_guid = info24->site_guid;
+ bi->srv_info48.pid = info24->pid;
+ break;
+ }
+ case 28: {
+ struct drsuapi_DsBindInfo28 *info28;
+ info28 = &r.out.bind_info->info.info28;
+ bi->srv_info48.supported_extensions = info28->supported_extensions;
+ bi->srv_info48.site_guid = info28->site_guid;
+ bi->srv_info48.pid = info28->pid;
+ bi->srv_info48.repl_epoch = info28->repl_epoch;
+ break;
+ }
+ case 32: {
+ struct drsuapi_DsBindInfo32 *info32;
+ info32 = &r.out.bind_info->info.info32;
+ bi->srv_info48.supported_extensions = info32->supported_extensions;
+ bi->srv_info48.site_guid = info32->site_guid;
+ bi->srv_info48.pid = info32->pid;
+ bi->srv_info48.repl_epoch = info32->repl_epoch;
+ break;
+ }
+ case 48: {
+ bi->srv_info48 = r.out.bind_info->info.info48;
+ break;
+ }
+ case 52: {
+ struct drsuapi_DsBindInfo52 *info52;
+ info52 = &r.out.bind_info->info.info52;
+ bi->srv_info48.supported_extensions = info52->supported_extensions;
+ bi->srv_info48.site_guid = info52->site_guid;
+ bi->srv_info48.pid = info52->pid;
+ bi->srv_info48.repl_epoch = info52->repl_epoch;
+ break;
+ }
+ default:
+ torture_result(tctx, TORTURE_FAIL,
+ "DsBind: unknown BindInfo length: %u",
+ r.out.bind_info->length);
+ return false;
+ }
+
+ /* check if server supports extensions we've requested */
+ if ((bi->srv_info48.supported_extensions & req_extensions) != req_extensions) {
+ torture_result(tctx, TORTURE_FAIL,
+ "Server does not support requested extensions. "
+ "Requested: 0x%08X, Supported: 0x%08X",
+ req_extensions, bi->srv_info48.supported_extensions);
+ return false;
+ }
+
+ return true;
+}
+
+static bool _test_LDAPBind(struct torture_context *tctx,
+ TALLOC_CTX *mem_ctx,
+ struct cli_credentials *credentials,
+ const char *ldap_url,
+ struct ldb_context **_ldb)
+{
+ int ret;
+
+ struct ldb_context *ldb;
+
+ const char *modules_option[] = { "modules:paged_searches", NULL };
+ ldb = ldb_init(mem_ctx, tctx->ev);
+ if (ldb == NULL) {
+ return false;
+ }
+
+ /* Despite us loading the schema from the AD server, we need
+ * the samba handlers to get the extended DN syntax stuff */
+ ret = ldb_register_samba_handlers(ldb);
+ if (ret != LDB_SUCCESS) {
+ talloc_free(ldb);
+ return false;
+ }
+
+ ldb_set_modules_dir(ldb, modules_path(ldb, "ldb"));
+
+ if (ldb_set_opaque(ldb, "credentials", credentials) != LDB_SUCCESS) {
+ talloc_free(ldb);
+ return false;
+ }
+
+ if (ldb_set_opaque(ldb, "loadparm", tctx->lp_ctx) != LDB_SUCCESS) {
+ talloc_free(ldb);
+ return false;
+ }
+
+ ret = ldb_connect(ldb, ldap_url, 0, modules_option);
+ if (ret != LDB_SUCCESS) {
+ talloc_free(ldb);
+ torture_assert_int_equal(tctx, ret, LDB_SUCCESS, "Failed to make LDB connection to target");
+ }
+
+ *_ldb = ldb;
+
+ return true;
+}
+
+static bool _test_provision(struct torture_context *tctx, struct DsIntIdTestCtx *ctx)
+{
+ int ret;
+ char *ldif_str;
+ const char *pstr;
+ struct ldb_ldif *ldif;
+ uint32_t attr_id;
+ struct ldb_context *ldb = ctx->ldb;
+
+ /* We must have LDB connection ready by this time */
+ SMB_ASSERT(ldb != NULL);
+
+ ctx->domain_dn = ldb_dn_get_linearized(ldb_get_default_basedn(ldb));
+ torture_assert(tctx, ctx->domain_dn != NULL, "Failed to get Domain DN");
+
+ ctx->config_dn = ldb_dn_get_linearized(ldb_get_config_basedn(ldb));
+ torture_assert(tctx, ctx->config_dn != NULL, "Failed to get Config DN");
+
+ ctx->schema_dn = ldb_dn_get_linearized(ldb_get_schema_basedn(ldb));
+ torture_assert(tctx, ctx->schema_dn != NULL, "Failed to get Schema DN");
+
+ /* prepare LDIF to provision with */
+ attr_id = generate_random() % 0xFFFF;
+ pstr = ldif_str = talloc_asprintf(ctx, PROVISION_LDIF_FMT,
+ attr_id, ctx->domain_dn);
+
+ /* Provision test data */
+ while ((ldif = ldb_ldif_read_string(ldb, &pstr)) != NULL) {
+ switch (ldif->changetype) {
+ case LDB_CHANGETYPE_DELETE:
+ ret = ldb_delete(ldb, ldif->msg->dn);
+ break;
+ case LDB_CHANGETYPE_MODIFY:
+ ret = ldb_modify(ldb, ldif->msg);
+ break;
+ case LDB_CHANGETYPE_ADD:
+ default:
+ ret = ldb_add(ldb, ldif->msg);
+ break;
+ }
+ if (ret != LDB_SUCCESS) {
+ char *msg = talloc_asprintf(ctx,
+ "Failed to apply ldif - %s (%s): \n%s",
+ ldb_errstring(ldb),
+ ldb_strerror(ret),
+ ldb_ldif_write_string(ldb, ctx, ldif));
+ torture_fail(tctx, msg);
+
+ }
+ ldb_ldif_read_free(ldb, ldif);
+ }
+
+ return true;
+}
+
+
+static bool _test_GetNCChanges(struct torture_context *tctx,
+ struct DsaBindInfo *bi,
+ const char *nc_dn_str,
+ TALLOC_CTX *mem_ctx,
+ struct drsuapi_DsGetNCChangesCtr6 **_ctr6)
+{
+ NTSTATUS status;
+ struct drsuapi_DsGetNCChanges r;
+ union drsuapi_DsGetNCChangesRequest req;
+ struct drsuapi_DsReplicaObjectIdentifier nc;
+ struct drsuapi_DsGetNCChangesCtr6 *ctr6_chunk = NULL;
+ struct drsuapi_DsGetNCChangesCtr6 ctr6;
+ uint32_t _level = 0;
+ union drsuapi_DsGetNCChangesCtr ctr;
+
+ struct dom_sid null_sid;
+
+ ZERO_STRUCT(null_sid);
+
+ /* fill-in Naming Context */
+ nc.guid = GUID_zero();
+ nc.sid = null_sid;
+ nc.dn = nc_dn_str;
+
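+ /*
+ * A level 8 request with a zeroed highwatermark and no
+ * uptodateness vector asks the server to replicate the whole
+ * Naming Context from scratch.
+ */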
+ /* fill-in request fields */
+ req.req8.destination_dsa_guid = GUID_random();
+ req.req8.source_dsa_invocation_id = GUID_zero();
+ req.req8.naming_context = &nc;
+ req.req8.highwatermark.tmp_highest_usn = 0;
+ req.req8.highwatermark.reserved_usn = 0;
+ req.req8.highwatermark.highest_usn = 0;
+ req.req8.uptodateness_vector = NULL;
+ req.req8.replica_flags = DRSUAPI_DRS_WRIT_REP
+ | DRSUAPI_DRS_INIT_SYNC
+ | DRSUAPI_DRS_PER_SYNC
+ | DRSUAPI_DRS_GET_ANC
+ | DRSUAPI_DRS_NEVER_SYNCED
+ ;
+ req.req8.max_object_count = 402;
+ req.req8.max_ndr_size = 402116;
+
+ req.req8.extended_op = DRSUAPI_EXOP_NONE;
+ req.req8.fsmo_info = 0;
+ req.req8.partial_attribute_set = NULL;
+ req.req8.partial_attribute_set_ex = NULL;
+ req.req8.mapping_ctr.num_mappings = 0;
+ req.req8.mapping_ctr.mappings = NULL;
+
+ r.in.bind_handle = &bi->rpc_handle;
+ r.in.level = 8;
+ r.in.req = &req;
+
+ ZERO_STRUCT(r.out);
+ r.out.level_out = &_level;
+ r.out.ctr = &ctr;
+
+ ZERO_STRUCT(ctr6);
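+ /*
+ * The reply may arrive in several chunks (more_data). Accumulate
+ * them into the local ctr6: objects are appended to the linked
+ * list and the linked_attributes arrays are merged, so the caller
+ * gets one combined ctr6.
+ */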
+ do {
+ ZERO_STRUCT(ctr);
+
+ status = dcerpc_drsuapi_DsGetNCChanges_r(bi->drs_handle, mem_ctx, &r);
+ torture_drsuapi_assert_call(tctx, bi->drs_pipe, status,
+ &r, "dcerpc_drsuapi_DsGetNCChanges_r");
+
+ /* we expect to get level 6 reply */
+ torture_assert_int_equal(tctx, _level, 6, "Expected level 6 reply");
+
+ /* store this chunk for later use */
+ ctr6_chunk = &r.out.ctr->ctr6;
+
+ if (!ctr6.first_object) {
+ ctr6 = *ctr6_chunk;
+ } else {
+ struct drsuapi_DsReplicaObjectListItemEx *cur;
+
+ ctr6.object_count += ctr6_chunk->object_count;
+ for (cur = ctr6.first_object; cur->next_object; cur = cur->next_object) {}
+ cur->next_object = ctr6_chunk->first_object;
+
+ if (ctr6_chunk->linked_attributes_count != 0) {
+ uint32_t i;
+ ctr6.linked_attributes = talloc_realloc(mem_ctx, ctr6.linked_attributes,
+ struct drsuapi_DsReplicaLinkedAttribute,
+ ctr6.linked_attributes_count + ctr6_chunk->linked_attributes_count);
+ for (i = 0; i < ctr6_chunk->linked_attributes_count; i++) {
+ ctr6.linked_attributes[ctr6.linked_attributes_count++] = ctr6_chunk->linked_attributes[i];
+ }
+ }
+ }
+
+ /* prepare for next request */
+ r.in.req->req8.highwatermark = ctr6_chunk->new_highwatermark;
+
+ } while (ctr6_chunk->more_data);
+
+ *_ctr6 = talloc(mem_ctx, struct drsuapi_DsGetNCChangesCtr6);
+ torture_assert(tctx, *_ctr6, "Not enough memory");
+ **_ctr6 = ctr6;
+
+ return true;
+}
+
+static char * _make_error_message(TALLOC_CTX *mem_ctx,
+ enum drsuapi_DsAttributeId drs_attid,
+ const struct dsdb_attribute *dsdb_attr,
+ const struct drsuapi_DsReplicaObjectIdentifier *identifier)
+{
+ return talloc_asprintf(mem_ctx, "\nInvalid ATTID for %1$s (%2$s)\n"
+ " drs_attid: %3$11d (0x%3$08X)\n"
+ " msDS_IntId: %4$11d (0x%4$08X)\n"
+ " attributeId_id: %5$11d (0x%5$08X)",
+ dsdb_attr->lDAPDisplayName,
+ identifier->dn,
+ drs_attid,
+ dsdb_attr->msDS_IntId,
+ dsdb_attr->attributeID_id);
+}
+
+/**
+ * Fetch Schema NC and check ATTID values returned.
+ * When the Schema partition is replicated, ATTIDs
+ * should always be made using the prefixMap
+ */
+static bool test_dsintid_schema(struct torture_context *tctx, struct DsIntIdTestCtx *ctx)
+{
+ uint32_t i;
+ const struct dsdb_schema *ldap_schema;
+ struct drsuapi_DsGetNCChangesCtr6 *ctr6 = NULL;
+ const struct dsdb_attribute *dsdb_attr;
+ const struct drsuapi_DsReplicaAttribute *drs_attr;
+ const struct drsuapi_DsReplicaAttributeCtr *attr_ctr;
+ const struct drsuapi_DsReplicaObjectListItemEx *cur;
+ const struct drsuapi_DsReplicaLinkedAttribute *la;
+ TALLOC_CTX *mem_ctx;
+
+ mem_ctx = talloc_new(ctx);
+ torture_assert(tctx, mem_ctx, "Not enough memory");
+
+ /* fetch whole Schema partition */
+ torture_comment(tctx, "Fetch partition: %s\n", ctx->schema_dn);
+ if (!_test_GetNCChanges(tctx, &ctx->dsa_bind, ctx->schema_dn, mem_ctx, &ctr6)) {
+ torture_fail(tctx, "_test_GetNCChanges() failed");
+ }
+
+ /* load schema if not loaded yet */
+ torture_comment(tctx, "Loading schema...\n");
+ if (!drs_util_dsdb_schema_load_ldb(tctx, ctx->ldb, &ctr6->mapping_ctr, false)) {
+ torture_fail(tctx, "drs_util_dsdb_schema_load_ldb() failed");
+ }
+ ldap_schema = dsdb_get_schema(ctx->ldb, NULL);
+
+ /* verify ATTIDs fetched */
+ torture_comment(tctx, "Verify ATTIDs fetched\n");
+ for (cur = ctr6->first_object; cur; cur = cur->next_object) {
+ attr_ctr = &cur->object.attribute_ctr;
+ for (i = 0; i < attr_ctr->num_attributes; i++) {
+ drs_attr = &attr_ctr->attributes[i];
+ dsdb_attr = dsdb_attribute_by_attributeID_id(ldap_schema,
+ drs_attr->attid);
+
+ torture_assert(tctx,
+ drs_attr->attid == dsdb_attr->attributeID_id,
+ _make_error_message(ctx, drs_attr->attid,
+ dsdb_attr,
+ cur->object.identifier));
+ if (dsdb_attr->msDS_IntId) {
+ torture_assert(tctx,
+ drs_attr->attid != dsdb_attr->msDS_IntId,
+ _make_error_message(ctx, drs_attr->attid,
+ dsdb_attr,
+ cur->object.identifier));
+ }
+ }
+ }
+
+ /* verify ATTIDs for Linked Attributes */
+ torture_comment(tctx, "Verify ATTIDs for Linked Attributes (%u)\n",
+ ctr6->linked_attributes_count);
+ for (i = 0; i < ctr6->linked_attributes_count; i++) {
+ la = &ctr6->linked_attributes[i];
+ dsdb_attr = dsdb_attribute_by_attributeID_id(ldap_schema, la->attid);
+
+ torture_assert(tctx,
+ la->attid == dsdb_attr->attributeID_id,
+ _make_error_message(ctx, la->attid,
+ dsdb_attr,
+ la->identifier));
+ if (dsdb_attr->msDS_IntId) {
+ torture_assert(tctx,
+ la->attid != dsdb_attr->msDS_IntId,
+ _make_error_message(ctx, la->attid,
+ dsdb_attr,
+ la->identifier));
+ }
+ }
+
+ talloc_free(mem_ctx);
+
+ return true;
+}
+
+/**
+ * Fetch non-Schema NC and check ATTID values returned.
+ * When a non-Schema partition is replicated, the ATTID
+ * should be the attribute's msDS-IntId value,
+ * if such a value exists
+ */
+static bool _test_dsintid(struct torture_context *tctx,
+ struct DsIntIdTestCtx *ctx,
+ const char *nc_dn_str)
+{
+ uint32_t i;
+ const struct dsdb_schema *ldap_schema;
+ struct drsuapi_DsGetNCChangesCtr6 *ctr6 = NULL;
+ const struct dsdb_attribute *dsdb_attr;
+ const struct drsuapi_DsReplicaAttribute *drs_attr;
+ const struct drsuapi_DsReplicaAttributeCtr *attr_ctr;
+ const struct drsuapi_DsReplicaObjectListItemEx *cur;
+ const struct drsuapi_DsReplicaLinkedAttribute *la;
+ TALLOC_CTX *mem_ctx;
+
+ mem_ctx = talloc_new(ctx);
+ torture_assert(tctx, mem_ctx, "Not enough memory");
+
+ /* fetch the whole partition */
+ torture_comment(tctx, "Fetch partition: %s\n", nc_dn_str);
+ if (!_test_GetNCChanges(tctx, &ctx->dsa_bind, nc_dn_str, mem_ctx, &ctr6)) {
+ torture_fail(tctx, "_test_GetNCChanges() failed");
+ }
+
+ /* load schema if not loaded yet */
+ torture_comment(tctx, "Loading schema...\n");
+ if (!drs_util_dsdb_schema_load_ldb(tctx, ctx->ldb, &ctr6->mapping_ctr, false)) {
+ torture_fail(tctx, "drs_util_dsdb_schema_load_ldb() failed");
+ }
+ ldap_schema = dsdb_get_schema(ctx->ldb, NULL);
+
+ /* verify ATTIDs fetched */
+ torture_comment(tctx, "Verify ATTIDs fetched\n");
+ for (cur = ctr6->first_object; cur; cur = cur->next_object) {
+ attr_ctr = &cur->object.attribute_ctr;
+ for (i = 0; i < attr_ctr->num_attributes; i++) {
+ drs_attr = &attr_ctr->attributes[i];
+ dsdb_attr = dsdb_attribute_by_attributeID_id(ldap_schema,
+ drs_attr->attid);
+ if (dsdb_attr->msDS_IntId) {
+ torture_assert(tctx,
+ drs_attr->attid == dsdb_attr->msDS_IntId,
+ _make_error_message(ctx, drs_attr->attid,
+ dsdb_attr,
+ cur->object.identifier));
+ } else {
+ torture_assert(tctx,
+ drs_attr->attid == dsdb_attr->attributeID_id,
+ _make_error_message(ctx, drs_attr->attid,
+ dsdb_attr,
+ cur->object.identifier));
+ }
+ }
+ }
+
+ /* verify ATTIDs for Linked Attributes */
+ torture_comment(tctx, "Verify ATTIDs for Linked Attributes (%u)\n",
+ ctr6->linked_attributes_count);
+ for (i = 0; i < ctr6->linked_attributes_count; i++) {
+ la = &ctr6->linked_attributes[i];
+ dsdb_attr = dsdb_attribute_by_attributeID_id(ldap_schema, la->attid);
+
+ if (dsdb_attr->msDS_IntId) {
+ torture_assert(tctx,
+ la->attid == dsdb_attr->msDS_IntId,
+ _make_error_message(ctx, la->attid,
+ dsdb_attr,
+ la->identifier));
+ } else {
+ torture_assert(tctx,
+ la->attid == dsdb_attr->attributeID_id,
+ _make_error_message(ctx, la->attid,
+ dsdb_attr,
+ la->identifier));
+ }
+ }
+
+ talloc_free(mem_ctx);
+
+ return true;
+}
+
+/**
+ * Fetch Configuration NC and check ATTID values returned.
+ * When the Configuration partition is replicated, the ATTID
+ * should be the attribute's msDS-IntId value,
+ * if such a value exists
+ */
+static bool test_dsintid_configuration(struct torture_context *tctx, struct DsIntIdTestCtx *ctx)
+{
+ return _test_dsintid(tctx, ctx, ctx->config_dn);
+}
+
+/**
+ * Fetch Domain NC and check ATTID values returned.
+ * When the Domain partition is replicated, the ATTID
+ * should be the attribute's msDS-IntId value,
+ * if such a value exists
+ */
+static bool test_dsintid_domain(struct torture_context *tctx, struct DsIntIdTestCtx *ctx)
+{
+ return _test_dsintid(tctx, ctx, ctx->domain_dn);
+}
+
+
+/**
+ * msDS-IntId test case setup
+ */
+static bool torture_dsintid_tcase_setup(struct torture_context *tctx, void **data)
+{
+ bool bret;
+ struct DsIntIdTestCtx *ctx;
+
+ *data = ctx = _dsintid_create_context(tctx);
+ torture_assert(tctx, ctx, "test_create_context() failed");
+
+ bret = _test_DsaBind(tctx, ctx, ctx->creds,
+ DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V8 |
+ DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V6,
+ &ctx->dsa_bind);
+ torture_assert(tctx, bret, "_test_DsaBind() failed");
+
+ bret = _test_LDAPBind(tctx, ctx, ctx->creds, ctx->ldap_url, &ctx->ldb);
+ torture_assert(tctx, bret, "_test_LDAPBind() failed");
+
+ bret = _test_provision(tctx, ctx);
+ torture_assert(tctx, bret, "_test_provision() failed");
+
+ return true;
+}
+
+/**
+ * msDS-IntId test case cleanup
+ */
+static bool torture_dsintid_tcase_teardown(struct torture_context *tctx, void *data)
+{
+ struct DsIntIdTestCtx *ctx;
+ struct drsuapi_DsUnbind r;
+ struct policy_handle bind_handle;
+
+ ctx = talloc_get_type(data, struct DsIntIdTestCtx);
+
+ ZERO_STRUCT(r);
+ r.out.bind_handle = &bind_handle;
+
+ /* Release DRSUAPI handle */
+ r.in.bind_handle = &ctx->dsa_bind.rpc_handle;
+ dcerpc_drsuapi_DsUnbind_r(ctx->dsa_bind.drs_handle, ctx, &r);
+
+ talloc_free(ctx);
+
+ return true;
+}
+
+/**
+ * msDS-IntId test case implementation
+ */
+void torture_drs_rpc_dsintid_tcase(struct torture_suite *suite)
+{
+ typedef bool (*run_func) (struct torture_context *test, void *tcase_data);
+ struct torture_tcase *tcase = torture_suite_add_tcase(suite, "msDSIntId");
+
+ torture_tcase_set_fixture(tcase,
+ torture_dsintid_tcase_setup,
+ torture_dsintid_tcase_teardown);
+
+ torture_tcase_add_simple_test(tcase, "Schema", (run_func)test_dsintid_schema);
+ torture_tcase_add_simple_test(tcase, "Configuration", (run_func)test_dsintid_configuration);
+ torture_tcase_add_simple_test(tcase, "Domain", (run_func)test_dsintid_domain);
+}
diff --git a/source4/torture/drs/unit/prefixmap_tests.c b/source4/torture/drs/unit/prefixmap_tests.c
new file mode 100644
index 0000000..35764cd
--- /dev/null
+++ b/source4/torture/drs/unit/prefixmap_tests.c
@@ -0,0 +1,900 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ DRSUAPI prefixMap unit tests
+
+ Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2009-2010
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "includes.h"
+#include "system/filesys.h"
+#include "torture/smbtorture.h"
+#include "dsdb/samdb/samdb.h"
+#include "torture/rpc/drsuapi.h"
+#include "torture/drs/proto.h"
+#include "param/param.h"
+#include "librpc/ndr/libndr.h"
+
+/**
+ * Private data to be shared among all tests in the test case
+ */
+struct drsut_prefixmap_data {
+ struct dsdb_schema_prefixmap *pfm_new;
+ struct dsdb_schema_prefixmap *pfm_full;
+
+ /* default schemaInfo value to test with */
+ struct dsdb_schema_info *schi_default;
+
+ struct ldb_context *ldb_ctx;
+};
+
+/**
+ * Test-oid data structure
+ */
+struct drsut_pfm_oid_data {
+ uint32_t id;
+ const char *bin_oid;
+ const char *oid_prefix;
+};
+
+/**
+ * Default prefixMap initialization data.
+ * This prefixMap is what dsdb_schema_pfm_new() should return.
+ * Based on: MS-DRSR, 5.16.4 ATTRTYP-to-OID Conversion
+ * procedure NewPrefixTable( )
+ */
+static const struct drsut_pfm_oid_data _prefixmap_test_new_data[] = {
+ {.id=0x00000000, .bin_oid="5504", .oid_prefix="2.5.4"},
+ {.id=0x00000001, .bin_oid="5506", .oid_prefix="2.5.6"},
+ {.id=0x00000002, .bin_oid="2A864886F7140102", .oid_prefix="1.2.840.113556.1.2"},
+ {.id=0x00000003, .bin_oid="2A864886F7140103", .oid_prefix="1.2.840.113556.1.3"},
+ {.id=0x00000004, .bin_oid="6086480165020201", .oid_prefix="2.16.840.1.101.2.2.1"},
+ {.id=0x00000005, .bin_oid="6086480165020203", .oid_prefix="2.16.840.1.101.2.2.3"},
+ {.id=0x00000006, .bin_oid="6086480165020105", .oid_prefix="2.16.840.1.101.2.1.5"},
+ {.id=0x00000007, .bin_oid="6086480165020104", .oid_prefix="2.16.840.1.101.2.1.4"},
+ {.id=0x00000008, .bin_oid="5505", .oid_prefix="2.5.5"},
+ {.id=0x00000009, .bin_oid="2A864886F7140104", .oid_prefix="1.2.840.113556.1.4"},
+ {.id=0x0000000A, .bin_oid="2A864886F7140105", .oid_prefix="1.2.840.113556.1.5"},
+ {.id=0x00000013, .bin_oid="0992268993F22C64", .oid_prefix="0.9.2342.19200300.100"},
+ {.id=0x00000014, .bin_oid="6086480186F84203", .oid_prefix="2.16.840.1.113730.3"},
+ {.id=0x00000015, .bin_oid="0992268993F22C6401", .oid_prefix="0.9.2342.19200300.100.1"},
+ {.id=0x00000016, .bin_oid="6086480186F8420301", .oid_prefix="2.16.840.1.113730.3.1"},
+ {.id=0x00000017, .bin_oid="2A864886F7140105B658", .oid_prefix="1.2.840.113556.1.5.7000"},
+ {.id=0x00000018, .bin_oid="5515", .oid_prefix="2.5.21"},
+ {.id=0x00000019, .bin_oid="5512", .oid_prefix="2.5.18"},
+ {.id=0x0000001A, .bin_oid="5514", .oid_prefix="2.5.20"},
+};
+
+/**
+ * Data used to create a full prefixMap for testing.
+ * The 'full prefixMap' is based on what W2K8 returns as a prefixMap
+ * on a clean installation - i.e. the prefixMap for a clean Schema
+ */
+static const struct drsut_pfm_oid_data _prefixmap_full_map_data[] = {
+ {.id=0x00000000, .bin_oid="0x5504", .oid_prefix="2.5.4"},
+ {.id=0x00000001, .bin_oid="0x5506", .oid_prefix="2.5.6"},
+ {.id=0x00000002, .bin_oid="0x2A864886F7140102", .oid_prefix="1.2.840.113556.1.2"},
+ {.id=0x00000003, .bin_oid="0x2A864886F7140103", .oid_prefix="1.2.840.113556.1.3"},
+ {.id=0x00000004, .bin_oid="0x6086480165020201", .oid_prefix="2.16.840.1.101.2.2.1"},
+ {.id=0x00000005, .bin_oid="0x6086480165020203", .oid_prefix="2.16.840.1.101.2.2.3"},
+ {.id=0x00000006, .bin_oid="0x6086480165020105", .oid_prefix="2.16.840.1.101.2.1.5"},
+ {.id=0x00000007, .bin_oid="0x6086480165020104", .oid_prefix="2.16.840.1.101.2.1.4"},
+ {.id=0x00000008, .bin_oid="0x5505", .oid_prefix="2.5.5"},
+ {.id=0x00000009, .bin_oid="0x2A864886F7140104", .oid_prefix="1.2.840.113556.1.4"},
+ {.id=0x0000000a, .bin_oid="0x2A864886F7140105", .oid_prefix="1.2.840.113556.1.5"},
+ {.id=0x00000013, .bin_oid="0x0992268993F22C64", .oid_prefix="0.9.2342.19200300.100"},
+ {.id=0x00000014, .bin_oid="0x6086480186F84203", .oid_prefix="2.16.840.1.113730.3"},
+ {.id=0x00000015, .bin_oid="0x0992268993F22C6401", .oid_prefix="0.9.2342.19200300.100.1"},
+ {.id=0x00000016, .bin_oid="0x6086480186F8420301", .oid_prefix="2.16.840.1.113730.3.1"},
+ {.id=0x00000017, .bin_oid="0x2A864886F7140105B658", .oid_prefix="1.2.840.113556.1.5.7000"},
+ {.id=0x00000018, .bin_oid="0x5515", .oid_prefix="2.5.21"},
+ {.id=0x00000019, .bin_oid="0x5512", .oid_prefix="2.5.18"},
+ {.id=0x0000001a, .bin_oid="0x5514", .oid_prefix="2.5.20"},
+ {.id=0x0000000b, .bin_oid="0x2A864886F71401048204", .oid_prefix="1.2.840.113556.1.4.260"},
+ {.id=0x0000000c, .bin_oid="0x2A864886F714010538", .oid_prefix="1.2.840.113556.1.5.56"},
+ {.id=0x0000000d, .bin_oid="0x2A864886F71401048206", .oid_prefix="1.2.840.113556.1.4.262"},
+ {.id=0x0000000e, .bin_oid="0x2A864886F714010539", .oid_prefix="1.2.840.113556.1.5.57"},
+ {.id=0x0000000f, .bin_oid="0x2A864886F71401048207", .oid_prefix="1.2.840.113556.1.4.263"},
+ {.id=0x00000010, .bin_oid="0x2A864886F71401053A", .oid_prefix="1.2.840.113556.1.5.58"},
+ {.id=0x00000011, .bin_oid="0x2A864886F714010549", .oid_prefix="1.2.840.113556.1.5.73"},
+ {.id=0x00000012, .bin_oid="0x2A864886F71401048231", .oid_prefix="1.2.840.113556.1.4.305"},
+ {.id=0x0000001b, .bin_oid="0x2B060104018B3A6577", .oid_prefix="1.3.6.1.4.1.1466.101.119"},
+ {.id=0x0000001c, .bin_oid="0x6086480186F8420302", .oid_prefix="2.16.840.1.113730.3.2"},
+ {.id=0x0000001d, .bin_oid="0x2B06010401817A01", .oid_prefix="1.3.6.1.4.1.250.1"},
+ {.id=0x0000001e, .bin_oid="0x2A864886F70D0109", .oid_prefix="1.2.840.113549.1.9"},
+ {.id=0x0000001f, .bin_oid="0x0992268993F22C6404", .oid_prefix="0.9.2342.19200300.100.4"},
+ {.id=0x00000020, .bin_oid="0x2A864886F714010617", .oid_prefix="1.2.840.113556.1.6.23"},
+ {.id=0x00000021, .bin_oid="0x2A864886F71401061201", .oid_prefix="1.2.840.113556.1.6.18.1"},
+ {.id=0x00000022, .bin_oid="0x2A864886F71401061202", .oid_prefix="1.2.840.113556.1.6.18.2"},
+ {.id=0x00000023, .bin_oid="0x2A864886F71401060D03", .oid_prefix="1.2.840.113556.1.6.13.3"},
+ {.id=0x00000024, .bin_oid="0x2A864886F71401060D04", .oid_prefix="1.2.840.113556.1.6.13.4"},
+ {.id=0x00000025, .bin_oid="0x2B0601010101", .oid_prefix="1.3.6.1.1.1.1"},
+ {.id=0x00000026, .bin_oid="0x2B0601010102", .oid_prefix="1.3.6.1.1.1.2"},
+ {.id=0x000003ed, .bin_oid="0x2A864886F7140104B65866", .oid_prefix="1.2.840.113556.1.4.7000.102"},
+ {.id=0x00000428, .bin_oid="0x2A864886F7140105B6583E", .oid_prefix="1.2.840.113556.1.5.7000.62"},
+ {.id=0x0000044c, .bin_oid="0x2A864886F7140104B6586683", .oid_prefix="1.2.840.113556.1.4.7000.102:0x83"},
+ {.id=0x0000044f, .bin_oid="0x2A864886F7140104B6586681", .oid_prefix="1.2.840.113556.1.4.7000.102:0x81"},
+ {.id=0x0000047d, .bin_oid="0x2A864886F7140105B6583E81", .oid_prefix="1.2.840.113556.1.5.7000.62:0x81"},
+ {.id=0x00000561, .bin_oid="0x2A864886F7140105B6583E83", .oid_prefix="1.2.840.113556.1.5.7000.62:0x83"},
+ {.id=0x000007d1, .bin_oid="0x2A864886F71401061401", .oid_prefix="1.2.840.113556.1.6.20.1"},
+ {.id=0x000007e1, .bin_oid="0x2A864886F71401061402", .oid_prefix="1.2.840.113556.1.6.20.2"},
+ {.id=0x00001b86, .bin_oid="0x2A817A", .oid_prefix="1.2.250"},
+ {.id=0x00001c78, .bin_oid="0x2A817A81", .oid_prefix="1.2.250:0x81"},
+ {.id=0x00001c7b, .bin_oid="0x2A817A8180", .oid_prefix="1.2.250:0x8180"},
+};
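+/*
+ * Entries whose oid_prefix ends in ":0xNN" cover OIDs whose last
+ * subidentifier needs more than one BER byte; the leading byte(s) of
+ * that subidentifier are absorbed into the prefix (see the 1.2.250
+ * entries and the matching test data below).
+ */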
+
+
+/**
+ * OID-to-ATTID mappings to be used for testing.
+ * An entry is marked as 'exists=true' if it exists in
+ * base prefixMap (_prefixmap_test_new_data)
+ */
+static const struct {
+ const char *oid;
+ uint32_t id;
+ uint32_t attid;
+ bool exists;
+} _prefixmap_test_data[] = {
+ {.oid="2.5.4.0", .id=0x00000000, .attid=0x000000, .exists=true},
+ {.oid="2.5.4.42", .id=0x00000000, .attid=0x00002a, .exists=true},
+ {.oid="1.2.840.113556.1.2.1", .id=0x00000002, .attid=0x020001, .exists=true},
+ {.oid="1.2.840.113556.1.2.13", .id=0x00000002, .attid=0x02000d, .exists=true},
+ {.oid="1.2.840.113556.1.2.281", .id=0x00000002, .attid=0x020119, .exists=true},
+ {.oid="1.2.840.113556.1.4.125", .id=0x00000009, .attid=0x09007d, .exists=true},
+ {.oid="1.2.840.113556.1.4.146", .id=0x00000009, .attid=0x090092, .exists=true},
+ {.oid="1.2.250.1", .id=0x00001b86, .attid=0x1b860001, .exists=false},
+ {.oid="1.2.250.16386", .id=0x00001c78, .attid=0x1c788002, .exists=false},
+ {.oid="1.2.250.2097154", .id=0x00001c7b, .attid=0x1c7b8002, .exists=false},
+};
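+/*
+ * Note: per MS-DRSR 5.16.4, the upper 16 bits of an ATTID index the
+ * prefix table entry (the 'id' column above), while the lower 16 bits
+ * are derived from the trailing part of the OID; e.g. "2.5.4.42" maps
+ * to prefix id 0x0000 and ATTID 0x0000002a.
+ */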
+
+
+/**
+ * Creates dsdb_schema_prefixmap based on predefined data
+ */
+static WERROR _drsut_prefixmap_new(const struct drsut_pfm_oid_data *_pfm_init_data, uint32_t count,
+ TALLOC_CTX *mem_ctx, struct dsdb_schema_prefixmap **_pfm)
+{
+ uint32_t i;
+ struct dsdb_schema_prefixmap *pfm;
+
+ pfm = talloc(mem_ctx, struct dsdb_schema_prefixmap);
+ W_ERROR_HAVE_NO_MEMORY(pfm);
+
+ pfm->length = count;
+ pfm->prefixes = talloc_array(pfm, struct dsdb_schema_prefixmap_oid, pfm->length);
+ if (!pfm->prefixes) {
+ talloc_free(pfm);
+ return WERR_NOT_ENOUGH_MEMORY;
+ }
+
+ for (i = 0; i < pfm->length; i++) {
+ pfm->prefixes[i].id = _pfm_init_data[i].id;
+ pfm->prefixes[i].bin_oid = strhex_to_data_blob(pfm, _pfm_init_data[i].bin_oid);
+ if (!pfm->prefixes[i].bin_oid.data) {
+ talloc_free(pfm);
+ return WERR_NOT_ENOUGH_MEMORY;
+ }
+ }
+
+ *_pfm = pfm;
+
+ return WERR_OK;
+}
+
+/**
+ * Compares two prefixMaps for being equal - same items on same indexes
+ */
+static bool _torture_drs_pfm_compare_same(struct torture_context *tctx,
+ const struct dsdb_schema_prefixmap *pfm_left,
+ const struct dsdb_schema_prefixmap *pfm_right,
+ bool quiet)
+{
+ uint32_t i;
+ char *err_msg = NULL;
+
+ if (pfm_left->length != pfm_right->length) {
+ err_msg = talloc_asprintf(tctx, "prefixMaps differ in size; left = %d, right = %d",
+ pfm_left->length, pfm_right->length);
+ goto failed;
+ }
+
+ for (i = 0; i < pfm_left->length; i++) {
+ struct dsdb_schema_prefixmap_oid *entry_left = &pfm_left->prefixes[i];
+ struct dsdb_schema_prefixmap_oid *entry_right = &pfm_right->prefixes[i];
+
+ if (entry_left->id != entry_right->id) {
+ err_msg = talloc_asprintf(tctx, "Different IDs for index=%d", i);
+ goto failed;
+ }
+ if (data_blob_cmp(&entry_left->bin_oid, &entry_right->bin_oid)) {
+ err_msg = talloc_asprintf(tctx, "Different bin_oid for index=%d", i);
+ goto failed;
+ }
+ }
+
+ return true;
+
+failed:
+ if (!quiet) {
+ torture_comment(tctx, "_torture_drs_pfm_compare_same: %s", err_msg);
+ }
+ talloc_free(err_msg);
+
+ return false;
+}
+
+/*
+ * Tests dsdb_schema_pfm_new()
+ */
+static bool torture_drs_unit_pfm_new(struct torture_context *tctx, struct drsut_prefixmap_data *priv)
+{
+ WERROR werr;
+ bool bret;
+ TALLOC_CTX *mem_ctx;
+ struct dsdb_schema_prefixmap *pfm = NULL;
+
+ mem_ctx = talloc_new(priv);
+
+ /* create new prefix map */
+ werr = dsdb_schema_pfm_new(mem_ctx, &pfm);
+ torture_assert_werr_ok(tctx, werr, "dsdb_schema_pfm_new() failed!");
+ torture_assert(tctx, pfm != NULL, "NULL prefixMap created!");
+ torture_assert(tctx, pfm->length > 0, "Empty prefixMap created!");
+ torture_assert(tctx, pfm->prefixes != NULL, "No prefixes for newly created prefixMap!");
+
+ /* compare newly created prefixMap with template one */
+ bret = _torture_drs_pfm_compare_same(tctx, priv->pfm_new, pfm, false);
+
+ talloc_free(mem_ctx);
+
+ return bret;
+}
+
+/**
+ * Tests dsdb_schema_pfm_make_attid() using full prefixMap.
+ * In this test we know exactly which ATTID and prefixMap->ID
+ * should be returned, i.e. no prefixMap entries should be added.
+ */
+static bool torture_drs_unit_pfm_make_attid_full_map(struct torture_context *tctx, struct drsut_prefixmap_data *priv)
+{
+ WERROR werr;
+ uint32_t i, count;
+ uint32_t attid;
+ char *err_msg;
+
+ count = ARRAY_SIZE(_prefixmap_test_data);
+ for (i = 0; i < count; i++) {
+ werr = dsdb_schema_pfm_make_attid(priv->pfm_full, _prefixmap_test_data[i].oid, &attid);
+ /* prepare error message */
+ err_msg = talloc_asprintf(priv, "dsdb_schema_pfm_make_attid() failed with %s",
+ _prefixmap_test_data[i].oid);
+ torture_assert(tctx, err_msg, "Unexpected: Have no memory!");
+ /* verify result and returned ATTID */
+ torture_assert_werr_ok(tctx, werr, err_msg);
+ torture_assert_int_equal(tctx, attid, _prefixmap_test_data[i].attid, err_msg);
+ /* reclaim memory for prepared error message */
+ talloc_free(err_msg);
+ }
+
+ return true;
+}
+
+/**
+ * Tests dsdb_schema_pfm_make_attid() using an initially small prefixMap.
+ * In this test we don't know exactly which ATTID and prefixMap->ID
+ * should be returned, but we can verify the lo-word of the ATTID.
+ * This exercises the implementation branch where a new
+ * prefix has to be added to the prefixMap.
+ */
+static bool torture_drs_unit_pfm_make_attid_small_map(struct torture_context *tctx, struct drsut_prefixmap_data *priv)
+{
+ WERROR werr;
+ uint32_t i, j;
+ uint32_t idx;
+ uint32_t attid, attid_2;
+ char *err_msg;
+ struct dsdb_schema_prefixmap *pfm = NULL;
+ TALLOC_CTX *mem_ctx;
+
+ mem_ctx = talloc_new(priv);
+
+ /* create new prefix map */
+ werr = dsdb_schema_pfm_new(mem_ctx, &pfm);
+ torture_assert_werr_ok(tctx, werr, "dsdb_schema_pfm_new() failed!");
+
+ /* make some ATTIDs and check result */
+ for (i = 0; i < ARRAY_SIZE(_prefixmap_test_data); i++) {
+ werr = dsdb_schema_pfm_make_attid(pfm, _prefixmap_test_data[i].oid, &attid);
+
+ /* prepare error message */
+ err_msg = talloc_asprintf(mem_ctx, "dsdb_schema_pfm_make_attid() failed with %s",
+ _prefixmap_test_data[i].oid);
+ torture_assert(tctx, err_msg, "Unexpected: Have no memory!");
+
+ /* verify result and returned ATTID */
+ torture_assert_werr_ok(tctx, werr, err_msg);
+ /* verify ATTID lo-word */
+ torture_assert_int_equal(tctx, attid & 0xFFFF, _prefixmap_test_data[i].attid & 0xFFFF, err_msg);
+
+ /* try again, this time verify for whole ATTID */
+ werr = dsdb_schema_pfm_make_attid(pfm, _prefixmap_test_data[i].oid, &attid_2);
+ torture_assert_werr_ok(tctx, werr, err_msg);
+ torture_assert_int_equal(tctx, attid_2, attid, err_msg);
+
+ /* reclaim memory for prepared error message */
+ talloc_free(err_msg);
+
+ /* check there is such an index in modified prefixMap */
+ idx = (attid >> 16);
+ for (j = 0; j < pfm->length; j++) {
+ if (pfm->prefixes[j].id == idx)
+ break;
+ }
+ if (j >= pfm->length) {
+ torture_result(tctx, TORTURE_FAIL, __location__": No prefix for ATTID=0x%08X", attid);
+ return false;
+ }
+
+ }
+
+ talloc_free(mem_ctx);
+
+ return true;
+}
+
+/**
+ * Tests dsdb_schema_pfm_attid_from_oid() using full prefixMap.
+ * In this test we know exactly which ATTID and prefixMap->ID
+ * should be returned - dsdb_schema_pfm_attid_from_oid() should succeed.
+ */
+static bool torture_drs_unit_pfm_attid_from_oid_full_map(struct torture_context *tctx,
+ struct drsut_prefixmap_data *priv)
+{
+ WERROR werr;
+ uint32_t i, count;
+ uint32_t attid;
+ char *err_msg;
+
+ count = ARRAY_SIZE(_prefixmap_test_data);
+ for (i = 0; i < count; i++) {
+ werr = dsdb_schema_pfm_attid_from_oid(priv->pfm_full,
+ _prefixmap_test_data[i].oid,
+ &attid);
+ /* prepare error message */
+ err_msg = talloc_asprintf(priv, "dsdb_schema_pfm_attid_from_oid() failed with %s",
+ _prefixmap_test_data[i].oid);
+ torture_assert(tctx, err_msg, "Unexpected: Have no memory!");
+ /* verify result and returned ATTID */
+ torture_assert_werr_ok(tctx, werr, err_msg);
+ torture_assert_int_equal(tctx, attid, _prefixmap_test_data[i].attid, err_msg);
+ /* reclaim memory for prepared error message */
+ talloc_free(err_msg);
+ }
+
+ return true;
+}
+
+/**
+ * Tests dsdb_schema_pfm_attid_from_oid() using base (initial) prefixMap.
+ * dsdb_schema_pfm_attid_from_oid() should fail when tested with OIDs
+ * that are not already in the prefixMap.
+ */
+static bool torture_drs_unit_pfm_attid_from_oid_base_map(struct torture_context *tctx,
+ struct drsut_prefixmap_data *priv)
+{
+ WERROR werr;
+ uint32_t i;
+ uint32_t attid;
+ char *err_msg;
+ struct dsdb_schema_prefixmap *pfm = NULL;
+ struct dsdb_schema_prefixmap pfm_prev;
+ TALLOC_CTX *mem_ctx;
+
+ mem_ctx = talloc_new(priv);
+ torture_assert(tctx, mem_ctx, "Unexpected: Have no memory!");
+
+ /* create new prefix map */
+ werr = dsdb_schema_pfm_new(mem_ctx, &pfm);
+ torture_assert_werr_ok(tctx, werr, "dsdb_schema_pfm_new() failed!");
+
+ /* keep initial pfm around for testing */
+ pfm_prev = *pfm;
+ pfm_prev.prefixes = talloc_reference(mem_ctx, pfm->prefixes);
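+ /*
+ * The shallow snapshot (talloc_reference keeps the initial prefixes
+ * array alive) lets the checks below verify that the lookups never
+ * add prefixes to the map.
+ */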
+
+ /* get some ATTIDs and check result */
+ for (i = 0; i < ARRAY_SIZE(_prefixmap_test_data); i++) {
+ werr = dsdb_schema_pfm_attid_from_oid(pfm, _prefixmap_test_data[i].oid, &attid);
+
+ /* prepare error message */
+ err_msg = talloc_asprintf(mem_ctx,
+ "dsdb_schema_pfm_attid_from_oid() failed for %s",
+ _prefixmap_test_data[i].oid);
+ torture_assert(tctx, err_msg, "Unexpected: Have no memory!");
+
+
+ /* verify pfm hasn't been altered */
+ if (_prefixmap_test_data[i].exists) {
+ /* should succeed and return valid ATTID */
+ torture_assert_werr_ok(tctx, werr, err_msg);
+ /* verify ATTID */
+ torture_assert_int_equal(tctx,
+ attid, _prefixmap_test_data[i].attid,
+ err_msg);
+ } else {
+ /* should fail */
+ torture_assert_werr_equal(tctx, werr, WERR_NOT_FOUND, err_msg);
+ }
+
+ /* prefixMap should never be changed */
+ if (!_torture_drs_pfm_compare_same(tctx, &pfm_prev, pfm, true)) {
+ torture_fail(tctx, "schema->prefixmap has changed");
+ }
+
+ /* reclaim memory for prepared error message */
+ talloc_free(err_msg);
+ }
+
+ talloc_free(mem_ctx);
+
+ return true;
+}
+
+/**
+ * Tests dsdb_schema_pfm_oid_from_attid() using full prefixMap.
+ */
+static bool torture_drs_unit_pfm_oid_from_attid(struct torture_context *tctx, struct drsut_prefixmap_data *priv)
+{
+ WERROR werr;
+ uint32_t i, count;
+ char *err_msg;
+ const char *oid;
+
+ count = ARRAY_SIZE(_prefixmap_test_data);
+ for (i = 0; i < count; i++) {
+ oid = NULL;
+ werr = dsdb_schema_pfm_oid_from_attid(priv->pfm_full, _prefixmap_test_data[i].attid,
+ priv, &oid);
+ /* prepare error message */
+ err_msg = talloc_asprintf(priv, "dsdb_schema_pfm_oid_from_attid() failed with 0x%08X",
+ _prefixmap_test_data[i].attid);
+ torture_assert(tctx, err_msg, "Unexpected: Have no memory!");
+ /* verify result and returned OID */
+ torture_assert_werr_ok(tctx, werr, err_msg);
+ torture_assert(tctx, oid, "dsdb_schema_pfm_oid_from_attid() returned NULL OID!!!");
+ torture_assert_str_equal(tctx, oid, _prefixmap_test_data[i].oid, err_msg);
+ /* reclaim memory for prepared error message */
+ talloc_free(err_msg);
+ /* free memory for OID */
+ talloc_free(discard_const(oid));
+ }
+
+ return true;
+}
+
+/**
+ * Tests dsdb_schema_pfm_oid_from_attid() for handling
+ * correctly different type of attid values.
+ * See: MS-ADTS, 3.1.1.2.6 ATTRTYP
+ */
+static bool torture_drs_unit_pfm_oid_from_attid_check_attid(struct torture_context *tctx,
+ struct drsut_prefixmap_data *priv)
+{
+ WERROR werr;
+ const char *oid;
+
+ /* Test with valid prefixMap attid */
+ werr = dsdb_schema_pfm_oid_from_attid(priv->pfm_full, 0x00010001, tctx, &oid);
+ torture_assert_werr_ok(tctx, werr, "Testing prefixMap type attid = 0x00010001");
+
+ /* Test with valid attid but invalid index */
+ werr = dsdb_schema_pfm_oid_from_attid(priv->pfm_full, 0x01110001, tctx, &oid);
+ torture_assert_werr_equal(tctx, werr, WERR_DS_NO_ATTRIBUTE_OR_VALUE,
+ "Testing invalid-index attid = 0x01110001");
+
+ /* Test with attid in msDS-IntId range */
+ werr = dsdb_schema_pfm_oid_from_attid(priv->pfm_full, 0x80000000, tctx, &oid);
+ torture_assert_werr_equal(tctx, werr, WERR_INVALID_PARAMETER,
+ "Testing msDS-IntId type attid = 0x80000000");
+ werr = dsdb_schema_pfm_oid_from_attid(priv->pfm_full, 0xBFFFFFFF, tctx, &oid);
+ torture_assert_werr_equal(tctx, werr, WERR_INVALID_PARAMETER,
+ "Testing msDS-IntId type attid = 0xBFFFFFFF");
+
+ /* Test with attid in RESERVED range */
+ werr = dsdb_schema_pfm_oid_from_attid(priv->pfm_full, 0xC0000000, tctx, &oid);
+ torture_assert_werr_equal(tctx, werr, WERR_INVALID_PARAMETER,
+ "Testing RESERVED type attid = 0xC0000000");
+ werr = dsdb_schema_pfm_oid_from_attid(priv->pfm_full, 0xFFFEFFFF, tctx, &oid);
+ torture_assert_werr_equal(tctx, werr, WERR_INVALID_PARAMETER,
+ "Testing RESERVED type attid = 0xFFFEFFFF");
+
+ /* Test with attid in INTERNAL range */
+ werr = dsdb_schema_pfm_oid_from_attid(priv->pfm_full, 0xFFFF0000, tctx, &oid);
+ torture_assert_werr_equal(tctx, werr, WERR_INVALID_PARAMETER,
+ "Testing INTERNAL type attid = 0xFFFF0000");
+ werr = dsdb_schema_pfm_oid_from_attid(priv->pfm_full, 0xFFFFFFFF, tctx, &oid);
+ torture_assert_werr_equal(tctx, werr, WERR_INVALID_PARAMETER,
+ "Testing INTERNAL type attid = 0xFFFFFFFF");
+
+ return true;
+}
+
+/**
+ * Test Schema prefixMap conversions to/from drsuapi prefixMap
+ * representation.
+ */
+static bool torture_drs_unit_pfm_to_from_drsuapi(struct torture_context *tctx, struct drsut_prefixmap_data *priv)
+{
+ WERROR werr;
+ struct dsdb_schema_info *schema_info;
+ DATA_BLOB schema_info_blob;
+ struct dsdb_schema_prefixmap *pfm;
+ struct drsuapi_DsReplicaOIDMapping_Ctr *ctr;
+ TALLOC_CTX *mem_ctx;
+
+ mem_ctx = talloc_new(tctx);
+ torture_assert(tctx, mem_ctx, "Unexpected: Have no memory!");
+
+ /* convert Schema_prefixMap to drsuapi_prefixMap */
+ werr = dsdb_drsuapi_pfm_from_schema_pfm(priv->pfm_full, priv->schi_default, mem_ctx, &ctr);
+ torture_assert_werr_ok(tctx, werr, "dsdb_drsuapi_pfm_from_schema_pfm() failed");
+ torture_assert(tctx, ctr && ctr->mappings, "drsuapi_prefixMap not constructed correctly");
+ torture_assert_int_equal(tctx, ctr->num_mappings, priv->pfm_full->length + 1,
+ "drs_mappings count does not match");
+ /* look for schema_info entry - it should be the last one */
+ schema_info_blob = data_blob_const(ctr->mappings[ctr->num_mappings - 1].oid.binary_oid,
+ ctr->mappings[ctr->num_mappings - 1].oid.length);
+ werr = dsdb_schema_info_from_blob(&schema_info_blob, tctx, &schema_info);
+ torture_assert_werr_ok(tctx, werr, "dsdb_schema_info_from_blob failed");
+ torture_assert_int_equal(tctx, schema_info->revision, priv->schi_default->revision,
+ "schema_info (revision) not stored correctly or not last entry");
+ torture_assert(tctx, GUID_equal(&schema_info->invocation_id, &priv->schi_default->invocation_id),
+ "schema_info (invocation_id) not stored correctly or not last entry");
+
+ /* compare schema_prefixMap and drsuapi_prefixMap */
+ werr = dsdb_schema_pfm_contains_drsuapi_pfm(priv->pfm_full, ctr);
+ torture_assert_werr_ok(tctx, werr, "dsdb_schema_pfm_contains_drsuapi_pfm() failed");
+
+ /* convert back drsuapi_prefixMap to schema_prefixMap */
+ werr = dsdb_schema_pfm_from_drsuapi_pfm(ctr, true, mem_ctx, &pfm, &schema_info);
+ torture_assert_werr_ok(tctx, werr, "dsdb_schema_pfm_from_drsuapi_pfm() failed");
+ torture_assert_int_equal(tctx, schema_info->revision, priv->schi_default->revision,
+ "Fetched schema_info is different (revision)");
+ torture_assert(tctx, GUID_equal(&schema_info->invocation_id, &priv->schi_default->invocation_id),
+ "Fetched schema_info is different (invocation_id)");
+
+ /* compare against the original */
+ if (!_torture_drs_pfm_compare_same(tctx, priv->pfm_full, pfm, true)) {
+ talloc_free(mem_ctx);
+ return false;
+ }
+
+ /* test conversion with partial drsuapi_prefixMap */
+ ctr->num_mappings--;
+ werr = dsdb_schema_pfm_from_drsuapi_pfm(ctr, false, mem_ctx, &pfm, NULL);
+ torture_assert_werr_ok(tctx, werr, "dsdb_schema_pfm_from_drsuapi_pfm() failed");
+ /* compare against the original */
+ if (!_torture_drs_pfm_compare_same(tctx, priv->pfm_full, pfm, false)) {
+ talloc_free(mem_ctx);
+ return false;
+ }
+
+ talloc_free(mem_ctx);
+ return true;
+}
+
+
+/**
+ * Test Schema prefixMap conversions to/from ldb_val
+ * blob representation.
+ */
+static bool torture_drs_unit_pfm_to_from_ldb_val(struct torture_context *tctx, struct drsut_prefixmap_data *priv)
+{
+ WERROR werr;
+ struct dsdb_schema *schema;
+ struct ldb_val pfm_ldb_val;
+ struct ldb_val schema_info_ldb_val;
+ TALLOC_CTX *mem_ctx;
+
+ mem_ctx = talloc_new(tctx);
+ torture_assert(tctx, mem_ctx, "Unexpected: Have no memory!");
+
+ schema = dsdb_new_schema(mem_ctx);
+ torture_assert(tctx, schema, "Unexpected: failed to allocate schema object");
+
+ /* set priv->pfm_full as prefixMap for new schema object */
+ schema->prefixmap = priv->pfm_full;
+ schema->schema_info = priv->schi_default;
+
+ /* convert schema_prefixMap to ldb_val blob */
+ werr = dsdb_get_oid_mappings_ldb(schema, mem_ctx, &pfm_ldb_val, &schema_info_ldb_val);
+ torture_assert_werr_ok(tctx, werr, "dsdb_get_oid_mappings_ldb() failed");
+ torture_assert(tctx, pfm_ldb_val.data && pfm_ldb_val.length,
+ "pfm_ldb_val not constructed correctly");
+ torture_assert(tctx, schema_info_ldb_val.data && schema_info_ldb_val.length,
+ "schema_info_ldb_val not constructed correctly");
+
+ /* convert pfm_ldb_val back to schema_prefixMap */
+ schema->prefixmap = NULL;
+ schema->schema_info = NULL;
+ werr = dsdb_load_oid_mappings_ldb(schema, &pfm_ldb_val, &schema_info_ldb_val);
+ torture_assert_werr_ok(tctx, werr, "dsdb_load_oid_mappings_ldb() failed");
+ /* compare against the original */
+ if (!_torture_drs_pfm_compare_same(tctx, schema->prefixmap, priv->pfm_full, false)) {
+ talloc_free(mem_ctx);
+ return false;
+ }
+ torture_assert_int_equal(tctx, schema->schema_info->revision, priv->schi_default->revision,
+ "Fetched schema_info is different (revision)");
+ torture_assert(tctx, GUID_equal(&schema->schema_info->invocation_id, &priv->schi_default->invocation_id),
+ "Fetched schema_info is different (invocation_id)");
+
+ talloc_free(mem_ctx);
+ return true;
+}
+
+/**
+ * Test read/write in ldb implementation
+ */
+static bool torture_drs_unit_pfm_read_write_ldb(struct torture_context *tctx, struct drsut_prefixmap_data *priv)
+{
+ WERROR werr;
+ struct dsdb_schema *schema;
+ struct dsdb_schema_prefixmap *pfm;
+ TALLOC_CTX *mem_ctx;
+
+ mem_ctx = talloc_new(tctx);
+ torture_assert(tctx, mem_ctx, "Unexpected: Have no memory!");
+
+ /* make up a dsdb_schema to test with */
+ schema = dsdb_new_schema(mem_ctx);
+ torture_assert(tctx, schema, "Unexpected: failed to allocate schema object");
+ /* set priv->pfm_full as prefixMap for new schema object */
+ schema->prefixmap = priv->pfm_full;
+ schema->schema_info = priv->schi_default;
+
+ /* write prefixMap to ldb */
+ werr = dsdb_write_prefixes_from_schema_to_ldb(mem_ctx, priv->ldb_ctx, schema);
+ torture_assert_werr_ok(tctx, werr, "dsdb_write_prefixes_from_schema_to_ldb() failed");
+
+ /* read from ldb what we have written */
+ werr = dsdb_read_prefixes_from_ldb(priv->ldb_ctx, mem_ctx, &pfm);
+ torture_assert_werr_ok(tctx, werr, "dsdb_read_prefixes_from_ldb() failed");
+
+ /* compare data written/read */
+ if (!_torture_drs_pfm_compare_same(tctx, schema->prefixmap, priv->pfm_full, false)) {
+ torture_fail(tctx, "prefixMap read/write in LDB is not consistent");
+ }
+
+ talloc_free(mem_ctx);
+
+ return true;
+}
+
+/**
+ * Test dsdb_create_prefix_mapping
+ */
+static bool torture_drs_unit_dsdb_create_prefix_mapping(struct torture_context *tctx, struct drsut_prefixmap_data *priv)
+{
+ WERROR werr;
+ uint32_t i;
+ struct dsdb_schema *schema;
+ TALLOC_CTX *mem_ctx;
+ struct dsdb_schema_prefixmap *pfm_ldb = NULL;
+
+ mem_ctx = talloc_new(tctx);
+ torture_assert(tctx, mem_ctx, "Unexpected: Have no memory!");
+
+ /* make up a dsdb_schema to test with */
+ schema = dsdb_new_schema(mem_ctx);
+ torture_assert(tctx, schema, "Unexpected: failed to allocate schema object");
+ /* set priv->pfm_full as prefixMap for new schema object */
+ schema->schema_info = priv->schi_default;
+ werr = _drsut_prefixmap_new(_prefixmap_test_new_data, ARRAY_SIZE(_prefixmap_test_new_data),
+ schema, &schema->prefixmap);
+ torture_assert_werr_ok(tctx, werr, "_drsut_prefixmap_new() failed");
+ /* write prefixMap to ldb */
+ werr = dsdb_write_prefixes_from_schema_to_ldb(mem_ctx, priv->ldb_ctx, schema);
+ torture_assert_werr_ok(tctx, werr, "dsdb_write_prefixes_from_schema_to_ldb() failed");
+
+ /* read from ldb what we have written */
+ werr = dsdb_read_prefixes_from_ldb(priv->ldb_ctx, mem_ctx, &pfm_ldb);
+ torture_assert_werr_ok(tctx, werr, "dsdb_read_prefixes_from_ldb() failed");
+ /* compare data written/read */
+ if (!_torture_drs_pfm_compare_same(tctx, schema->prefixmap, pfm_ldb, true)) {
+ torture_fail(tctx, "pfm in LDB is different");
+ }
+ TALLOC_FREE(pfm_ldb);
+
+ for (i = 0; i < ARRAY_SIZE(_prefixmap_test_data); i++) {
+ struct dsdb_schema_prefixmap *pfm_prev;
+ struct dsdb_schema_prefixmap *pfm_new;
+
+ pfm_prev = schema->prefixmap;
+
+ pfm_new = dsdb_schema_pfm_copy_shallow(schema, pfm_prev);
+ torture_assert(tctx, pfm_new != NULL, "dsdb_schema_pfm_copy_shallow() failed");
+
+ if (!_prefixmap_test_data[i].exists) {
+ uint32_t attid;
+
+ werr = dsdb_schema_pfm_make_attid(pfm_new,
+ _prefixmap_test_data[i].oid,
+ &attid);
+ torture_assert_werr_ok(tctx, werr, "dsdb_schema_pfm_make_attid() failed");
+ }
+
+ /* call dsdb_create_prefix_mapping() and check result accordingly */
+ werr = dsdb_create_prefix_mapping(priv->ldb_ctx, schema, _prefixmap_test_data[i].oid);
+ torture_assert_werr_ok(tctx, werr, "dsdb_create_prefix_mapping() failed");
+
+ /*
+ * The in-memory prefixMap should not change; it is only updated on reload
+ */
+ torture_assert(tctx, pfm_prev == schema->prefixmap,
+ "schema->prefixmap has been reallocated!");
+ if (!_torture_drs_pfm_compare_same(tctx, pfm_prev, schema->prefixmap, true)) {
+ torture_fail(tctx, "schema->prefixmap has changed");
+ }
+
+ /* read from ldb what we have written */
+ werr = dsdb_read_prefixes_from_ldb(priv->ldb_ctx, mem_ctx, &pfm_ldb);
+ torture_assert_werr_ok(tctx, werr, "dsdb_read_prefixes_from_ldb() failed");
+ /* compare data written/read */
+ if (!_torture_drs_pfm_compare_same(tctx, pfm_new, pfm_ldb, true)) {
+ torture_fail(tctx, talloc_asprintf(tctx, "%u: pfm in LDB is different", i));
+ }
+ /* free mem for pfm read from LDB */
+ TALLOC_FREE(pfm_ldb);
+
+ /* prepare for the next round */
+ schema->prefixmap = pfm_new;
+ }
+
+ talloc_free(mem_ctx);
+
+ return true;
+}
+
+/**
+ * Prepares a temporary LDB and opens it
+ */
+static bool torture_drs_unit_ldb_setup(struct torture_context *tctx, struct drsut_prefixmap_data *priv)
+{
+ int ldb_err;
+ char *ldb_url;
+ bool bret = true;
+ TALLOC_CTX* mem_ctx;
+ char *tempdir;
+ NTSTATUS status;
+
+ mem_ctx = talloc_new(priv);
+
+ status = torture_temp_dir(tctx, "drs_", &tempdir);
+ torture_assert_ntstatus_ok(tctx, status, "creating temp dir");
+
+ ldb_url = talloc_asprintf(priv, "%s/drs_test.ldb", tempdir);
+
+ /* create LDB */
+ priv->ldb_ctx = ldb_init(priv, tctx->ev);
+ ldb_err = ldb_connect(priv->ldb_ctx, ldb_url, 0, NULL);
+ torture_assert_int_equal_goto(tctx, ldb_err, LDB_SUCCESS, bret, DONE, "ldb_connect() failed");
+
+ /* set some schemaNamingContext */
+ ldb_err = ldb_set_opaque(priv->ldb_ctx,
+ "schemaNamingContext",
+ ldb_dn_new(priv->ldb_ctx, priv->ldb_ctx, "CN=Schema,CN=Config"));
+ torture_assert_int_equal_goto(tctx, ldb_err, LDB_SUCCESS, bret, DONE, "ldb_set_opaque() failed");
+
+	/* add prefixMap attribute so the tested layer can work properly */
+ {
+ struct ldb_message *msg = ldb_msg_new(mem_ctx);
+ msg->dn = ldb_get_schema_basedn(priv->ldb_ctx);
+ ldb_err = ldb_msg_add_string(msg, "prefixMap", "prefixMap");
+ torture_assert_int_equal_goto(tctx, ldb_err, LDB_SUCCESS, bret, DONE,
+ "ldb_msg_add_string() failed");
+
+ ldb_err = ldb_add(priv->ldb_ctx, msg);
+ torture_assert_int_equal_goto(tctx, ldb_err, LDB_SUCCESS, bret, DONE, "ldb_add() failed");
+ }
+
+DONE:
+ talloc_free(mem_ctx);
+ return bret;
+}
+
+/*
+ * Setup/Teardown for test case
+ */
+static bool torture_drs_unit_prefixmap_setup(struct torture_context *tctx, struct drsut_prefixmap_data **_priv)
+{
+ WERROR werr;
+ DATA_BLOB blob;
+ struct drsut_prefixmap_data *priv;
+
+ priv = *_priv = talloc_zero(tctx, struct drsut_prefixmap_data);
+ torture_assert(tctx, priv != NULL, "Not enough memory");
+
+ werr = _drsut_prefixmap_new(_prefixmap_test_new_data, ARRAY_SIZE(_prefixmap_test_new_data),
+ tctx, &priv->pfm_new);
+ torture_assert_werr_ok(tctx, werr, "failed to create pfm_new");
+
+ werr = _drsut_prefixmap_new(_prefixmap_full_map_data, ARRAY_SIZE(_prefixmap_full_map_data),
+ tctx, &priv->pfm_full);
+	torture_assert_werr_ok(tctx, werr, "failed to create pfm_full");
+
+ torture_assert(tctx, drsut_schemainfo_new(tctx, &priv->schi_default),
+ "drsut_schemainfo_new() failed");
+
+ werr = dsdb_blob_from_schema_info(priv->schi_default, priv, &blob);
+ torture_assert_werr_ok(tctx, werr, "dsdb_blob_from_schema_info() failed");
+
+ /* create temporary LDB and populate with data */
+ if (!torture_drs_unit_ldb_setup(tctx, priv)) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool torture_drs_unit_prefixmap_teardown(struct torture_context *tctx, struct drsut_prefixmap_data *priv)
+{
+ talloc_free(priv);
+
+ return true;
+}
+
+/**
+ * Test case initialization for
+ * drs.unit.prefixMap
+ */
+struct torture_tcase * torture_drs_unit_prefixmap(struct torture_suite *suite)
+{
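+	/* helper typedefs to cast the strongly-typed test and fixture
+	 * functions to the generic callbacks expected by the torture API */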
+ typedef bool (*pfn_setup)(struct torture_context *, void **);
+ typedef bool (*pfn_teardown)(struct torture_context *, void *);
+ typedef bool (*pfn_run)(struct torture_context *, void *);
+
+ struct torture_tcase * tc = torture_suite_add_tcase(suite, "prefixMap");
+
+ torture_tcase_set_fixture(tc,
+ (pfn_setup)torture_drs_unit_prefixmap_setup,
+ (pfn_teardown)torture_drs_unit_prefixmap_teardown);
+
+ tc->description = talloc_strdup(tc, "Unit tests for DRSUAPI::prefixMap implementation");
+
+ torture_tcase_add_simple_test(tc, "new", (pfn_run)torture_drs_unit_pfm_new);
+
+ torture_tcase_add_simple_test(tc, "make_attid_full_map", (pfn_run)torture_drs_unit_pfm_make_attid_full_map);
+ torture_tcase_add_simple_test(tc, "make_attid_small_map", (pfn_run)torture_drs_unit_pfm_make_attid_small_map);
+
+ torture_tcase_add_simple_test(tc, "attid_from_oid_full_map",
+ (pfn_run)torture_drs_unit_pfm_attid_from_oid_full_map);
+ torture_tcase_add_simple_test(tc, "attid_from_oid_empty_map",
+ (pfn_run)torture_drs_unit_pfm_attid_from_oid_base_map);
+
+ torture_tcase_add_simple_test(tc, "oid_from_attid_full_map", (pfn_run)torture_drs_unit_pfm_oid_from_attid);
+ torture_tcase_add_simple_test(tc, "oid_from_attid_check_attid",
+ (pfn_run)torture_drs_unit_pfm_oid_from_attid_check_attid);
+
+ torture_tcase_add_simple_test(tc, "pfm_to_from_drsuapi", (pfn_run)torture_drs_unit_pfm_to_from_drsuapi);
+
+ torture_tcase_add_simple_test(tc, "pfm_to_from_ldb_val", (pfn_run)torture_drs_unit_pfm_to_from_ldb_val);
+
+ torture_tcase_add_simple_test(tc, "pfm_read_write_ldb", (pfn_run)torture_drs_unit_pfm_read_write_ldb);
+
+ torture_tcase_add_simple_test(tc, "dsdb_create_prefix_mapping", (pfn_run)torture_drs_unit_dsdb_create_prefix_mapping);
+
+ return tc;
+}
diff --git a/source4/torture/drs/unit/schemainfo_tests.c b/source4/torture/drs/unit/schemainfo_tests.c
new file mode 100644
index 0000000..4b4cca6
--- /dev/null
+++ b/source4/torture/drs/unit/schemainfo_tests.c
@@ -0,0 +1,740 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ DRSUAPI schemaInfo unit tests
+
+ Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2010
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "includes.h"
+#include "system/filesys.h"
+#include "torture/smbtorture.h"
+#include "dsdb/samdb/samdb.h"
+#include "dsdb/samdb/ldb_modules/util.h"
+#include "ldb_wrap.h"
+#include <ldb_module.h>
+#include "torture/rpc/drsuapi.h"
+#include "librpc/ndr/libndr.h"
+#include "param/param.h"
+#include "torture/drs/proto.h"
+
+
+/**
+ * schemaInfo to init ldb context with
+ * Rev: 0
+ * GUID: 00000000-0000-0000-0000-000000000000
+ */
+#define SCHEMA_INFO_INIT_STR "FF0000000000000000000000000000000000000000"
+
+/**
+ * Default schema_info string to be used for testing
+ * Rev: 01
+ * GUID: 071c82fd-45c7-4351-a3db-51f75a630a7f
+ */
+#define SCHEMA_INFO_DEFAULT_STR "FF00000001FD821C07C7455143A3DB51F75A630A7F"
+
+/**
+ * Schema info data to test with
+ */
+struct schemainfo_data {
+ DATA_BLOB ndr_blob;
+ struct dsdb_schema_info schi;
+ WERROR werr_expected;
+ bool test_both_ways;
+};
+
+/**
+ * Schema info test data in human-readable format (... kind of)
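+ * Each string is a hex dump of a 21-byte schemaInfo blob: 0xFF marker,
+ * 4-byte big-endian revision, 16-byte GUID (binary form)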
+ */
+static const struct {
+ const char *schema_info_str;
+ uint32_t revision;
+ const char *guid_str;
+ WERROR werr_expected;
+ bool test_both_ways;
+} _schemainfo_test_data[] = {
+ {
+ .schema_info_str = "FF0000000000000000000000000000000000000000",
+ .revision = 0,
+ .guid_str = "00000000-0000-0000-0000-000000000000",
+ .werr_expected = WERR_OK,
+ .test_both_ways = true
+ },
+ {
+ .schema_info_str = "FF00000001FD821C07C7455143A3DB51F75A630A7F",
+ .revision = 1,
+ .guid_str = "071c82fd-45c7-4351-a3db-51f75a630a7f",
+ .werr_expected = WERR_OK,
+ .test_both_ways = true
+ },
+ {
+ .schema_info_str = "FFFFFFFFFFFD821C07C7455143A3DB51F75A630A7F",
+ .revision = 0xFFFFFFFF,
+ .guid_str = "071c82fd-45c7-4351-a3db-51f75a630a7f",
+ .werr_expected = WERR_OK,
+ .test_both_ways = true
+ },
+ { /* len == 21 */
+ .schema_info_str = "FF00000001FD821C07C7455143A3DB51F75A630A7F00",
+ .revision = 1,
+ .guid_str = "071c82fd-45c7-4351-a3db-51f75a630a7f",
+ .werr_expected = WERR_INVALID_PARAMETER,
+ .test_both_ways = false
+ },
+ { /* marker == FF */
+ .schema_info_str = "AA00000001FD821C07C7455143A3DB51F75A630A7F",
+ .revision = 1,
+ .guid_str = "071c82fd-45c7-4351-a3db-51f75a630a7f",
+ .werr_expected = WERR_INVALID_PARAMETER,
+ .test_both_ways = false
+ }
+};
+
+/**
+ * Private data to be shared among all tests in the test case
+ */
+struct drsut_schemainfo_data {
+ struct ldb_context *ldb;
+ struct ldb_module *ldb_module;
+ struct dsdb_schema *schema;
+
+ /* Initial schemaInfo set in ldb to test with */
+ struct dsdb_schema_info *schema_info;
+
+ uint32_t test_data_count;
+ struct schemainfo_data *test_data;
+};
+
+/**
+ * torture macro to assert for equal dsdb_schema_info's
+ */
+#define torture_assert_schema_info_equal(torture_ctx,got,expected,cmt)\
+ do { const struct dsdb_schema_info *__got = (got), *__expected = (expected); \
+ if (__got->revision != __expected->revision) { \
+ torture_result(torture_ctx, TORTURE_FAIL, \
+ __location__": "#got".revision %d did not match "#expected".revision %d: %s", \
+ (int)__got->revision, (int)__expected->revision, cmt); \
+ return false; \
+ } \
+ if (!GUID_equal(&__got->invocation_id, &__expected->invocation_id)) { \
+ torture_result(torture_ctx, TORTURE_FAIL, \
+ __location__": "#got".invocation_id did not match "#expected".invocation_id: %s", cmt); \
+ return false; \
+ } \
+ } while(0)
+
+/*
+ * forward declaration for internal functions
+ */
+static bool _drsut_ldb_schema_info_reset(struct torture_context *tctx,
+ struct ldb_context *ldb,
+ const char *schema_info_str,
+ bool in_setup);
+
+
+/**
+ * Creates dsdb_schema_info object based on NDR data
+ * passed as hex string
+ */
+static bool _drsut_schemainfo_new(struct torture_context *tctx,
+ const char *schema_info_str, struct dsdb_schema_info **_si)
+{
+ WERROR werr;
+ DATA_BLOB blob;
+
+ blob = strhex_to_data_blob(tctx, schema_info_str);
+ if (!blob.data) {
+ torture_comment(tctx, "Not enough memory!\n");
+ return false;
+ }
+
+ werr = dsdb_schema_info_from_blob(&blob, tctx, _si);
+ if (!W_ERROR_IS_OK(werr)) {
+ torture_comment(tctx,
+ "Failed to create dsdb_schema_info object for %s: %s",
+ schema_info_str,
+ win_errstr(werr));
+ return false;
+ }
+
+ data_blob_free(&blob);
+
+ return true;
+}
+
+/**
+ * Creates dsdb_schema_info object based on predefined data
+ * Function is public as it is intended to be used by other
+ * tests (e.g. prefixMap tests)
+ */
+bool drsut_schemainfo_new(struct torture_context *tctx, struct dsdb_schema_info **_si)
+{
+ return _drsut_schemainfo_new(tctx, SCHEMA_INFO_DEFAULT_STR, _si);
+}
+
+
+/*
+ * Tests dsdb_schema_info_new() and dsdb_schema_info_blob_new()
+ */
+static bool test_dsdb_schema_info_new(struct torture_context *tctx,
+ struct drsut_schemainfo_data *priv)
+{
+ WERROR werr;
+ DATA_BLOB ndr_blob;
+ DATA_BLOB ndr_blob_expected;
+ struct dsdb_schema_info *schi;
+ TALLOC_CTX *mem_ctx;
+
+ mem_ctx = talloc_new(priv);
+ torture_assert(tctx, mem_ctx, "Not enough memory!");
+ ndr_blob_expected = strhex_to_data_blob(mem_ctx, SCHEMA_INFO_INIT_STR);
+ torture_assert(tctx, ndr_blob_expected.data, "Not enough memory!");
+
+ werr = dsdb_schema_info_new(mem_ctx, &schi);
+ torture_assert_werr_ok(tctx, werr, "dsdb_schema_info_new() failed");
+ torture_assert_int_equal(tctx, schi->revision, 0,
+ "dsdb_schema_info_new() creates schemaInfo with invalid revision");
+ torture_assert(tctx, GUID_all_zero(&schi->invocation_id),
+		       "dsdb_schema_info_new() creates schemaInfo with a non-zero GUID");
+
+ werr = dsdb_schema_info_blob_new(mem_ctx, &ndr_blob);
+ torture_assert_werr_ok(tctx, werr, "dsdb_schema_info_blob_new() failed");
+ torture_assert_data_blob_equal(tctx, ndr_blob, ndr_blob_expected,
+ "dsdb_schema_info_blob_new() returned invalid blob");
+
+ talloc_free(mem_ctx);
+ return true;
+}
+
+/*
+ * Tests dsdb_schema_info_from_blob()
+ */
+static bool test_dsdb_schema_info_from_blob(struct torture_context *tctx,
+ struct drsut_schemainfo_data *priv)
+{
+ uint32_t i;
+ WERROR werr;
+ char *msg;
+ struct dsdb_schema_info *schema_info;
+ TALLOC_CTX *mem_ctx;
+
+ mem_ctx = talloc_new(priv);
+ torture_assert(tctx, mem_ctx, "Not enough memory!");
+
+ for (i = 0; i < priv->test_data_count; i++) {
+ struct schemainfo_data *data = &priv->test_data[i];
+
+ msg = talloc_asprintf(tctx, "dsdb_schema_info_from_blob() [%d]-[%s]",
+ i, _schemainfo_test_data[i].schema_info_str);
+
+ werr = dsdb_schema_info_from_blob(&data->ndr_blob, mem_ctx, &schema_info);
+ torture_assert_werr_equal(tctx, werr, data->werr_expected, msg);
+
+ /* test returned data */
+ if (W_ERROR_IS_OK(werr)) {
+ torture_assert_schema_info_equal(tctx,
+ schema_info, &data->schi,
+ "after dsdb_schema_info_from_blob() call");
+ }
+ }
+
+ talloc_free(mem_ctx);
+
+ return true;
+}
+
+/*
+ * Tests dsdb_blob_from_schema_info()
+ */
+static bool test_dsdb_blob_from_schema_info(struct torture_context *tctx,
+ struct drsut_schemainfo_data *priv)
+{
+ uint32_t i;
+ WERROR werr;
+ char *msg;
+ DATA_BLOB ndr_blob;
+ TALLOC_CTX *mem_ctx;
+
+ mem_ctx = talloc_new(priv);
+ torture_assert(tctx, mem_ctx, "Not enough memory!");
+
+ for (i = 0; i < priv->test_data_count; i++) {
+ struct schemainfo_data *data = &priv->test_data[i];
+
+		/* not all tests are valid for the reverse conversion */
+ if (!data->test_both_ways) {
+ continue;
+ }
+
+ msg = talloc_asprintf(tctx, "dsdb_blob_from_schema_info() [%d]-[%s]",
+ i, _schemainfo_test_data[i].schema_info_str);
+
+ werr = dsdb_blob_from_schema_info(&data->schi, mem_ctx, &ndr_blob);
+ torture_assert_werr_equal(tctx, werr, data->werr_expected, msg);
+
+ /* test returned data */
+ if (W_ERROR_IS_OK(werr)) {
+ torture_assert_data_blob_equal(tctx,
+ ndr_blob, data->ndr_blob,
+ "dsdb_blob_from_schema_info()");
+ }
+ }
+
+ talloc_free(mem_ctx);
+
+ return true;
+}
+
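+/*
+ * Tests dsdb_schema_info_cmp() against a remote prefixMap ctr carrying
+ * various valid and invalid schemaInfo values
+ */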
+static bool test_dsdb_schema_info_cmp(struct torture_context *tctx,
+ struct drsut_schemainfo_data *priv)
+{
+ DATA_BLOB blob;
+ struct drsuapi_DsReplicaOIDMapping_Ctr *ctr;
+ struct dsdb_schema_info schema_info;
+
+ ctr = talloc_zero(priv, struct drsuapi_DsReplicaOIDMapping_Ctr);
+ torture_assert(tctx, ctr, "Not enough memory!");
+
+ /* not enough elements */
+ torture_assert_werr_equal(tctx,
+ dsdb_schema_info_cmp(priv->schema, ctr),
+ WERR_INVALID_PARAMETER,
+ "dsdb_schema_info_cmp(): unexpected result");
+
+ /* an empty element for schemaInfo */
+ ctr->num_mappings = 1;
+ ctr->mappings = talloc_zero_array(ctr, struct drsuapi_DsReplicaOIDMapping, 1);
+ torture_assert(tctx, ctr->mappings, "Not enough memory!");
+ torture_assert_werr_equal(tctx,
+ dsdb_schema_info_cmp(priv->schema, ctr),
+ WERR_INVALID_PARAMETER,
+ "dsdb_schema_info_cmp(): unexpected result");
+
+ /* test with invalid schemaInfo - length != 21 */
+ blob = strhex_to_data_blob(ctr, "FF00000001FD821C07C7455143A3DB51F75A630A7F00");
+ torture_assert(tctx, blob.data, "Not enough memory!");
+ ctr->mappings[0].oid.length = blob.length;
+ ctr->mappings[0].oid.binary_oid = blob.data;
+ torture_assert_werr_equal(tctx,
+ dsdb_schema_info_cmp(priv->schema, ctr),
+ WERR_INVALID_PARAMETER,
+ "dsdb_schema_info_cmp(): unexpected result");
+
+ /* test with invalid schemaInfo - marker != 0xFF */
+ blob = strhex_to_data_blob(ctr, "AA00000001FD821C07C7455143A3DB51F75A630A7F");
+ torture_assert(tctx, blob.data, "Not enough memory!");
+ ctr->mappings[0].oid.length = blob.length;
+ ctr->mappings[0].oid.binary_oid = blob.data;
+ torture_assert_werr_equal(tctx,
+ dsdb_schema_info_cmp(priv->schema, ctr),
+ WERR_INVALID_PARAMETER,
+ "dsdb_schema_info_cmp(): unexpected result");
+
+ /* test with valid schemaInfo, but older one should be ok */
+ blob = strhex_to_data_blob(ctr, "FF0000000000000000000000000000000000000000");
+ torture_assert(tctx, blob.data, "Not enough memory!");
+ ctr->mappings[0].oid.length = blob.length;
+ ctr->mappings[0].oid.binary_oid = blob.data;
+ torture_assert_werr_equal(tctx,
+ dsdb_schema_info_cmp(priv->schema, ctr),
+ WERR_OK,
+ "dsdb_schema_info_cmp(): unexpected result");
+
+ /* test with correct schemaInfo, but invalid ATTID */
+ schema_info = *priv->schema->schema_info;
+ torture_assert_werr_ok(tctx,
+ dsdb_blob_from_schema_info(&schema_info, tctx, &blob),
+ "dsdb_blob_from_schema_info() failed");
+ ctr->mappings[0].id_prefix = 1;
+ ctr->mappings[0].oid.length = blob.length;
+ ctr->mappings[0].oid.binary_oid = blob.data;
+ torture_assert_werr_equal(tctx,
+ dsdb_schema_info_cmp(priv->schema, ctr),
+ WERR_INVALID_PARAMETER,
+ "dsdb_schema_info_cmp(): unexpected result");
+
+ /* test with valid schemaInfo */
+ ctr->mappings[0].id_prefix = 0;
+ torture_assert_werr_ok(tctx,
+ dsdb_schema_info_cmp(priv->schema, ctr),
+ "dsdb_schema_info_cmp(): unexpected result");
+
+ /* test with valid schemaInfo, but older revision */
+ schema_info = *priv->schema->schema_info;
+ schema_info.revision -= 1;
+ torture_assert_werr_ok(tctx,
+ dsdb_blob_from_schema_info(&schema_info, tctx, &blob),
+ "dsdb_blob_from_schema_info() failed");
+ ctr->mappings[0].oid.length = blob.length;
+ ctr->mappings[0].oid.binary_oid = blob.data;
+ torture_assert_werr_equal(tctx,
+ dsdb_schema_info_cmp(priv->schema, ctr),
+ WERR_OK,
+ "dsdb_schema_info_cmp(): unexpected result");
+
+ /* test with valid schemaInfo, but newer revision */
+ schema_info = *priv->schema->schema_info;
+ schema_info.revision += 1;
+ torture_assert_werr_ok(tctx,
+ dsdb_blob_from_schema_info(&schema_info, tctx, &blob),
+ "dsdb_blob_from_schema_info() failed");
+ ctr->mappings[0].oid.length = blob.length;
+ ctr->mappings[0].oid.binary_oid = blob.data;
+ torture_assert_werr_equal(tctx,
+ dsdb_schema_info_cmp(priv->schema, ctr),
+ WERR_DS_DRA_SCHEMA_MISMATCH,
+ "dsdb_schema_info_cmp(): unexpected result");
+
+ /* test with valid schemaInfo, but newer revision and other invocationId */
+ schema_info = *priv->schema->schema_info;
+ schema_info.revision += 1;
+ schema_info.invocation_id.time_mid += 1;
+ torture_assert_werr_ok(tctx,
+ dsdb_blob_from_schema_info(&schema_info, tctx, &blob),
+ "dsdb_blob_from_schema_info() failed");
+ ctr->mappings[0].oid.length = blob.length;
+ ctr->mappings[0].oid.binary_oid = blob.data;
+ torture_assert_werr_equal(tctx,
+ dsdb_schema_info_cmp(priv->schema, ctr),
+ WERR_DS_DRA_SCHEMA_MISMATCH,
+ "dsdb_schema_info_cmp(): unexpected result");
+
+ /* test with valid schemaInfo, but older revision and other invocationId */
+ schema_info = *priv->schema->schema_info;
+ schema_info.revision -= 1;
+ schema_info.invocation_id.time_mid += 1;
+ torture_assert_werr_ok(tctx,
+ dsdb_blob_from_schema_info(&schema_info, tctx, &blob),
+ "dsdb_blob_from_schema_info() failed");
+ ctr->mappings[0].oid.length = blob.length;
+ ctr->mappings[0].oid.binary_oid = blob.data;
+ torture_assert_werr_equal(tctx,
+ dsdb_schema_info_cmp(priv->schema, ctr),
+ WERR_OK,
+ "dsdb_schema_info_cmp(): unexpected result");
+
+ /* test with valid schemaInfo, but same revision and other invocationId */
+ schema_info = *priv->schema->schema_info;
+ schema_info.invocation_id.time_mid += 1;
+ torture_assert_werr_ok(tctx,
+ dsdb_blob_from_schema_info(&schema_info, tctx, &blob),
+ "dsdb_blob_from_schema_info() failed");
+ ctr->mappings[0].oid.length = blob.length;
+ ctr->mappings[0].oid.binary_oid = blob.data;
+ torture_assert_werr_equal(tctx,
+ dsdb_schema_info_cmp(priv->schema, ctr),
+ WERR_DS_DRA_SCHEMA_CONFLICT,
+ "dsdb_schema_info_cmp(): unexpected result");
+
+ talloc_free(ctr);
+ return true;
+}
+
+/*
+ * Tests dsdb_module_schema_info_blob_read()
+ * and dsdb_module_schema_info_blob_write()
+ */
+static bool test_dsdb_module_schema_info_blob_rw(struct torture_context *tctx,
+ struct drsut_schemainfo_data *priv)
+{
+ int ldb_err;
+ DATA_BLOB blob_write;
+ DATA_BLOB blob_read;
+
+	/* reset schemaInfo to a known value */
+ torture_assert(tctx,
+ _drsut_ldb_schema_info_reset(tctx, priv->ldb, SCHEMA_INFO_INIT_STR, false),
+ "_drsut_ldb_schema_info_reset() failed");
+
+ /* write tests' default schemaInfo */
+ blob_write = strhex_to_data_blob(priv, SCHEMA_INFO_DEFAULT_STR);
+ torture_assert(tctx, blob_write.data, "Not enough memory!");
+
+ ldb_err = dsdb_module_schema_info_blob_write(priv->ldb_module,
+ DSDB_FLAG_TOP_MODULE,
+ &blob_write, NULL);
+ torture_assert_int_equal(tctx, ldb_err, LDB_SUCCESS, "dsdb_module_schema_info_blob_write() failed");
+
+ ldb_err = dsdb_module_schema_info_blob_read(priv->ldb_module, DSDB_FLAG_TOP_MODULE,
+ priv, &blob_read, NULL);
+ torture_assert_int_equal(tctx, ldb_err, LDB_SUCCESS, "dsdb_module_schema_info_blob_read() failed");
+
+ /* check if we get what we wrote */
+ torture_assert_data_blob_equal(tctx, blob_read, blob_write,
+				       "Write/Read of schemaInfo blob failed");
+
+ return true;
+}
+
+/*
+ * Tests dsdb_schema_update_schema_info()
+ */
+static bool test_dsdb_module_schema_info_update(struct torture_context *tctx,
+ struct drsut_schemainfo_data *priv)
+{
+ int ldb_err;
+ WERROR werr;
+ DATA_BLOB blob;
+ struct dsdb_schema_info *schema_info;
+
+	/* reset schemaInfo to a known value */
+ torture_assert(tctx,
+ _drsut_ldb_schema_info_reset(tctx, priv->ldb, SCHEMA_INFO_INIT_STR, false),
+ "_drsut_ldb_schema_info_reset() failed");
+
+ ldb_err = dsdb_module_schema_info_update(priv->ldb_module,
+ priv->schema,
+ DSDB_FLAG_TOP_MODULE | DSDB_FLAG_AS_SYSTEM, NULL);
+ torture_assert_int_equal(tctx, ldb_err, LDB_SUCCESS, "dsdb_module_schema_info_update() failed");
+
+ /* get updated schemaInfo */
+ ldb_err = dsdb_module_schema_info_blob_read(priv->ldb_module, DSDB_FLAG_TOP_MODULE,
+ priv, &blob, NULL);
+ torture_assert_int_equal(tctx, ldb_err, LDB_SUCCESS, "dsdb_module_schema_info_blob_read() failed");
+
+ werr = dsdb_schema_info_from_blob(&blob, priv, &schema_info);
+ torture_assert_werr_ok(tctx, werr, "dsdb_schema_info_from_blob() failed");
+
+ /* check against default schema_info */
+ torture_assert_schema_info_equal(tctx, schema_info, priv->schema_info,
+			"schemaInfo attribute not updated correctly");
+
+ return true;
+}
+
+
+/**
+ * Reset schemaInfo record to a known value
+ */
+static bool _drsut_ldb_schema_info_reset(struct torture_context *tctx,
+ struct ldb_context *ldb,
+ const char *schema_info_str,
+ bool in_setup)
+{
+ bool bret = true;
+ int ldb_err;
+ DATA_BLOB blob;
+ struct ldb_message *msg;
+ TALLOC_CTX *mem_ctx = talloc_new(tctx);
+
+ blob = strhex_to_data_blob(mem_ctx, schema_info_str);
+ torture_assert_goto(tctx, blob.data, bret, DONE, "Not enough memory!");
+
+ msg = ldb_msg_new(mem_ctx);
+ torture_assert_goto(tctx, msg, bret, DONE, "Not enough memory!");
+
+ msg->dn = ldb_get_schema_basedn(ldb);
+ ldb_err = ldb_msg_add_value(msg, "schemaInfo", &blob, NULL);
+ torture_assert_int_equal_goto(tctx, ldb_err, LDB_SUCCESS, bret, DONE,
+ "ldb_msg_add_value() failed");
+
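+	/* during initial setup the schemaInfo object does not exist yet,
+	 * so add it; on later calls replace the existing value */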
+ if (in_setup) {
+ ldb_err = ldb_add(ldb, msg);
+ } else {
+ ldb_err = dsdb_replace(ldb, msg, DSDB_MODIFY_PERMISSIVE);
+ }
+ torture_assert_int_equal_goto(tctx, ldb_err, LDB_SUCCESS, bret, DONE,
+ "dsdb_replace() failed");
+
+DONE:
+ talloc_free(mem_ctx);
+ return bret;
+}
+
+/**
+ * Prepares a temporary LDB and opens it
+ */
+static bool _drsut_ldb_setup(struct torture_context *tctx, struct drsut_schemainfo_data *priv)
+{
+ int ldb_err;
+ char *ldb_url;
+ bool bret = true;
+ char *tempdir = NULL;
+ NTSTATUS status;
+ TALLOC_CTX* mem_ctx;
+
+ mem_ctx = talloc_new(priv);
+ torture_assert(tctx, mem_ctx, "Not enough memory!");
+
+ status = torture_temp_dir(tctx, "drs_", &tempdir);
+ torture_assert_ntstatus_ok_goto(tctx, status, bret, DONE, "creating temp dir");
+
+ ldb_url = talloc_asprintf(priv, "%s/drs_schemainfo.ldb", tempdir);
+ torture_assert_goto(tctx, ldb_url, bret, DONE, "Not enough memory!");
+
+ /* create LDB */
+ priv->ldb = ldb_wrap_connect(priv, tctx->ev, tctx->lp_ctx,
+ ldb_url, NULL, NULL, 0);
+ torture_assert_goto(tctx, priv->ldb, bret, DONE, "ldb_wrap_connect() failed");
+
+ /* set some schemaNamingContext */
+ ldb_err = ldb_set_opaque(priv->ldb,
+ "schemaNamingContext",
+ ldb_dn_new(priv->ldb, priv->ldb, "CN=Schema,CN=Config"));
+ torture_assert_int_equal_goto(tctx, ldb_err, LDB_SUCCESS, bret, DONE,
+ "ldb_set_opaque() failed");
+
+	/* add schemaInfo attribute so the tested layer can work properly */
+ torture_assert_goto(tctx,
+ _drsut_ldb_schema_info_reset(tctx, priv->ldb, SCHEMA_INFO_INIT_STR, true),
+ bret, DONE,
+ "_drsut_ldb_schema_info_reset() failed");
+
+DONE:
+ talloc_free(tempdir);
+ talloc_free(mem_ctx);
+ return bret;
+}
+
+/*
+ * Setup/Teardown for test case
+ */
+static bool torture_drs_unit_schemainfo_setup(struct torture_context *tctx,
+ struct drsut_schemainfo_data **_priv)
+{
+ size_t i;
+ int ldb_err;
+ NTSTATUS status;
+ DATA_BLOB ndr_blob;
+ struct GUID guid;
+ struct drsut_schemainfo_data *priv;
+
+ priv = talloc_zero(tctx, struct drsut_schemainfo_data);
+ torture_assert(tctx, priv, "Not enough memory!");
+
+	/* return the allocated pointer here;
+	 * teardown() will be called even in case of failure,
+	 * so we'll get a chance to clean up */
+ *_priv = priv;
+
+ /* create initial schemaInfo */
+ torture_assert(tctx,
+ _drsut_schemainfo_new(tctx, SCHEMA_INFO_DEFAULT_STR, &priv->schema_info),
+ "Failed to create schema_info test object");
+
+ /* create data to test with */
+ priv->test_data_count = ARRAY_SIZE(_schemainfo_test_data);
+ priv->test_data = talloc_array(tctx, struct schemainfo_data, priv->test_data_count);
+
+ for (i = 0; i < ARRAY_SIZE(_schemainfo_test_data); i++) {
+ struct schemainfo_data *data = &priv->test_data[i];
+
+ ndr_blob = strhex_to_data_blob(priv,
+ _schemainfo_test_data[i].schema_info_str);
+ torture_assert(tctx, ndr_blob.data, "Not enough memory!");
+
+ status = GUID_from_string(_schemainfo_test_data[i].guid_str, &guid);
+ torture_assert_ntstatus_ok(tctx, status,
+ talloc_asprintf(tctx,
+ "GUID_from_string() failed for %s",
+ _schemainfo_test_data[i].guid_str));
+
+ data->ndr_blob = ndr_blob;
+ data->schi.invocation_id = guid;
+ data->schi.revision = _schemainfo_test_data[i].revision;
+ data->werr_expected = _schemainfo_test_data[i].werr_expected;
+ data->test_both_ways = _schemainfo_test_data[i].test_both_ways;
+
+ }
+
+ /* create temporary LDB and populate with data */
+ if (!_drsut_ldb_setup(tctx, priv)) {
+ return false;
+ }
+
+ /* create ldb_module mockup object */
+ priv->ldb_module = ldb_module_new(priv, priv->ldb, "schemaInfo_test_module", NULL);
+ torture_assert(tctx, priv->ldb_module, "Not enough memory!");
+
+ /* create schema mockup object */
+ priv->schema = dsdb_new_schema(priv);
+
+ /* set schema_info in dsdb_schema for testing */
+ torture_assert(tctx,
+ _drsut_schemainfo_new(tctx, SCHEMA_INFO_DEFAULT_STR, &priv->schema->schema_info),
+ "Failed to create schema_info test object");
+
+ /* pre-cache invocationId for samdb_ntds_invocation_id()
+ * to work with our mock ldb */
+ ldb_err = ldb_set_opaque(priv->ldb, "cache.invocation_id",
+ &priv->schema_info->invocation_id);
+ torture_assert_int_equal(tctx, ldb_err, LDB_SUCCESS, "ldb_set_opaque() failed");
+
+	/* Perform all tests inside a transaction so that
+	 * the underlying modify calls do not fail */
+ ldb_err = ldb_transaction_start(priv->ldb);
+ torture_assert_int_equal(tctx,
+ ldb_err,
+ LDB_SUCCESS,
+ "ldb_transaction_start() failed");
+
+ return true;
+}
+
+static bool torture_drs_unit_schemainfo_teardown(struct torture_context *tctx,
+ struct drsut_schemainfo_data *priv)
+{
+ int ldb_err;
+
+	/* commit the pending transaction so we can
+	 * check what the resulting LDB state is */
+ ldb_err = ldb_transaction_commit(priv->ldb);
+ if (ldb_err != LDB_SUCCESS) {
+ torture_comment(tctx, "ldb_transaction_commit() - %s (%s)",
+ ldb_strerror(ldb_err),
+ ldb_errstring(priv->ldb));
+ }
+
+ talloc_free(priv);
+
+ return true;
+}
+
+/**
+ * Test case initialization for
+ * drs.unit.schemaInfo
+ */
+struct torture_tcase * torture_drs_unit_schemainfo(struct torture_suite *suite)
+{
+ typedef bool (*pfn_setup)(struct torture_context *, void **);
+ typedef bool (*pfn_teardown)(struct torture_context *, void *);
+ typedef bool (*pfn_run)(struct torture_context *, void *);
+
+ struct torture_tcase * tc = torture_suite_add_tcase(suite, "schemaInfo");
+
+ torture_tcase_set_fixture(tc,
+ (pfn_setup)torture_drs_unit_schemainfo_setup,
+ (pfn_teardown)torture_drs_unit_schemainfo_teardown);
+
+ tc->description = talloc_strdup(tc, "Unit tests for DRSUAPI::schemaInfo implementation");
+
+ torture_tcase_add_simple_test(tc, "dsdb_schema_info_new",
+ (pfn_run)test_dsdb_schema_info_new);
+ torture_tcase_add_simple_test(tc, "dsdb_schema_info_from_blob",
+ (pfn_run)test_dsdb_schema_info_from_blob);
+ torture_tcase_add_simple_test(tc, "dsdb_blob_from_schema_info",
+ (pfn_run)test_dsdb_blob_from_schema_info);
+ torture_tcase_add_simple_test(tc, "dsdb_schema_info_cmp",
+ (pfn_run)test_dsdb_schema_info_cmp);
+ torture_tcase_add_simple_test(tc, "dsdb_module_schema_info_blob read|write",
+ (pfn_run)test_dsdb_module_schema_info_blob_rw);
+ torture_tcase_add_simple_test(tc, "dsdb_module_schema_info_update",
+ (pfn_run)test_dsdb_module_schema_info_update);
+
+
+ return tc;
+}
diff --git a/source4/torture/drs/wscript_build b/source4/torture/drs/wscript_build
new file mode 100644
index 0000000..0dc26d6
--- /dev/null
+++ b/source4/torture/drs/wscript_build
@@ -0,0 +1,12 @@
+#!/usr/bin/env python
+
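+# Build smbtorture's DRS test module: the prefixMap/schemaInfo unit tests plus the DRSUAPI RPC tests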
+bld.SAMBA_MODULE('TORTURE_DRS',
+ source='drs_init.c drs_util.c unit/prefixmap_tests.c unit/schemainfo_tests.c rpc/dssync.c rpc/msds_intid.c',
+ autoproto='proto.h',
+ subsystem='smbtorture',
+ init_function='torture_drs_init',
+ deps='samba-util ldb samba-errors torture ldbsamba talloc dcerpc ndr NDR_DRSUAPI gensec samba-hostconfig RPC_NDR_DRSUAPI DSDB_MODULE_HELPERS asn1util samdb NDR_DRSBLOBS samba-credentials samdb-common LIBCLI_RESOLVE LP_RESOLVE torturemain',
+ internal_module=True,
+ enabled=bld.PYTHON_BUILD_IS_ENABLED()
+ )
+