Diffstat (limited to 'source4/torture/drs/python')
-rw-r--r--  source4/torture/drs/python/cracknames.py                  204
-rw-r--r--  source4/torture/drs/python/delete_object.py               378
-rw-r--r--  source4/torture/drs/python/drs_base.py                    622
-rw-r--r--  source4/torture/drs/python/fsmo.py                        152
-rw-r--r--  source4/torture/drs/python/getnc_exop.py                 1305
-rw-r--r--  source4/torture/drs/python/getnc_schema.py                308
-rw-r--r--  source4/torture/drs/python/getnc_unpriv.py                306
-rw-r--r--  source4/torture/drs/python/getncchanges.py               1427
-rw-r--r--  source4/torture/drs/python/link_conflicts.py              763
-rw-r--r--  source4/torture/drs/python/linked_attributes_drs.py       176
-rw-r--r--  source4/torture/drs/python/repl_move.py                  2593
-rw-r--r--  source4/torture/drs/python/repl_rodc.py                   739
-rw-r--r--  source4/torture/drs/python/repl_schema.py                 444
-rw-r--r--  source4/torture/drs/python/repl_secdesc.py                400
-rw-r--r--  source4/torture/drs/python/replica_sync.py                747
-rw-r--r--  source4/torture/drs/python/replica_sync_rodc.py           155
-rw-r--r--  source4/torture/drs/python/ridalloc_exop.py               813
-rw-r--r--  source4/torture/drs/python/samba_tool_drs.py              417
-rw-r--r--  source4/torture/drs/python/samba_tool_drs_critical.py      98
-rw-r--r--  source4/torture/drs/python/samba_tool_drs_no_dns.py       175
-rw-r--r--  source4/torture/drs/python/samba_tool_drs_showrepl.py     333
21 files changed, 12555 insertions, 0 deletions
diff --git a/source4/torture/drs/python/cracknames.py b/source4/torture/drs/python/cracknames.py
new file mode 100644
index 0000000..f244605
--- /dev/null
+++ b/source4/torture/drs/python/cracknames.py
@@ -0,0 +1,204 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) Catalyst .Net Ltd 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.tests
+import ldb
+import drs_base
+
+from samba.dcerpc import drsuapi
+
+
+class DrsCracknamesTestCase(drs_base.DrsBaseTestCase):
+ def setUp(self):
+ super(DrsCracknamesTestCase, self).setUp()
+ (self.drs, self.drs_handle) = self._ds_bind(self.dnsname_dc1)
+
+ self.ou = "ou=Cracknames_ou,%s" % self.ldb_dc1.get_default_basedn()
+ self.username = "Cracknames_user"
+ self.user = "cn=%s,%s" % (self.username, self.ou)
+
+ self.ldb_dc1.add({
+ "dn": self.ou,
+ "objectclass": "organizationalUnit"})
+
+ self.user_record = {
+ "dn": self.user,
+ "objectclass": "user",
+ "sAMAccountName": self.username,
+ "userPrincipalName": "test@test.com",
+ "servicePrincipalName": "test/%s" % self.ldb_dc1.get_default_basedn(),
+ "displayName": "test"}
+
+ self.ldb_dc1.add(self.user_record)
+ self.ldb_dc1.delete(self.user_record["dn"])
+ self.ldb_dc1.add(self.user_record)
+
+ # The formats specified in MS-DRSR 4.1.4.13; DS_NAME_FORMAT
+ # We don't support any of the ones specified in 4.1.4.1.2.
+ self.formats = {
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_FQDN_1779,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_NT4_ACCOUNT,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_DISPLAY,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_CANONICAL,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_USER_PRINCIPAL,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_CANONICAL_EX,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_SERVICE_PRINCIPAL,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_SID_OR_SID_HISTORY,
+ # This format is not supported by Windows (or us)
+ # drsuapi.DRSUAPI_DS_NAME_FORMAT_DNS_DOMAIN,
+ }
+
+ def tearDown(self):
+ self.ldb_dc1.delete(self.user)
+ self.ldb_dc1.delete(self.ou)
+ super(DrsCracknamesTestCase, self).tearDown()
+
+ def test_Cracknames(self):
+ """
+ Verifies that we can crack names in any of the standard formats
+ (DS_NAME_FORMAT) to a GUID, and a GUID to any of the standard
+ formats.
+
+ GUID was chosen just so that we don't have to do an n^2 loop.
+ """
+ (result, ctr) = self._do_cracknames(self.user,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_FQDN_1779,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID)
+
+ self.assertEqual(ctr.count, 1)
+ self.assertEqual(ctr.array[0].status,
+ drsuapi.DRSUAPI_DS_NAME_STATUS_OK)
+
+ user_guid = ctr.array[0].result_name
+
+ for name_format in self.formats:
+ (result, ctr) = self._do_cracknames(user_guid,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID,
+ name_format)
+
+ self.assertEqual(ctr.count, 1)
+ self.assertEqual(ctr.array[0].status,
+ drsuapi.DRSUAPI_DS_NAME_STATUS_OK,
+ "Expected 0, got %s, desired format is %s"
+ % (ctr.array[0].status, name_format))
+
+ (result, ctr) = self._do_cracknames(ctr.array[0].result_name,
+ name_format,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID)
+
+ self.assertEqual(ctr.count, 1)
+ self.assertEqual(ctr.array[0].status,
+ drsuapi.DRSUAPI_DS_NAME_STATUS_OK,
+ "Expected 0, got %s, offered format is %s"
+ % (ctr.array[0].status, name_format))
+
+ def test_MultiValuedAttribute(self):
+ """
+ Verifies that, if we try to crack a name where the desired output
+ is a multi-valued attribute, the server returns
+ DRSUAPI_DS_NAME_STATUS_NOT_UNIQUE.
+ """
+ username = "Cracknames_user_MVA"
+ user = "cn=%s,%s" % (username, self.ou)
+
+ user_record = {
+ "dn": user,
+ "objectclass": "user",
+ "sAMAccountName": username,
+ "userPrincipalName": "test2@test.com",
+ "servicePrincipalName": ["test2/%s" % self.ldb_dc1.get_default_basedn(),
+ "test3/%s" % self.ldb_dc1.get_default_basedn()],
+ "displayName": "test2"}
+
+ self.ldb_dc1.add(user_record)
+
+ (result, ctr) = self._do_cracknames(user,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_FQDN_1779,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID)
+
+ self.assertEqual(ctr.count, 1)
+ self.assertEqual(ctr.array[0].status,
+ drsuapi.DRSUAPI_DS_NAME_STATUS_OK)
+
+ user_guid = ctr.array[0].result_name
+
+ (result, ctr) = self._do_cracknames(user_guid,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_SERVICE_PRINCIPAL)
+
+ self.assertEqual(ctr.count, 1)
+ self.assertEqual(ctr.array[0].status,
+ drsuapi.DRSUAPI_DS_NAME_STATUS_NOT_UNIQUE)
+
+ self.ldb_dc1.delete(user)
+
+ def test_NoSPNAttribute(self):
+ """
+ Verifies that, if we try to crack a name to an SPN for a user
+ that has no servicePrincipalName, the server returns
+ DRSUAPI_DS_NAME_STATUS_NOT_FOUND.
+ """
+ username = "Cracknames_no_SPN"
+ user = "cn=%s,%s" % (username, self.ou)
+
+ user_record = {
+ "dn": user,
+ "objectclass": "user",
+ "sAMAccountName" : username,
+ "userPrincipalName" : "test4@test.com",
+ "displayName" : "test4"}
+
+ self.ldb_dc1.add(user_record)
+
+ (result, ctr) = self._do_cracknames(user,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_FQDN_1779,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID)
+
+ self.assertEqual(ctr.count, 1)
+ self.assertEqual(ctr.array[0].status,
+ drsuapi.DRSUAPI_DS_NAME_STATUS_OK)
+
+ user_guid = ctr.array[0].result_name
+
+ (result, ctr) = self._do_cracknames(user_guid,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID,
+ drsuapi.DRSUAPI_DS_NAME_FORMAT_SERVICE_PRINCIPAL)
+
+ self.assertEqual(ctr.count, 1)
+ self.assertEqual(ctr.array[0].status,
+ drsuapi.DRSUAPI_DS_NAME_STATUS_NOT_FOUND)
+
+ self.ldb_dc1.delete(user)
+
+ def _do_cracknames(self, name, format_offered, format_desired):
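+ """Sends a single-name DsCrackNames request (info level 1) and
+ returns the (result, ctr) pair from the server."""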
+ req = drsuapi.DsNameRequest1()
+ names = drsuapi.DsNameString()
+ names.str = name
+
+ req.codepage = 1252 # Windows-1252 (Western European); the value doesn't really matter here
+ req.language = 1033 # LCID for English (US); also not significant here
+ req.format_flags = 0
+ req.format_offered = format_offered
+ req.format_desired = format_desired
+ req.count = 1
+ req.names = [names]
+
+ (result, ctr) = self.drs.DsCrackNames(self.drs_handle, 1, req)
+ return (result, ctr)
diff --git a/source4/torture/drs/python/delete_object.py b/source4/torture/drs/python/delete_object.py
new file mode 100644
index 0000000..3db0d0b
--- /dev/null
+++ b/source4/torture/drs/python/delete_object.py
@@ -0,0 +1,378 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Unix SMB/CIFS implementation.
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2010
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc2_dns_name
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN delete_object -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import time
+
+
+from ldb import (
+ SCOPE_SUBTREE,
+)
+
+import drs_base
+import ldb
+
+
+class DrsDeleteObjectTestCase(drs_base.DrsBaseTestCase):
+
+ def setUp(self):
+ super(DrsDeleteObjectTestCase, self).setUp()
+ # temporarily disable automatic replication
+ self._disable_all_repl(self.dnsname_dc1)
+ self._disable_all_repl(self.dnsname_dc2)
+ # make sure DCs are synchronized before the test
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+
+ def tearDown(self):
+ self._enable_all_repl(self.dnsname_dc1)
+ self._enable_all_repl(self.dnsname_dc2)
+ super(DrsDeleteObjectTestCase, self).tearDown()
+
+ def _make_username(self):
+ return "DrsDelObjUser_" + time.strftime("%s", time.gmtime())
+
+ # now also used to check the group
+ def _check_obj(self, sam_ldb, obj_orig, is_deleted):
+ # search the user by guid as it may be deleted
+ guid_str = self._GUID_string(obj_orig["objectGUID"][0])
+ expression = "(objectGUID=%s)" % guid_str
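+ # the show_deleted control makes tombstone (deleted) objects
+ # visible to this search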
+ res = sam_ldb.search(base=self.domain_dn,
+ expression=expression,
+ controls=["show_deleted:1"])
+ self.assertEqual(len(res), 1)
+ user_cur = res[0]
+ # Deleted Object base DN
+ dodn = self._deleted_objects_dn(sam_ldb)
+ # now check properties of the user
+ cn_orig = str(obj_orig["cn"][0])
+ cn_cur = str(user_cur["cn"][0])
+ name_orig = str(obj_orig["name"][0])
+ name_cur = str(user_cur["name"][0])
+ if is_deleted:
+ self.assertEqual(str(user_cur["isDeleted"][0]), "TRUE")
+ self.assertFalse("objectCategory" in user_cur)
+ self.assertFalse("sAMAccountType" in user_cur)
+ self.assertFalse("description" in user_cur)
+ self.assertFalse("memberOf" in user_cur)
+ self.assertFalse("member" in user_cur)
+ self.assertTrue(dodn in str(user_cur["dn"]),
+ "User %s is deleted but it is not located under %s (found at %s)!" % (name_orig, dodn, user_cur["dn"]))
+ self.assertEqual(name_cur, name_orig + "\nDEL:" + guid_str)
+ self.assertEqual(name_cur, user_cur.dn.get_rdn_value())
+ self.assertEqual(cn_cur, cn_orig + "\nDEL:" + guid_str)
+ self.assertEqual(name_cur, cn_cur)
+ else:
+ self.assertFalse("isDeleted" in user_cur)
+ self.assertEqual(name_cur, name_orig)
+ self.assertEqual(name_cur, user_cur.dn.get_rdn_value())
+ self.assertEqual(cn_cur, cn_orig)
+ self.assertEqual(name_cur, cn_cur)
+ self.assertEqual(obj_orig["dn"], user_cur["dn"])
+ self.assertTrue(dodn not in str(user_cur["dn"]))
+ return user_cur
+
+ def test_ReplicateDeletedObject1(self):
+ """Verifies how a deleted-object is replicated between two DCs.
+ This test should verify that:
+ - deleted-object is replicated properly
+ - We verify that after replication,
+ object's state to conform to a tombstone-object state
+ - This test replicates the object modifications to
+ the server with the user deleted first
+
+ TODO: It will also be great if check replPropertyMetaData.
+ TODO: Check for deleted-object state, depending on DC's features
+ when recycle-bin is enabled
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username, password="P@sswOrd!")
+ ldb_res = self.ldb_dc1.search(base=self.domain_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=False)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ # delete user on DC1
+ self.ldb_dc1.delete(user_dn)
+ # check user info on DC1 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=True)
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_orig, is_deleted=False)
+
+ # The user should not have a description or memberOf yet
+ self.assertFalse("description" in user_cur)
+ self.assertFalse("memberOf" in user_cur)
+
+ self.ldb_dc2.newgroup("group_%s" % username)
+
+ self.ldb_dc2.newgroup("group2_%s" % username)
+
+ ldb_res = self.ldb_dc2.search(base=self.domain_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=group_%s)" % username)
+ self.assertTrue(len(ldb_res) == 1)
+ self.assertTrue("sAMAccountName" in ldb_res[0])
+ group_orig = ldb_res[0]
+ group_dn = ldb_res[0]["dn"]
+
+ # modify user on DC2 to have a description and be a member of the group
+ m = ldb.Message()
+ m.dn = user_dn
+ m["description"] = ldb.MessageElement("a description",
+ ldb.FLAG_MOD_ADD, "description")
+ self.ldb_dc2.modify(m)
+ m = ldb.Message()
+ m.dn = group_dn
+ m["member"] = ldb.MessageElement(str(user_dn),
+ ldb.FLAG_MOD_ADD, "member")
+ self.ldb_dc2.modify(m)
+
+ ldb_res = self.ldb_dc2.search(base=self.domain_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=group2_%s)" % username)
+ self.assertTrue(len(ldb_res) == 1)
+ self.assertTrue("sAMAccountName" in ldb_res[0])
+ group2_dn = ldb_res[0]["dn"]
+ group2_orig = ldb_res[0]
+
+ m = ldb.Message()
+ m.dn = group2_dn
+ m["member"] = ldb.MessageElement(str(group_dn),
+ ldb.FLAG_MOD_ADD, "member")
+ self.ldb_dc2.modify(m)
+
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_orig, is_deleted=False)
+
+ # The user should now have a description and be a member of the group
+ self.assertTrue("description" in user_cur)
+ self.assertTrue("memberOf" in user_cur)
+
+ ldb_res = self.ldb_dc2.search(base=self.domain_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=group_%s)" % username)
+ self.assertTrue(len(ldb_res) == 1)
+
+ # This group is a member of another group
+ self.assertTrue("memberOf" in ldb_res[0])
+
+ # The user was deleted on DC1, but check that the member link we just added on DC2 is present
+ self.assertTrue("member" in ldb_res[0])
+
+ # trigger replication from DC2 to DC1
+ # to check if deleted object gets restored
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check user info on DC1 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=True)
+ # check user info on DC2 - should be valid user
+ self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_orig, is_deleted=False)
+
+ ldb_res = self.ldb_dc1.search(base=self.domain_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=group_%s)" % username)
+ self.assertTrue(len(ldb_res) == 1)
+
+ # This group is a member of another group
+ self.assertTrue("memberOf" in ldb_res[0])
+
+ # The user was deleted on DC1; check that the member link added on DC2 never replicated in
+ self.assertFalse("member" in ldb_res[0])
+
+ # trigger replication from DC1 to DC2
+ # to check if deleted object is replicated
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC1 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=True)
+ # check user info on DC2 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_orig, is_deleted=True)
+
+ # delete group on DC1
+ self.ldb_dc1.delete(group_dn)
+
+ # trigger replication from DC1 to DC2
+ # to check if deleted object is replicated
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ # check group info on DC1 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=group_orig, is_deleted=True)
+ # check group info on DC2 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=group_orig, is_deleted=True)
+
+ ldb_res = self.ldb_dc2.search(base=self.domain_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=group2_%s)" % username)
+ self.assertTrue(len(ldb_res) == 1)
+ self.assertFalse("member" in ldb_res[0])
+
+ # delete group on DC1
+ self.ldb_dc1.delete(group2_dn)
+
+ # trigger replication from DC1 to DC2
+ # to check if deleted object is replicated
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ # check group info on DC1 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=group2_orig, is_deleted=True)
+ # check group info on DC2 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=group2_orig, is_deleted=True)
+
+ def test_ReplicateDeletedObject2(self):
+ """Verifies how a deleted-object is replicated between two DCs.
+ This test should verify that:
+ - deleted-object is replicated properly
+ - We verify that after replication,
+ object's state to conform to a tombstone-object state
+ - This test replicates the delete to the server with the
+ object modifications first
+
+ TODO: It will also be great if check replPropertyMetaData.
+ TODO: Check for deleted-object state, depending on DC's features
+ when recycle-bin is enabled
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username, password="P@sswOrd!")
+ ldb_res = self.ldb_dc1.search(base=self.domain_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=False)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ # delete user on DC1
+ self.ldb_dc1.delete(user_dn)
+ # check user info on DC1 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=True)
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_orig, is_deleted=False)
+
+ # The user should not have a description or memberOf yet
+ self.assertFalse("description" in user_cur)
+ self.assertFalse("memberOf" in user_cur)
+
+ self.ldb_dc2.newgroup("group_%s" % username)
+
+ self.ldb_dc2.newgroup("group2_%s" % username)
+
+ ldb_res = self.ldb_dc2.search(base=self.domain_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=group_%s)" % username)
+ self.assertTrue(len(ldb_res) == 1)
+ self.assertTrue("sAMAccountName" in ldb_res[0])
+ group_orig = ldb_res[0]
+ group_dn = ldb_res[0]["dn"]
+
+ # modify user on DC2 to have a description and be a member of the group
+ m = ldb.Message()
+ m.dn = user_dn
+ m["description"] = ldb.MessageElement("a description",
+ ldb.FLAG_MOD_ADD, "description")
+ self.ldb_dc2.modify(m)
+ m = ldb.Message()
+ m.dn = group_dn
+ m["member"] = ldb.MessageElement(str(user_dn),
+ ldb.FLAG_MOD_ADD, "member")
+ self.ldb_dc2.modify(m)
+
+ ldb_res = self.ldb_dc2.search(base=self.domain_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=group2_%s)" % username)
+ self.assertTrue(len(ldb_res) == 1)
+ self.assertTrue("sAMAccountName" in ldb_res[0])
+ group2_dn = ldb_res[0]["dn"]
+ group2_orig = ldb_res[0]
+
+ m = ldb.Message()
+ m.dn = group2_dn
+ m["member"] = ldb.MessageElement(str(group_dn),
+ ldb.FLAG_MOD_ADD, "member")
+ self.ldb_dc2.modify(m)
+
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_orig, is_deleted=False)
+
+ # The user should now have a description and a memberOf
+ self.assertTrue("description" in user_cur)
+ self.assertTrue("memberOf" in user_cur)
+
+ # trigger replication from DC1 to DC2
+ # to check that the delete is replicated
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC1 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=True)
+ # check user info on DC2 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_orig, is_deleted=True)
+
+ ldb_res = self.ldb_dc2.search(base=self.domain_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=group_%s)" % username)
+ self.assertTrue(len(ldb_res) == 1)
+ self.assertTrue("memberOf" in ldb_res[0])
+ self.assertFalse("member" in ldb_res[0])
+
+ # trigger replication from DC2 to DC1
+ # to check if deleted object is replicated
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check user info on DC1 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=True)
+ # check user info on DC2 - should be deleted
+ self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_orig, is_deleted=True)
+
+ ldb_res = self.ldb_dc1.search(base=self.domain_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=group_%s)" % username)
+ self.assertTrue(len(ldb_res) == 1)
+ self.assertTrue("memberOf" in ldb_res[0])
+ self.assertFalse("member" in ldb_res[0])
+
+ # delete group on DC1
+ self.ldb_dc1.delete(group_dn)
+ self.ldb_dc1.delete(group2_dn)
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
diff --git a/source4/torture/drs/python/drs_base.py b/source4/torture/drs/python/drs_base.py
new file mode 100644
index 0000000..db7a87a
--- /dev/null
+++ b/source4/torture/drs/python/drs_base.py
@@ -0,0 +1,622 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Unix SMB/CIFS implementation.
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2016
+# Copyright (C) Catalyst IT Ltd. 2016
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import time
+import os
+import ldb
+
+sys.path.insert(0, "bin/python")
+import samba.tests
+from samba.tests.samba_tool.base import SambaToolCmdTest
+from samba import dsdb
+from samba.dcerpc import drsuapi, misc, drsblobs, security
+from samba.ndr import ndr_unpack, ndr_pack
+from samba.drs_utils import drs_DsBind
+from samba import gensec
+from ldb import (
+ SCOPE_BASE,
+ Message,
+ FLAG_MOD_REPLACE,
+)
+from samba.common import cmp
+from samba.common import get_string
+
+
+class DrsBaseTestCase(SambaToolCmdTest):
+ """Base class implementation for all DRS python tests.
+ It is intended to provide common initialization and
+ functionality used by all DRS tests in the drs/python
+ test package. For instance, DC1 and DC2 are always used
+ to pass the URLs of the DCs to test against"""
+
+ def setUp(self):
+ super(DrsBaseTestCase, self).setUp()
+ creds = self.get_credentials()
+ creds.set_gensec_features(creds.get_gensec_features() | gensec.FEATURE_SEAL)
+
+ # connect to DCs
+ self.url_dc1 = samba.tests.env_get_var_value("DC1")
+ (self.ldb_dc1, self.info_dc1) = samba.tests.connect_samdb_ex(self.url_dc1,
+ ldap_only=True)
+ self.url_dc2 = samba.tests.env_get_var_value("DC2")
+ (self.ldb_dc2, self.info_dc2) = samba.tests.connect_samdb_ex(self.url_dc2,
+ ldap_only=True)
+ self.test_ldb_dc = self.ldb_dc1
+
+ # cache some of RootDSE props
+ self.schema_dn = str(self.info_dc1["schemaNamingContext"][0])
+ self.domain_dn = str(self.info_dc1["defaultNamingContext"][0])
+ self.config_dn = str(self.info_dc1["configurationNamingContext"][0])
+ self.forest_level = int(self.info_dc1["forestFunctionality"][0])
+
+ # we will need DCs DNS names for 'samba-tool drs' command
+ self.dnsname_dc1 = str(self.info_dc1["dnsHostName"][0])
+ self.dnsname_dc2 = str(self.info_dc2["dnsHostName"][0])
+
+ # for debugging the test code
+ self._debug = False
+
+ def tearDown(self):
+ super(DrsBaseTestCase, self).tearDown()
+
+ def set_test_ldb_dc(self, ldb_dc):
+ """Sets which DC's LDB we perform operations on during the test"""
+ self.test_ldb_dc = ldb_dc
+
+ def _GUID_string(self, guid):
+ return get_string(self.test_ldb_dc.schema_format_value("objectGUID", guid))
+
+ def _ldap_schemaUpdateNow(self, sam_db):
+ rec = {"dn": "",
+ "schemaUpdateNow": "1"}
+ m = Message.from_dict(sam_db, rec, FLAG_MOD_REPLACE)
+ sam_db.modify(m)
+
+ def _deleted_objects_dn(self, sam_ldb):
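+ # resolve the Deleted Objects container through its well-known
+ # GUID (the <WKGUID=...> extended-DN syntax)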
+ wkdn = "<WKGUID=18E2EA80684F11D2B9AA00C04F79F805,%s>" % self.domain_dn
+ res = sam_ldb.search(base=wkdn,
+ scope=SCOPE_BASE,
+ controls=["show_deleted:1"])
+ self.assertEqual(len(res), 1)
+ return str(res[0]["dn"])
+
+ def _lost_and_found_dn(self, sam_ldb, nc):
+ wkdn = "<WKGUID=%s,%s>" % (dsdb.DS_GUID_LOSTANDFOUND_CONTAINER, nc)
+ res = sam_ldb.search(base=wkdn,
+ scope=SCOPE_BASE)
+ self.assertEqual(len(res), 1)
+ return str(res[0]["dn"])
+
+ def _make_obj_name(self, prefix):
+ return prefix + time.strftime("%s", time.gmtime())
+
+ def _samba_tool_cmd_list(self, drs_command):
+ # make command line credentials string
+
+ # If the test runs on Windows then it can provide its own auth string
+ if hasattr(self, 'cmdline_auth'):
+ cmdline_auth = self.cmdline_auth
+ else:
+ ccache_name = self.get_creds_ccache_name()
+
+ # Tunnel the command line credentials down to the
+ # subcommand to avoid a new kinit
+ cmdline_auth = "--use-krb5-ccache=%s" % ccache_name
+
+ # bin/samba-tool drs <drs_command> <cmdline_auth>
+ return ["drs", drs_command, cmdline_auth]
+
+ def _net_drs_replicate(self, DC, fromDC, nc_dn=None, forced=True,
+ local=False, full_sync=False, single=False):
+ if nc_dn is None:
+ nc_dn = self.domain_dn
+ # make base command line
+ samba_tool_cmdline = self._samba_tool_cmd_list("replicate")
+ # bin/samba-tool drs replicate <Dest_DC_NAME> <Src_DC_NAME> <Naming Context>
+ samba_tool_cmdline += [DC, fromDC, nc_dn]
+
+ if forced:
+ samba_tool_cmdline += ["--sync-forced"]
+ if local:
+ samba_tool_cmdline += ["--local"]
+ if full_sync:
+ samba_tool_cmdline += ["--full-sync"]
+ if single:
+ samba_tool_cmdline += ["--single-object"]
+
+ (result, out, err) = self.runsubcmd(*samba_tool_cmdline)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+
+ def _enable_inbound_repl(self, DC):
+ # make base command line
+ samba_tool_cmd = self._samba_tool_cmd_list("options")
+ # enable inbound replication
+ samba_tool_cmd += [DC, "--dsa-option=-DISABLE_INBOUND_REPL"]
+ (result, out, err) = self.runsubcmd(*samba_tool_cmd)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+
+ def _disable_inbound_repl(self, DC):
+ # make base command line
+ samba_tool_cmd = self._samba_tool_cmd_list("options")
+ # disable replication
+ samba_tool_cmd += [DC, "--dsa-option=+DISABLE_INBOUND_REPL"]
+ (result, out, err) = self.runsubcmd(*samba_tool_cmd)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+
+ def _enable_all_repl(self, DC):
+ self._enable_inbound_repl(DC)
+ # make base command line
+ samba_tool_cmd = self._samba_tool_cmd_list("options")
+ # enable replication
+ samba_tool_cmd += [DC, "--dsa-option=-DISABLE_OUTBOUND_REPL"]
+ (result, out, err) = self.runsubcmd(*samba_tool_cmd)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+
+ def _disable_all_repl(self, DC):
+ self._disable_inbound_repl(DC)
+ # make base command line
+ samba_tool_cmd = self._samba_tool_cmd_list("options")
+ # disable replication
+ samba_tool_cmd += [DC, "--dsa-option=+DISABLE_OUTBOUND_REPL"]
+ (result, out, err) = self.runsubcmd(*samba_tool_cmd)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+
+ def _get_highest_hwm_utdv(self, ldb_conn):
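+ """Returns a (high-watermark, up-to-dateness vector) pair built
+ from the DC's current highestCommittedUSN, i.e. treating
+ everything up to this point as already replicated."""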
+ res = ldb_conn.search("", scope=ldb.SCOPE_BASE, attrs=["highestCommittedUSN"])
+ hwm = drsuapi.DsReplicaHighWaterMark()
+ hwm.tmp_highest_usn = int(res[0]["highestCommittedUSN"][0])
+ hwm.reserved_usn = 0
+ hwm.highest_usn = hwm.tmp_highest_usn
+
+ utdv = drsuapi.DsReplicaCursorCtrEx()
+ cursors = []
+ c1 = drsuapi.DsReplicaCursor()
+ c1.source_dsa_invocation_id = misc.GUID(ldb_conn.get_invocation_id())
+ c1.highest_usn = hwm.highest_usn
+ cursors.append(c1)
+ utdv.count = len(cursors)
+ utdv.cursors = cursors
+ return (hwm, utdv)
+
+ def _get_identifier(self, ldb_conn, dn):
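+ """Returns a DsReplicaObjectIdentifier (GUID, SID if present, and
+ DN) for the object at the given DN."""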
+ res = ldb_conn.search(dn, scope=ldb.SCOPE_BASE,
+ attrs=["objectGUID", "objectSid"])
+ id = drsuapi.DsReplicaObjectIdentifier()
+ id.guid = ndr_unpack(misc.GUID, res[0]['objectGUID'][0])
+ if "objectSid" in res[0]:
+ id.sid = ndr_unpack(security.dom_sid, res[0]['objectSid'][0])
+ id.dn = str(res[0].dn)
+ return id
+
+ def _get_ctr6_links(self, ctr6):
+ """
+ Unpacks the linked attributes from a DsGetNCChanges response
+ and returns them as a list.
+ """
+ ctr6_links = []
+ for lidx in range(0, ctr6.linked_attributes_count):
+ l = ctr6.linked_attributes[lidx]
+ try:
+ target = ndr_unpack(drsuapi.DsReplicaObjectIdentifier3,
+ l.value.blob)
+ except:
+ target = ndr_unpack(drsuapi.DsReplicaObjectIdentifier3Binary,
+ l.value.blob)
+ al = AbstractLink(l.attid, l.flags,
+ l.identifier.guid,
+ target.guid, target.dn)
+ ctr6_links.append(al)
+
+ return ctr6_links
+
+ def _get_ctr6_object_guids(self, ctr6):
+ """Returns all the object GUIDs in a GetNCChanges response"""
+ guid_list = []
+
+ obj = ctr6.first_object
+ for i in range(0, ctr6.object_count):
+ guid_list.append(str(obj.object.identifier.guid))
+ obj = obj.next_object
+
+ return guid_list
+
+ def _ctr6_debug(self, ctr6):
+ """
+ Displays basic info contained in a DsGetNCChanges response.
+ Having this debug code allows us to see the difference in behaviour
+ between Samba and Windows more easily. Turn on the self._debug flag to see it.
+ """
+
+ if self._debug:
+ print("------------ recvd CTR6 -------------")
+
+ next_object = ctr6.first_object
+ for i in range(0, ctr6.object_count):
+ print("Obj %d: %s %s" % (i, next_object.object.identifier.dn[:25],
+ next_object.object.identifier.guid))
+ next_object = next_object.next_object
+
+ print("Linked Attributes: %d" % ctr6.linked_attributes_count)
+ for lidx in range(0, ctr6.linked_attributes_count):
+ l = ctr6.linked_attributes[lidx]
+ try:
+ target = ndr_unpack(drsuapi.DsReplicaObjectIdentifier3,
+ l.value.blob)
+ except:
+ target = ndr_unpack(drsuapi.DsReplicaObjectIdentifier3Binary,
+ l.value.blob)
+
+ print("Link Tgt %s... <-- Src %s"
+ % (target.dn[:25], l.identifier.guid))
+ state = "Del"
+ if l.flags & drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE:
+ state = "Act"
+ print(" v%u %s changed %u" % (l.meta_data.version, state,
+ l.meta_data.originating_change_time))
+
+ print("HWM: %d" % (ctr6.new_highwatermark.highest_usn))
+ print("Tmp HWM: %d" % (ctr6.new_highwatermark.tmp_highest_usn))
+ print("More data: %d" % (ctr6.more_data))
+
+ def _get_replication(self, replica_flags,
+ drs_error=drsuapi.DRSUAPI_EXOP_ERR_NONE, drs=None, drs_handle=None,
+ highwatermark=None, uptodateness_vector=None,
+ more_flags=0, max_objects=133, exop=0,
+ dest_dsa=drsuapi.DRSUAPI_DS_BIND_GUID_W2K3,
+ source_dsa=None, invocation_id=None, nc_dn_str=None):
+ """
+ Builds a DsGetNCChanges request based on the information provided
+ and returns the response received from the DC.
+ """
+ if source_dsa is None:
+ source_dsa = self.test_ldb_dc.get_ntds_GUID()
+ if invocation_id is None:
+ invocation_id = self.test_ldb_dc.get_invocation_id()
+ if nc_dn_str is None:
+ nc_dn_str = self.test_ldb_dc.domain_dn()
+
+ if highwatermark is None:
+ if self.default_hwm is None:
+ (highwatermark, _) = self._get_highest_hwm_utdv(self.test_ldb_dc)
+ else:
+ highwatermark = self.default_hwm
+
+ if drs is None:
+ drs = self.drs
+ if drs_handle is None:
+ drs_handle = self.drs_handle
+
+ req10 = self._getnc_req10(dest_dsa=dest_dsa,
+ invocation_id=invocation_id,
+ nc_dn_str=nc_dn_str,
+ exop=exop,
+ max_objects=max_objects,
+ replica_flags=replica_flags,
+ more_flags=more_flags)
+ req10.highwatermark = highwatermark
+ if uptodateness_vector is not None:
+ uptodateness_vector_v1 = drsuapi.DsReplicaCursorCtrEx()
+ cursors = []
+ for i in range(0, uptodateness_vector.count):
+ c = uptodateness_vector.cursors[i]
+ c1 = drsuapi.DsReplicaCursor()
+ c1.source_dsa_invocation_id = c.source_dsa_invocation_id
+ c1.highest_usn = c.highest_usn
+ cursors.append(c1)
+ uptodateness_vector_v1.count = len(cursors)
+ uptodateness_vector_v1.cursors = cursors
+ req10.uptodateness_vector = uptodateness_vector_v1
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 10, req10)
+ self._ctr6_debug(ctr)
+
+ self.assertEqual(level, 6, "expected level 6 response!")
+ self.assertEqual(ctr.source_dsa_guid, misc.GUID(source_dsa))
+ self.assertEqual(ctr.source_dsa_invocation_id, misc.GUID(invocation_id))
+ self.assertEqual(ctr.extended_ret, drs_error)
+
+ return ctr
+
+ def _check_replication(self, expected_dns, replica_flags, expected_links=[],
+ drs_error=drsuapi.DRSUAPI_EXOP_ERR_NONE, drs=None, drs_handle=None,
+ highwatermark=None, uptodateness_vector=None,
+ more_flags=0, more_data=False,
+ dn_ordered=True, links_ordered=True,
+ max_objects=133, exop=0,
+ dest_dsa=drsuapi.DRSUAPI_DS_BIND_GUID_W2K3,
+ source_dsa=None, invocation_id=None, nc_dn_str=None,
+ nc_object_count=0, nc_linked_attributes_count=0):
+ """
+ Sends a DsGetNCChanges request and checks that the response matches
+ the expected DNs, links and error.
+ """
+
+ # send a DsGetNCChanges to the DC
+ ctr6 = self._get_replication(replica_flags,
+ drs_error, drs, drs_handle,
+ highwatermark, uptodateness_vector,
+ more_flags, max_objects, exop, dest_dsa,
+ source_dsa, invocation_id, nc_dn_str)
+
+ # check the response is what we expect
+ self._check_ctr6(ctr6, expected_dns, expected_links,
+ nc_object_count=nc_object_count, more_data=more_data,
+ dn_ordered=dn_ordered)
+ return (ctr6.new_highwatermark, ctr6.uptodateness_vector)
+
+ def _get_ctr6_dn_list(self, ctr6):
+ """
+ Returns the DNs contained in a DsGetNCChanges response.
+ """
+ dn_list = []
+ next_object = ctr6.first_object
+ for i in range(0, ctr6.object_count):
+ dn_list.append(next_object.object.identifier.dn)
+ next_object = next_object.next_object
+ self.assertEqual(next_object, None)
+
+ return dn_list
+
+ def _check_ctr6(self, ctr6, expected_dns=[], expected_links=[],
+ dn_ordered=True, links_ordered=True,
+ more_data=False, nc_object_count=0,
+ nc_linked_attributes_count=0, drs_error=0):
+ """
+ Check that a ctr6 matches the specified parameters.
+ """
+ ctr6_raw_dns = self._get_ctr6_dn_list(ctr6)
+
+ # filter out changes to the RID Set objects, as these can happen
+ # intermittently and mess up the test assertions
+ ctr6_dns = []
+ for dn in ctr6_raw_dns:
+ if "CN=RID Set," in dn or "CN=RID Manager$," in dn:
+ print("Removing {0} from GetNCChanges reply".format(dn))
+ else:
+ ctr6_dns.append(dn)
+
+ self.assertEqual(len(ctr6_dns), len(expected_dns),
+ "Received unexpected objects (%s)" % ctr6_dns)
+ self.assertEqual(ctr6.object_count, len(ctr6_raw_dns))
+ self.assertEqual(ctr6.linked_attributes_count, len(expected_links))
+ self.assertEqual(ctr6.more_data, more_data)
+ self.assertEqual(ctr6.nc_object_count, nc_object_count)
+ self.assertEqual(ctr6.nc_linked_attributes_count, nc_linked_attributes_count)
+ self.assertEqual(ctr6.drs_error[0], drs_error)
+
+ i = 0
+ for dn in expected_dns:
+ # Expect them back in the exact same order as specified.
+ if dn_ordered:
+ self.assertNotEqual(ctr6_dns[i], None)
+ self.assertEqual(ctr6_dns[i], dn)
+ i = i + 1
+ # Don't care what order
+ else:
+ self.assertTrue(dn in ctr6_dns, "Couldn't find DN '%s' anywhere in ctr6 response." % dn)
+
+ # Extract the links from the response
+ ctr6_links = self._get_ctr6_links(ctr6)
+ expected_links.sort()
+
+ lidx = 0
+ for el in expected_links:
+ if links_ordered:
+ self.assertEqual(el, ctr6_links[lidx])
+ lidx += 1
+ else:
+ self.assertTrue(el in ctr6_links, "Couldn't find link '%s' anywhere in ctr6 response." % el)
+
+ def _exop_req8(self, dest_dsa, invocation_id, nc_dn_str, exop,
+ replica_flags=0, max_objects=0, partial_attribute_set=None,
+ partial_attribute_set_ex=None, mapping_ctr=None, nc_guid=None):
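+ """Builds a DsGetNCChangesRequest8 for the given naming context
+ and extended operation, with a zeroed high-watermark and no
+ up-to-dateness vector."""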
+ req8 = drsuapi.DsGetNCChangesRequest8()
+
+ req8.destination_dsa_guid = misc.GUID(dest_dsa) if dest_dsa else misc.GUID()
+ req8.source_dsa_invocation_id = misc.GUID(invocation_id)
+ req8.naming_context = drsuapi.DsReplicaObjectIdentifier()
+ req8.naming_context.dn = str(nc_dn_str)
+ if nc_guid is not None:
+ req8.naming_context.guid = nc_guid
+ req8.highwatermark = drsuapi.DsReplicaHighWaterMark()
+ req8.highwatermark.tmp_highest_usn = 0
+ req8.highwatermark.reserved_usn = 0
+ req8.highwatermark.highest_usn = 0
+ req8.uptodateness_vector = None
+ req8.replica_flags = replica_flags
+ req8.max_object_count = max_objects
+ req8.max_ndr_size = 402116
+ req8.extended_op = exop
+ req8.fsmo_info = 0
+ req8.partial_attribute_set = partial_attribute_set
+ req8.partial_attribute_set_ex = partial_attribute_set_ex
+ if mapping_ctr:
+ req8.mapping_ctr = mapping_ctr
+ else:
+ req8.mapping_ctr.num_mappings = 0
+ req8.mapping_ctr.mappings = None
+
+ return req8
+
+ def _getnc_req10(self, dest_dsa, invocation_id, nc_dn_str, exop,
+ replica_flags=0, max_objects=0, partial_attribute_set=None,
+ partial_attribute_set_ex=None, mapping_ctr=None,
+ more_flags=0, nc_guid=None):
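+ """Builds a DsGetNCChangesRequest10. Same as _exop_req8, but the
+ v10 request additionally carries more_flags (e.g. GET_TGT)."""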
+ req10 = drsuapi.DsGetNCChangesRequest10()
+
+ req10.destination_dsa_guid = misc.GUID(dest_dsa) if dest_dsa else misc.GUID()
+ req10.source_dsa_invocation_id = misc.GUID(invocation_id)
+ req10.naming_context = drsuapi.DsReplicaObjectIdentifier()
+ req10.naming_context.dn = str(nc_dn_str)
+ if nc_guid is not None:
+ req10.naming_context.guid = nc_guid
+ req10.highwatermark = drsuapi.DsReplicaHighWaterMark()
+ req10.highwatermark.tmp_highest_usn = 0
+ req10.highwatermark.reserved_usn = 0
+ req10.highwatermark.highest_usn = 0
+ req10.uptodateness_vector = None
+ req10.replica_flags = replica_flags
+ req10.max_object_count = max_objects
+ req10.max_ndr_size = 402116
+ req10.extended_op = exop
+ req10.fsmo_info = 0
+ req10.partial_attribute_set = partial_attribute_set
+ req10.partial_attribute_set_ex = partial_attribute_set_ex
+ if mapping_ctr:
+ req10.mapping_ctr = mapping_ctr
+ else:
+ req10.mapping_ctr.num_mappings = 0
+ req10.mapping_ctr.mappings = None
+ req10.more_flags = more_flags
+
+ return req10
+
+ def _ds_bind(self, server_name, creds=None, ip=None):
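+ """Connects to the drsuapi pipe on the given server over sealed
+ ncacn_ip_tcp and returns the (connection, bind handle) pair."""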
+ if ip is None:
+ binding_str = f"ncacn_ip_tcp:{server_name}[seal]"
+ else:
+ binding_str = f"ncacn_ip_tcp:{ip}[seal,target_hostname={server_name}]"
+
+ if creds is None:
+ creds = self.get_credentials()
+ drs = drsuapi.drsuapi(binding_str, self.get_loadparm(), creds)
+ (drs_handle, supported_extensions) = drs_DsBind(drs)
+ return (drs, drs_handle)
+
+ def get_partial_attribute_set(self, attids=[drsuapi.DRSUAPI_ATTID_objectClass]):
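+ """Builds a DsPartialAttributeSet restricting replication to the
+ given attribute IDs (just objectClass by default)."""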
+ partial_attribute_set = drsuapi.DsPartialAttributeSet()
+ partial_attribute_set.attids = attids
+ partial_attribute_set.num_attids = len(attids)
+ return partial_attribute_set
+
+
+class AbstractLink:
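+ """A single linked attribute value from a DsGetNCChanges response,
+ ordered according to CompareLinks() in MS-DRSR section 4.1.10.5.17."""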
+ def __init__(self, attid, flags, identifier, targetGUID,
+ targetDN=""):
+ self.attid = attid
+ self.flags = flags
+ self.identifier = str(identifier)
+ self.selfGUID_blob = ndr_pack(identifier)
+ self.targetGUID = str(targetGUID)
+ self.targetGUID_blob = ndr_pack(targetGUID)
+ self.targetDN = targetDN
+
+ def __repr__(self):
+ return "AbstractLink(0x%08x, 0x%08x, %s, %s)" % (
+ self.attid, self.flags, self.identifier, self.targetGUID)
+
+ def __internal_cmp__(self, other, verbose=False):
+ """See CompareLinks() in MS-DRSR section 4.1.10.5.17"""
+ if not isinstance(other, AbstractLink):
+ if verbose:
+ print("AbstractLink.__internal_cmp__(%r, %r) => wrong type" % (self, other))
+ return NotImplemented
+
+ c = cmp(self.selfGUID_blob, other.selfGUID_blob)
+ if c != 0:
+ if verbose:
+ print("AbstractLink.__internal_cmp__(%r, %r) => %d different identifier" % (self, other, c))
+ return c
+
+ c = other.attid - self.attid
+ if c != 0:
+ if verbose:
+ print("AbstractLink.__internal_cmp__(%r, %r) => %d different attid" % (self, other, c))
+ return c
+
+ self_active = self.flags & drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE
+ other_active = other.flags & drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE
+
+ c = self_active - other_active
+ if c != 0:
+ if verbose:
+ print("AbstractLink.__internal_cmp__(%r, %r) => %d different FLAG_ACTIVE" % (self, other, c))
+ return c
+
+ c = cmp(self.targetGUID_blob, other.targetGUID_blob)
+ if c != 0:
+ if verbose:
+ print("AbstractLink.__internal_cmp__(%r, %r) => %d different target" % (self, other, c))
+ return c
+
+ c = self.flags - other.flags
+ if c != 0:
+ if verbose:
+ print("AbstractLink.__internal_cmp__(%r, %r) => %d different flags" % (self, other, c))
+ return c
+
+ return 0
+
+ def __lt__(self, other):
+ c = self.__internal_cmp__(other)
+ if c == NotImplemented:
+ return NotImplemented
+ if c < 0:
+ return True
+ return False
+
+ def __le__(self, other):
+ c = self.__internal_cmp__(other)
+ if c == NotImplemented:
+ return NotImplemented
+ if c <= 0:
+ return True
+ return False
+
+ def __eq__(self, other):
+ c = self.__internal_cmp__(other, verbose=True)
+ if c == NotImplemented:
+ return NotImplemented
+ if c == 0:
+ return True
+ return False
+
+ def __ne__(self, other):
+ c = self.__internal_cmp__(other)
+ if c == NotImplemented:
+ return NotImplemented
+ if c != 0:
+ return True
+ return False
+
+ def __gt__(self, other):
+ c = self.__internal_cmp__(other)
+ if c == NotImplemented:
+ return NotImplemented
+ if c > 0:
+ return True
+ return False
+
+ def __ge__(self, other):
+ c = self.__internal_cmp__(other)
+ if c == NotImplemented:
+ return NotImplemented
+ if c >= 0:
+ return True
+ return False
+
+ def __hash__(self):
+ return hash((self.attid, self.flags, self.identifier, self.targetGUID))
diff --git a/source4/torture/drs/python/fsmo.py b/source4/torture/drs/python/fsmo.py
new file mode 100644
index 0000000..6021ce4
--- /dev/null
+++ b/source4/torture/drs/python/fsmo.py
@@ -0,0 +1,152 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Unix SMB/CIFS implementation.
+# Copyright (C) Anatoliy Atanasov <anatoliy.atanasov@postpath.com> 2010
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc2_dns_name
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN fsmo -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import sys
+import time
+import os
+
+sys.path.insert(0, "bin/python")
+
+from ldb import SCOPE_BASE
+
+import drs_base
+
+
+class DrsFsmoTestCase(drs_base.DrsBaseTestCase):
+
+ def setUp(self):
+ super(DrsFsmoTestCase, self).setUp()
+
+ # we have to wait for the replication before we make the check
+ self.fsmo_wait_max_time = 20
+ self.fsmo_wait_sleep_time = 0.2
+
+ # cache some of RootDSE props
+ self.dsServiceName_dc1 = self.info_dc1["dsServiceName"][0]
+ self.dsServiceName_dc2 = self.info_dc2["dsServiceName"][0]
+ self.infrastructure_dn = "CN=Infrastructure," + self.domain_dn
+ self.naming_dn = "CN=Partitions," + self.config_dn
+ self.rid_dn = "CN=RID Manager$,CN=System," + self.domain_dn
+ self.domain_dns_dn = (
+ "CN=Infrastructure,DC=DomainDnsZones, %s" % self.domain_dn )
+ self.forest_dns_dn = (
+ "CN=Infrastructure,DC=ForestDnsZones, %s" % self.domain_dn )
+
+ def tearDown(self):
+ super(DrsFsmoTestCase, self).tearDown()
+
+ def _net_fsmo_role_transfer(self, DC, role, noop=False):
+ # make command line credentials string
+ ccache_name = self.get_creds_ccache_name()
+ cmd_line_auth = "--use-krb5-ccache=%s" % ccache_name
+ (result, out, err) = self.runsubcmd("fsmo", "transfer",
+ "--role=%s" % role,
+ "-H", "ldap://%s:389" % DC,
+ cmd_line_auth)
+
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ if not noop:
+ self.assertTrue("FSMO transfer of '%s' role successful" % role in out)
+ else:
+ self.assertTrue("This DC already has the '%s' FSMO role" % role in out)
+
+ def _wait_for_role_transfer(self, ldb_dc, role_dn, master):
+ """Wait for role transfer for certain amount of time
+
+ :return: (Result=True|False, CurrentMasterDnsName) tuple
+ """
+ cur_master = ''
+ retries = int(self.fsmo_wait_max_time / self.fsmo_wait_sleep_time) + 1
+ for i in range(0, retries):
+ # check if the master has been transferred
+ res = ldb_dc.search(role_dn,
+ scope=SCOPE_BASE, attrs=["fSMORoleOwner"])
+ assert len(res) == 1, "Only one fSMORoleOwner value expected!"
+ cur_master = res[0]["fSMORoleOwner"][0]
+ if master == cur_master:
+ return (True, cur_master)
+ # skip last sleep, if no need to wait anymore
+ if i != (retries - 1):
+ # wait a little bit before next retry
+ time.sleep(self.fsmo_wait_sleep_time)
+ return (False, cur_master)
+
+ def _role_transfer(self, role, role_dn):
+ """Triggers transfer of role from DC1 to DC2
+ and vice versa so the role goes back to the original dc"""
+ # dc2 gets the role from dc1
+ print("Testing for %s role transfer from %s to %s" % (role, self.dnsname_dc1, self.dnsname_dc2))
+
+ self._net_fsmo_role_transfer(DC=self.dnsname_dc2, role=role)
+ # check if the role is transferred
+ (res, master) = self._wait_for_role_transfer(ldb_dc=self.ldb_dc2,
+ role_dn=role_dn,
+ master=self.dsServiceName_dc2)
+ self.assertTrue(res,
+ "Transferring %s role to %s has failed, master is: %s!" % (role, self.dsServiceName_dc2, master))
+
+ # dc1 gets back the role from dc2
+ print("Testing for %s role transfer from %s to %s" % (role, self.dnsname_dc2, self.dnsname_dc1))
+ self._net_fsmo_role_transfer(DC=self.dnsname_dc1, role=role)
+ # check if the role is transferred
+ (res, master) = self._wait_for_role_transfer(ldb_dc=self.ldb_dc1,
+ role_dn=role_dn,
+ master=self.dsServiceName_dc1)
+ self.assertTrue(res,
+ "Transferring %s role to %s has failed, master is: %s!" % (role, self.dsServiceName_dc1, master))
+
+ # dc1 keeps the role
+ print("Testing for no-op %s role transfer from %s to %s" % (role, self.dnsname_dc2, self.dnsname_dc1))
+ self._net_fsmo_role_transfer(DC=self.dnsname_dc1, role=role, noop=True)
+ # check if the role is transferred
+ (res, master) = self._wait_for_role_transfer(ldb_dc=self.ldb_dc1,
+ role_dn=role_dn,
+ master=self.dsServiceName_dc1)
+ self.assertTrue(res,
+ "Transferring %s role to %s has failed, master is: %s!" % (role, self.dsServiceName_dc1, master))
+
+ def test_SchemaMasterTransfer(self):
+ self._role_transfer(role="schema", role_dn=self.schema_dn)
+
+ def test_InfrastructureMasterTransfer(self):
+ self._role_transfer(role="infrastructure", role_dn=self.infrastructure_dn)
+
+ def test_PDCMasterTransfer(self):
+ self._role_transfer(role="pdc", role_dn=self.domain_dn)
+
+ def test_RIDMasterTransfer(self):
+ self._role_transfer(role="rid", role_dn=self.rid_dn)
+
+ def test_NamingMasterTransfer(self):
+ self._role_transfer(role="naming", role_dn=self.naming_dn)
+
+ def test_DomainDnsZonesMasterTransfer(self):
+ self._role_transfer(role="domaindns", role_dn=self.domain_dns_dn)
+
+ def test_ForestDnsZonesMasterTransfer(self):
+ self._role_transfer(role="forestdns", role_dn=self.forest_dns_dn)
diff --git a/source4/torture/drs/python/getnc_exop.py b/source4/torture/drs/python/getnc_exop.py
new file mode 100644
index 0000000..2441567
--- /dev/null
+++ b/source4/torture/drs/python/getnc_exop.py
@@ -0,0 +1,1305 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Tests various DsGetNCChanges extended operation scenarios
+#
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2016
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc2_dns_name
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN getnc_exop -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import random
+
+import drs_base
+from drs_base import AbstractLink
+
+import samba.tests
+from samba import werror, WERRORError
+
+import ldb
+from ldb import SCOPE_BASE
+
+from samba.dcerpc import drsuapi, misc, drsblobs
+from samba.drs_utils import drs_DsBind
+from samba.ndr import ndr_unpack, ndr_pack
+from functools import cmp_to_key
+from samba.common import cmp
+
+
+def _linked_attribute_compare(la1, la2):
+ """See CompareLinks() in MS-DRSR section 4.1.10.5.17"""
+ la1, la1_target = la1
+ la2, la2_target = la2
+
+ # Ascending host object GUID
+ c = cmp(ndr_pack(la1.identifier.guid), ndr_pack(la2.identifier.guid))
+ if c != 0:
+ return c
+
+ # Ascending attribute ID
+ if la1.attid != la2.attid:
+ return -1 if la1.attid < la2.attid else 1
+
+ la1_active = la1.flags & drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE
+ la2_active = la2.flags & drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE
+
+ # Ascending 'is present'
+ if la1_active != la2_active:
+ return 1 if la1_active else -1
+
+ # Ascending target object GUID
+ return cmp(ndr_pack(la1_target), ndr_pack(la2_target))
+
+
+class DrsReplicaSyncTestCase(drs_base.DrsBaseTestCase):
+ """Intended as a semi-black box test case for DsGetNCChanges
+ implementation for extended operations. It should be testing
+ how DsGetNCChanges handles different input params (mostly invalid).
+ Final goal is to make DsGetNCChanges as binary compatible to
+ Windows implementation as possible"""
+
+ def setUp(self):
+ super(DrsReplicaSyncTestCase, self).setUp()
+ self.base_dn = self.ldb_dc1.get_default_basedn()
+ self.ou = "OU=test_getncchanges%d,%s" % (random.randint(0, 4294967295),
+ self.base_dn)
+ self.ldb_dc1.add({
+ "dn": self.ou,
+ "objectclass": "organizationalUnit"})
+ (self.drs, self.drs_handle) = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+ (self.default_hwm, self.default_utdv) = self._get_highest_hwm_utdv(self.ldb_dc1)
+
+ def tearDown(self):
+ try:
+ self.ldb_dc1.delete(self.ou, ["tree_delete:1"])
+ except ldb.LdbError as e:
+ (enum, string) = e.args
+ if enum == ldb.ERR_NO_SUCH_OBJECT:
+ pass
+ super(DrsReplicaSyncTestCase, self).tearDown()
+
+ def _determine_fSMORoleOwner(self, fsmo_obj_dn):
+ """Returns (owner, not_owner) pair where:
+ owner: dns name for FSMO owner
+ not_owner: dns name for DC not owning the FSMO"""
+ # collect info to return later
+ fsmo_info_1 = {"dns_name": self.dnsname_dc1,
+ "invocation_id": self.ldb_dc1.get_invocation_id(),
+ "ntds_guid": self.ldb_dc1.get_ntds_GUID(),
+ "server_dn": self.ldb_dc1.get_serverName()}
+ fsmo_info_2 = {"dns_name": self.dnsname_dc2,
+ "invocation_id": self.ldb_dc2.get_invocation_id(),
+ "ntds_guid": self.ldb_dc2.get_ntds_GUID(),
+ "server_dn": self.ldb_dc2.get_serverName()}
+
+ msgs = self.ldb_dc1.search(scope=ldb.SCOPE_BASE, base=fsmo_info_1["server_dn"], attrs=["serverReference"])
+ fsmo_info_1["server_acct_dn"] = ldb.Dn(self.ldb_dc1, msgs[0]["serverReference"][0].decode('utf8'))
+ fsmo_info_1["rid_set_dn"] = ldb.Dn(self.ldb_dc1, "CN=RID Set") + fsmo_info_1["server_acct_dn"]
+
+ msgs = self.ldb_dc2.search(scope=ldb.SCOPE_BASE, base=fsmo_info_2["server_dn"], attrs=["serverReference"])
+ fsmo_info_2["server_acct_dn"] = ldb.Dn(self.ldb_dc2, msgs[0]["serverReference"][0].decode('utf8'))
+ fsmo_info_2["rid_set_dn"] = ldb.Dn(self.ldb_dc2, "CN=RID Set") + fsmo_info_2["server_acct_dn"]
+
+ # determine the owner dc
+ res = self.ldb_dc1.search(fsmo_obj_dn,
+ scope=SCOPE_BASE, attrs=["fSMORoleOwner"])
+ assert len(res) == 1, "Only one fSMORoleOwner value expected for %s!" % fsmo_obj_dn
+ fsmo_owner = res[0]["fSMORoleOwner"][0]
+ if fsmo_owner == self.info_dc1["dsServiceName"][0]:
+ return (fsmo_info_1, fsmo_info_2)
+ return (fsmo_info_2, fsmo_info_1)
+
+ def _check_exop_failed(self, ctr6, expected_failure):
+ self.assertEqual(ctr6.extended_ret, expected_failure)
+ #self.assertEqual(ctr6.object_count, 0)
+ #self.assertEqual(ctr6.first_object, None)
+ self.assertEqual(ctr6.more_data, False)
+ self.assertEqual(ctr6.nc_object_count, 0)
+ self.assertEqual(ctr6.nc_linked_attributes_count, 0)
+ self.assertEqual(ctr6.linked_attributes_count, 0)
+ self.assertEqual(ctr6.linked_attributes, [])
+ self.assertEqual(ctr6.drs_error[0], 0)
+
+ def test_do_single_repl(self):
+ """
+ Make sure that DRSUAPI_EXOP_REPL_OBJ never replicates more than
+ one object, even when we use DRS_GET_ANC/GET_TGT.
+ """
+
+ ou1 = "OU=get_anc1,%s" % self.ou
+ self.ldb_dc1.add({
+ "dn": ou1,
+ "objectclass": "organizationalUnit"
+ })
+ ou1_id = self._get_identifier(self.ldb_dc1, ou1)
+ ou2 = "OU=get_anc2,%s" % ou1
+ self.ldb_dc1.add({
+ "dn": ou2,
+ "objectclass": "organizationalUnit"
+ })
+ ou2_id = self._get_identifier(self.ldb_dc1, ou2)
+ dc3 = "CN=test_anc_dc_%u,%s" % (random.randint(0, 4294967295), ou2)
+ self.ldb_dc1.add({
+ "dn": dc3,
+ "objectclass": "computer",
+ "userAccountControl": "%d" % (samba.dsdb.UF_ACCOUNTDISABLE | samba.dsdb.UF_SERVER_TRUST_ACCOUNT)
+ })
+ dc3_id = self._get_identifier(self.ldb_dc1, dc3)
+
+ # Add some linked attributes (for checking GET_TGT behaviour)
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc2, ou1)
+ m["managedBy"] = ldb.MessageElement(ou2, ldb.FLAG_MOD_ADD, "managedBy")
+ self.ldb_dc1.modify(m)
+ ou1_link = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ ou1_id.guid, ou2_id.guid)
+
+ m.dn = ldb.Dn(self.ldb_dc2, dc3)
+ m["managedBy"] = ldb.MessageElement(ou2, ldb.FLAG_MOD_ADD, "managedBy")
+ self.ldb_dc1.modify(m)
+ dc3_link = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ dc3_id.guid, ou2_id.guid)
+
+ req = self._getnc_req10(dest_dsa=None,
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=ou1,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ replica_flags=drsuapi.DRSUAPI_DRS_WRIT_REP,
+ more_flags=drsuapi.DRSUAPI_DRS_GET_TGT)
+ (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 10, req)
+ self._check_ctr6(ctr, [ou1], expected_links=[ou1_link])
+
+ # DRSUAPI_DRS_WRIT_REP means that we should only replicate the dn we give (dc3).
+ # DRSUAPI_DRS_GET_ANC means that we should also replicate its ancestors, but
+ # Windows doesn't do this if we use both.
+ req = self._getnc_req10(dest_dsa=None,
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=dc3,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ replica_flags=drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ more_flags=drsuapi.DRSUAPI_DRS_GET_TGT)
+ (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 10, req)
+ self._check_ctr6(ctr, [dc3], expected_links=[dc3_link])
+
+        # Even though the ancestor of ou2 (ou1) has changed since the last hwm, and
+        # we're sending DRSUAPI_DRS_GET_ANC, the expected response is that only the
+        # single object is still replicated.
+ req = self._getnc_req10(dest_dsa=None,
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=ou2,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ replica_flags=drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ more_flags=drsuapi.DRSUAPI_DRS_GET_TGT)
+ (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 10, req)
+ self._check_ctr6(ctr, [ou2])
+
+ def test_do_full_repl_on_ou(self):
+ """
+        Make sure that a full replication of an object that is not an NC
+        fails with the right error code.
+ """
+
+ non_nc_ou = "OU=not-an-NC,%s" % self.ou
+ self.ldb_dc1.add({
+ "dn": non_nc_ou,
+ "objectclass": "organizationalUnit"
+ })
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=non_nc_ou,
+ exop=drsuapi.DRSUAPI_EXOP_NONE,
+ replica_flags=drsuapi.DRSUAPI_DRS_WRIT_REP)
+
+ try:
+ (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 8, req8)
+ self.fail("Expected DsGetNCChanges to fail with WERR_DS_CANT_FIND_EXPECTED_NC")
+ except WERRORError as e1:
+ (enum, estr) = e1.args
+ self.assertEqual(enum, werror.WERR_DS_CANT_FIND_EXPECTED_NC)
+
+ def test_InvalidNC_DummyDN_InvalidGUID_REPL_OBJ(self):
+ """Test single object replication on a totally invalid GUID fails with the right error code"""
+ fsmo_dn = self.ldb_dc1.get_schema_basedn()
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ req8 = self._exop_req8(dest_dsa="9c637462-5b8c-4467-aef2-bdb1f57bc4ef",
+ invocation_id=fsmo_owner["invocation_id"],
+ nc_dn_str="DummyDN",
+ nc_guid=misc.GUID("c2d2f745-1610-4e93-964b-d4ba73eb32f8"),
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ)
+
+ (drs, drs_handle) = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+        try:
+            (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+            self.fail("Expected DsGetNCChanges to fail with WERR_DS_DRA_BAD_DN")
+        except WERRORError as e1:
+            (enum, estr) = e1.args
+            self.assertEqual(enum, werror.WERR_DS_DRA_BAD_DN)
+
+ def test_InvalidNC_DummyDN_InvalidGUID_REPL_SECRET(self):
+ """Test single object replication on a totally invalid GUID fails with the right error code"""
+ fsmo_dn = self.ldb_dc1.get_schema_basedn()
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ req8 = self._exop_req8(dest_dsa="9c637462-5b8c-4467-aef2-bdb1f57bc4ef",
+ invocation_id=fsmo_owner["invocation_id"],
+ nc_dn_str="DummyDN",
+ nc_guid=misc.GUID("c2d2f745-1610-4e93-964b-d4ba73eb32f8"),
+                               exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET)
+
+ (drs, drs_handle) = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+        try:
+            (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+            self.fail("Expected DsGetNCChanges to fail with WERR_DS_DRA_BAD_DN")
+        except WERRORError as e1:
+            (enum, estr) = e1.args
+            self.assertEqual(enum, werror.WERR_DS_DRA_BAD_DN)
+
+ def test_InvalidNC_DummyDN_InvalidGUID_RID_ALLOC(self):
+ """Test RID Allocation on a totally invalid GUID fails with the right error code"""
+ fsmo_dn = self.ldb_dc1.get_schema_basedn()
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ req8 = self._exop_req8(dest_dsa="9c637462-5b8c-4467-aef2-bdb1f57bc4ef",
+ invocation_id=fsmo_owner["invocation_id"],
+ nc_dn_str="DummyDN",
+ nc_guid=misc.GUID("c2d2f745-1610-4e93-964b-d4ba73eb32f8"),
+ exop=drsuapi.DRSUAPI_EXOP_FSMO_RID_ALLOC)
+
+ (drs, drs_handle) = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+        try:
+            (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+            self.fail("Expected DsGetNCChanges to fail with WERR_DS_DRA_BAD_NC")
+        except WERRORError as e1:
+            (enum, estr) = e1.args
+            self.assertEqual(enum, werror.WERR_DS_DRA_BAD_NC)
+
+ def test_valid_GUID_only_REPL_OBJ(self):
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ res = self.ldb_dc1.search(base=self.ou, scope=SCOPE_BASE,
+ attrs=["objectGUID"])
+
+ guid = misc.GUID(res[0]["objectGUID"][0])
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str="",
+ nc_guid=guid,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ)
+
+ try:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ except WERRORError as e1:
+ (enum, estr) = e1.args
+ self.fail(f"Failed to call GetNCChanges with EXOP_REPL_OBJ and a GUID: {estr}")
+
+ self.assertEqual(ctr.first_object.object.identifier.guid, guid)
+
+ def test_DummyDN_valid_GUID_REPL_OBJ(self):
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ res = self.ldb_dc1.search(base=self.ou, scope=SCOPE_BASE,
+ attrs=["objectGUID"])
+
+ guid = misc.GUID(res[0]["objectGUID"][0])
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str="DummyDN",
+ nc_guid=guid,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ)
+
+ try:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ except WERRORError as e1:
+ (enum, estr) = e1.args
+ self.fail(f"Failed to call GetNCChanges with EXOP_REPL_OBJ, DummyDN and a GUID: {estr}")
+
+ self.assertEqual(ctr.first_object.object.identifier.guid, guid)
+
+ def test_DummyDN_valid_GUID_REPL_SECRET(self):
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ res = self.ldb_dc1.search(base=self.ou, scope=SCOPE_BASE,
+ attrs=["objectGUID"])
+
+ guid = misc.GUID(res[0]["objectGUID"][0])
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str="DummyDN",
+ nc_guid=guid,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET)
+
+        try:
+            (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+            self.fail("Expected DsGetNCChanges with EXOP_REPL_SECRET to fail")
+        except WERRORError as e1:
+            (enum, estr) = e1.args
+
+ # We expect to get as far as failing on the missing dest_dsa
+ self.assertEqual(enum, werror.WERR_DS_DRA_DB_ERROR)
+
+ def test_link_utdv_hwm(self):
+ """Test verify the DRS_GET_ANC behavior."""
+
+ ou1 = "OU=get_anc1,%s" % self.ou
+ self.ldb_dc1.add({
+ "dn": ou1,
+ "objectclass": "organizationalUnit"
+ })
+ ou1_id = self._get_identifier(self.ldb_dc1, ou1)
+ ou2 = "OU=get_anc2,%s" % ou1
+ self.ldb_dc1.add({
+ "dn": ou2,
+ "objectclass": "organizationalUnit"
+ })
+ ou2_id = self._get_identifier(self.ldb_dc1, ou2)
+ dc3 = "CN=test_anc_dc_%u,%s" % (random.randint(0, 4294967295), ou2)
+ self.ldb_dc1.add({
+ "dn": dc3,
+ "objectclass": "computer",
+ "userAccountControl": "%d" % (samba.dsdb.UF_ACCOUNTDISABLE | samba.dsdb.UF_SERVER_TRUST_ACCOUNT)
+ })
+ dc3_id = self._get_identifier(self.ldb_dc1, dc3)
+
+ (hwm1, utdv1) = self._check_replication([ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_WRIT_REP)
+
+ self._check_replication([ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ self._check_replication([dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
+
+ self._check_replication([ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
+ drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, ou1)
+ m["displayName"] = ldb.MessageElement("OU1", ldb.FLAG_MOD_ADD, "displayName")
+ self.ldb_dc1.modify(m)
+
+ (hwm2, utdv2) = self._check_replication([ou2, dc3, ou1],
+ drsuapi.DRSUAPI_DRS_WRIT_REP)
+
+ self._check_replication([ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ self._check_replication([dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
+
+ self._check_replication([ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
+ drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ self._check_replication([ou1],
+ drsuapi.DRSUAPI_DRS_WRIT_REP,
+ highwatermark=hwm1)
+
+ self._check_replication([ou1],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ highwatermark=hwm1)
+
+ self._check_replication([ou1],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ uptodateness_vector=utdv1)
+
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, ou2)
+ m["displayName"] = ldb.MessageElement("OU2", ldb.FLAG_MOD_ADD, "displayName")
+ self.ldb_dc1.modify(m)
+
+ (hwm3, utdv3) = self._check_replication([dc3, ou1, ou2],
+ drsuapi.DRSUAPI_DRS_WRIT_REP)
+
+ self._check_replication([ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ self._check_replication([dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
+
+ self._check_replication([ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
+ drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ self._check_replication([ou1, ou2],
+ drsuapi.DRSUAPI_DRS_WRIT_REP,
+ highwatermark=hwm1)
+
+ self._check_replication([ou1, ou2],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ highwatermark=hwm1)
+
+ self._check_replication([ou1, ou2],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ uptodateness_vector=utdv1)
+
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, self.ou)
+ m["displayName"] = ldb.MessageElement("OU", ldb.FLAG_MOD_ADD, "displayName")
+ self.ldb_dc1.modify(m)
+
+ (hwm4, utdv4) = self._check_replication([dc3, ou1, ou2, self.ou],
+ drsuapi.DRSUAPI_DRS_WRIT_REP)
+
+ self._check_replication([self.ou, ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ self._check_replication([dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
+
+ self._check_replication([self.ou, ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
+ drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ self._check_replication([self.ou, ou2],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ uptodateness_vector=utdv2)
+
+ cn3 = "CN=get_anc3,%s" % ou2
+ self.ldb_dc1.add({
+ "dn": cn3,
+ "objectclass": "container",
+ })
+ cn3_id = self._get_identifier(self.ldb_dc1, cn3)
+
+ (hwm5, utdv5) = self._check_replication([dc3, ou1, ou2, self.ou, cn3],
+ drsuapi.DRSUAPI_DRS_WRIT_REP)
+
+ self._check_replication([self.ou, ou1, ou2, dc3, cn3],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ self._check_replication([dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
+
+ self._check_replication([self.ou, ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
+ drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, ou2)
+ m["managedBy"] = ldb.MessageElement(dc3, ldb.FLAG_MOD_ADD, "managedBy")
+ self.ldb_dc1.modify(m)
+ ou2_managedBy_dc3 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ ou2_id.guid, dc3_id.guid)
+
+ (hwm6, utdv6) = self._check_replication([dc3, ou1, self.ou, cn3, ou2],
+ drsuapi.DRSUAPI_DRS_WRIT_REP,
+ expected_links=[ou2_managedBy_dc3])
+
+ # Can fail against Windows due to equal precedence of dc3, cn3
+ self._check_replication([self.ou, ou1, ou2, dc3, cn3],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ expected_links=[ou2_managedBy_dc3])
+
+ self._check_replication([dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
+
+ self._check_replication([self.ou, ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
+ drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_WRIT_REP,
+ uptodateness_vector=utdv5,
+ expected_links=[ou2_managedBy_dc3])
+
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
+ uptodateness_vector=utdv5)
+
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
+ uptodateness_vector=utdv5)
+
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, dc3)
+ m["managedBy"] = ldb.MessageElement(ou1, ldb.FLAG_MOD_ADD, "managedBy")
+ self.ldb_dc1.modify(m)
+ dc3_managedBy_ou1 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ dc3_id.guid, ou1_id.guid)
+
+ (hwm7, utdv7) = self._check_replication([ou1, self.ou, cn3, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_WRIT_REP,
+ expected_links=[ou2_managedBy_dc3, dc3_managedBy_ou1])
+
+ # Can fail against Windows due to equal precedence of dc3, cn3
+ # self._check_replication([self.ou,ou1,ou2,dc3,cn3],
+ # drsuapi.DRSUAPI_DRS_WRIT_REP|
+ # drsuapi.DRSUAPI_DRS_GET_ANC,
+ # expected_links=[ou2_managedBy_dc3,dc3_managedBy_ou1])
+
+ self._check_replication([dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
+ expected_links=[dc3_managedBy_ou1])
+
+ self._check_replication([dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
+ expected_links=[dc3_managedBy_ou1])
+
+ self._check_replication([self.ou, ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ expected_links=[dc3_managedBy_ou1])
+
+ # GET_TGT seems to override DRS_CRITICAL_ONLY and also returns any
+ # object(s) that relate to the linked attributes (similar to GET_ANC)
+ self._check_replication([ou1, dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
+ more_flags=drsuapi.DRSUAPI_DRS_GET_TGT,
+ expected_links=[dc3_managedBy_ou1], dn_ordered=False)
+
+        # Change DC3's managedBy to OU2 instead of OU1
+        # Note that the OU1 managedBy linked attribute will still exist as
+        # a tombstone value (and so will still be returned in the replication)
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, dc3)
+ m["managedBy"] = ldb.MessageElement(ou2, ldb.FLAG_MOD_REPLACE, "managedBy")
+ self.ldb_dc1.modify(m)
+ dc3_managedBy_ou1.flags &= ~drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE
+ dc3_managedBy_ou2 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ dc3_id.guid, ou2_id.guid)
+
+ (hwm8, utdv8) = self._check_replication([ou1, self.ou, cn3, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_WRIT_REP,
+ expected_links=[ou2_managedBy_dc3, dc3_managedBy_ou1, dc3_managedBy_ou2])
+
+ # Can fail against Windows due to equal precedence of dc3, cn3
+ # self._check_replication([self.ou,ou1,ou2,dc3,cn3],
+ # drsuapi.DRSUAPI_DRS_WRIT_REP|
+ # drsuapi.DRSUAPI_DRS_GET_ANC,
+ # expected_links=[ou2_managedBy_dc3,dc3_managedBy_ou1,dc3_managedBy_ou2])
+
+ self._check_replication([dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2])
+
+ self._check_replication([self.ou, ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2])
+
+ # GET_TGT will also return any DNs referenced by the linked attributes
+ # (including the Tombstone attribute)
+ self._check_replication([ou1, ou2, dc3],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
+ more_flags=drsuapi.DRSUAPI_DRS_GET_TGT,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2], dn_ordered=False)
+
+ # Use the highwater-mark prior to changing ManagedBy - this should
+ # only return the old/Tombstone and new linked attributes (we already
+ # know all the DNs)
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_WRIT_REP,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
+ highwatermark=hwm7)
+
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
+ highwatermark=hwm7)
+
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
+ highwatermark=hwm7)
+
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
+ highwatermark=hwm7)
+
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
+ more_flags=drsuapi.DRSUAPI_DRS_GET_TGT,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
+ highwatermark=hwm7)
+
+ # Repeat the above set of tests using the uptodateness_vector
+ # instead of the highwater-mark
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_WRIT_REP,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
+ uptodateness_vector=utdv7)
+
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
+ uptodateness_vector=utdv7)
+
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
+ uptodateness_vector=utdv7)
+
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
+ uptodateness_vector=utdv7)
+
+ self._check_replication([],
+ drsuapi.DRSUAPI_DRS_CRITICAL_ONLY,
+ more_flags=drsuapi.DRSUAPI_DRS_GET_TGT,
+ expected_links=[dc3_managedBy_ou1, dc3_managedBy_ou2],
+ uptodateness_vector=utdv7)
+
+ def test_FSMONotOwner(self):
+ """Test role transfer with against DC not owner of the role"""
+ fsmo_dn = self.ldb_dc1.get_schema_basedn()
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ req8 = self._exop_req8(dest_dsa=fsmo_owner["ntds_guid"],
+ invocation_id=fsmo_not_owner["invocation_id"],
+ nc_dn_str=fsmo_dn,
+ exop=drsuapi.DRSUAPI_EXOP_FSMO_REQ_ROLE)
+
+ (drs, drs_handle) = self._ds_bind(fsmo_not_owner["dns_name"])
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ self.assertEqual(level, 6, "Expected level 6 response!")
+ self._check_exop_failed(ctr, drsuapi.DRSUAPI_EXOP_ERR_FSMO_NOT_OWNER)
+ self.assertEqual(ctr.source_dsa_guid, misc.GUID(fsmo_not_owner["ntds_guid"]))
+ self.assertEqual(ctr.source_dsa_invocation_id, misc.GUID(fsmo_not_owner["invocation_id"]))
+
+ def test_InvalidDestDSA(self):
+ """Test role transfer with invalid destination DSA guid"""
+ fsmo_dn = self.ldb_dc1.get_schema_basedn()
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ req8 = self._exop_req8(dest_dsa="9c637462-5b8c-4467-aef2-bdb1f57bc4ef",
+ invocation_id=fsmo_owner["invocation_id"],
+ nc_dn_str=fsmo_dn,
+ exop=drsuapi.DRSUAPI_EXOP_FSMO_REQ_ROLE)
+
+ (drs, drs_handle) = self._ds_bind(fsmo_owner["dns_name"])
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ self.assertEqual(level, 6, "Expected level 6 response!")
+ self._check_exop_failed(ctr, drsuapi.DRSUAPI_EXOP_ERR_UNKNOWN_CALLER)
+ self.assertEqual(ctr.source_dsa_guid, misc.GUID(fsmo_owner["ntds_guid"]))
+ self.assertEqual(ctr.source_dsa_invocation_id, misc.GUID(fsmo_owner["invocation_id"]))
+
+ def test_InvalidDestDSA_and_GUID(self):
+ """Test role transfer with invalid destination DSA guid"""
+ fsmo_dn = self.ldb_dc1.get_schema_basedn()
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ req8 = self._exop_req8(dest_dsa="9c637462-5b8c-4467-aef2-bdb1f57bc4ef",
+ invocation_id=fsmo_owner["invocation_id"],
+ nc_dn_str="DummyDN",
+ nc_guid=misc.GUID("c2d2f745-1610-4e93-964b-d4ba73eb32f8"),
+ exop=drsuapi.DRSUAPI_EXOP_FSMO_REQ_ROLE)
+
+ (drs, drs_handle) = self._ds_bind(fsmo_owner["dns_name"])
+ try:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ except WERRORError as e1:
+ (enum, estr) = e1.args
+ self.fail(f"DsGetNCChanges failed with {estr}")
+ self.assertEqual(level, 6, "Expected level 6 response!")
+ self._check_exop_failed(ctr, drsuapi.DRSUAPI_EXOP_ERR_UNKNOWN_CALLER)
+ self.assertEqual(ctr.source_dsa_guid, misc.GUID(fsmo_owner["ntds_guid"]))
+ self.assertEqual(ctr.source_dsa_invocation_id, misc.GUID(fsmo_owner["invocation_id"]))
+
+ def test_InvalidDestDSA_and_GUID_RID_ALLOC(self):
+ """Test role transfer with invalid destination DSA guid"""
+ fsmo_dn = self.ldb_dc1.get_schema_basedn()
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ req8 = self._exop_req8(dest_dsa="9c637462-5b8c-4467-aef2-bdb1f57bc4ef",
+ invocation_id=fsmo_owner["invocation_id"],
+ nc_dn_str="DummyDN",
+ nc_guid=misc.GUID("c2d2f745-1610-4e93-964b-d4ba73eb32f8"),
+ exop=drsuapi.DRSUAPI_EXOP_FSMO_RID_ALLOC)
+
+ (drs, drs_handle) = self._ds_bind(fsmo_owner["dns_name"])
+ try:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ except WERRORError as e1:
+ (enum, estr) = e1.args
+ self.fail(f"DsGetNCChanges failed with {estr}")
+ self.assertEqual(level, 6, "Expected level 6 response!")
+ self._check_exop_failed(ctr, drsuapi.DRSUAPI_EXOP_ERR_UNKNOWN_CALLER)
+ self.assertEqual(ctr.source_dsa_guid, misc.GUID(fsmo_owner["ntds_guid"]))
+ self.assertEqual(ctr.source_dsa_invocation_id, misc.GUID(fsmo_owner["invocation_id"]))
+
+
+class DrsReplicaPrefixMapTestCase(drs_base.DrsBaseTestCase):
+ def setUp(self):
+ super(DrsReplicaPrefixMapTestCase, self).setUp()
+ self.base_dn = self.ldb_dc1.get_default_basedn()
+ self.ou = "ou=pfm_exop%d,%s" % (random.randint(0, 4294967295),
+ self.base_dn)
+ self.ldb_dc1.add({
+ "dn": self.ou,
+ "objectclass": "organizationalUnit"})
+ self.user = "cn=testuser,%s" % self.ou
+ self.ldb_dc1.add({
+ "dn": self.user,
+ "objectclass": "user"})
+
+ def tearDown(self):
+ super(DrsReplicaPrefixMapTestCase, self).tearDown()
+ try:
+ self.ldb_dc1.delete(self.ou, ["tree_delete:1"])
+ except ldb.LdbError as e2:
+ (enum, string) = e2.args
+            if enum != ldb.ERR_NO_SUCH_OBJECT:
+                raise
+
+ def test_missing_prefix_map_dsa(self):
+ partial_attribute_set = self.get_partial_attribute_set()
+
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ partial_attribute_set=partial_attribute_set)
+
+ try:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ self.assertEqual(ctr.extended_ret, drsuapi.DRSUAPI_EXOP_ERR_SUCCESS)
+ except RuntimeError:
+ self.fail("Missing prefixmap shouldn't have triggered an error")
+
+ def test_invalid_prefix_map_attid(self):
+ # Request for invalid attid
+ partial_attribute_set = self.get_partial_attribute_set([99999])
+
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ try:
+ pfm = self._samdb_fetch_pfm_and_schi()
+ except KeyError:
+ # On Windows, prefixMap isn't available over LDAP
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ)
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ pfm = ctr.mapping_ctr
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ partial_attribute_set=partial_attribute_set,
+ mapping_ctr=pfm)
+
+ try:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ self.fail("Invalid attid (99999) should have triggered an error")
+ except RuntimeError as e3:
+ (ecode, emsg) = e3.args
+ self.assertEqual(ecode, 0x000020E2, "Error code should have been "
+ "WERR_DS_DRA_SCHEMA_MISMATCH")
+
+ def test_secret_prefix_map_attid(self):
+ # Request for a secret attid
+ partial_attribute_set = self.get_partial_attribute_set([drsuapi.DRSUAPI_ATTID_unicodePwd])
+
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ try:
+ pfm = self._samdb_fetch_pfm_and_schi()
+ except KeyError:
+ # On Windows, prefixMap isn't available over LDAP
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ)
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ pfm = ctr.mapping_ctr
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ partial_attribute_set=partial_attribute_set,
+ mapping_ctr=pfm)
+
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+
+ found = False
+ for attr in ctr.first_object.object.attribute_ctr.attributes:
+ if attr.attid == drsuapi.DRSUAPI_ATTID_unicodePwd:
+ found = True
+ break
+
+ self.assertTrue(found, "Ensure we get the unicodePwd attribute back")
+
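+        # Note on the binary_oid values matched below: they are the BER-encoded
+        # OID prefixes (every arc except the last one). 2.5.4 encodes to
+        # [85, 4] (since 40*2 + 5 = 85) and 1.2.840.113556.1.4 encodes to
+        # [42, 134, 72, 134, 247, 20, 1, 4].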
+ for i, mapping in enumerate(pfm.mappings):
+ # OID: 2.5.4.*
+ # objectClass: 2.5.4.0
+ if mapping.oid.binary_oid == [85, 4]:
+ idx1 = i
+ # OID: 1.2.840.113556.1.4.*
+ # unicodePwd: 1.2.840.113556.1.4.90
+ elif mapping.oid.binary_oid == [42, 134, 72, 134, 247, 20, 1, 4]:
+ idx2 = i
+
+ (pfm.mappings[idx1].id_prefix,
+ pfm.mappings[idx2].id_prefix) = (pfm.mappings[idx2].id_prefix,
+ pfm.mappings[idx1].id_prefix)
+
+ tmp = pfm.mappings
+ tmp[idx1], tmp[idx2] = tmp[idx2], tmp[idx1]
+ pfm.mappings = tmp
+
+ # 90 for unicodePwd (with new prefix = 0)
+ # 589824, 589827 for objectClass and CN
+ # Use of three ensures sorting is correct
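+        # Roughly, attid = (prefix-table index << 16) | last OID arc (large arcs
+        # get an extra offset). With the two prefixes swapped above, unicodePwd
+        # (1.2.840.113556.1.4.90) becomes 90, while objectClass and cn (2.5.4.0,
+        # 2.5.4.3) become 0x90000 and 0x90003, i.e. 589824 and 589827.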
+ partial_attribute_set = self.get_partial_attribute_set([90, 589824, 589827])
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ partial_attribute_set=partial_attribute_set,
+ mapping_ctr=pfm)
+
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+
+ found = False
+ for attr in ctr.first_object.object.attribute_ctr.attributes:
+ if attr.attid == drsuapi.DRSUAPI_ATTID_unicodePwd:
+ found = True
+ break
+
+ self.assertTrue(found, "Ensure we get the unicodePwd attribute back")
+
+ def test_regular_prefix_map_attid(self):
+ # Request for a regular (non-secret) attid
+ partial_attribute_set = self.get_partial_attribute_set([drsuapi.DRSUAPI_ATTID_name])
+
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ try:
+ pfm = self._samdb_fetch_pfm_and_schi()
+ except KeyError:
+ # On Windows, prefixMap isn't available over LDAP
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ)
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ pfm = ctr.mapping_ctr
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ partial_attribute_set=partial_attribute_set,
+ mapping_ctr=pfm)
+
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+
+ found = False
+ for attr in ctr.first_object.object.attribute_ctr.attributes:
+ if attr.attid == drsuapi.DRSUAPI_ATTID_name:
+ found = True
+ break
+
+ self.assertTrue(found, "Ensure we get the name attribute back")
+
+ for i, mapping in enumerate(pfm.mappings):
+ # OID: 2.5.4.*
+ # objectClass: 2.5.4.0
+ if mapping.oid.binary_oid == [85, 4]:
+ idx1 = i
+ # OID: 1.2.840.113556.1.4.*
+ # name: 1.2.840.113556.1.4.1
+ elif mapping.oid.binary_oid == [42, 134, 72, 134, 247, 20, 1, 4]:
+ idx2 = i
+
+ (pfm.mappings[idx1].id_prefix,
+ pfm.mappings[idx2].id_prefix) = (pfm.mappings[idx2].id_prefix,
+ pfm.mappings[idx1].id_prefix)
+
+ tmp = pfm.mappings
+ tmp[idx1], tmp[idx2] = tmp[idx2], tmp[idx1]
+ pfm.mappings = tmp
+
+ # 1 for name (with new prefix = 0)
+ partial_attribute_set = self.get_partial_attribute_set([1])
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ partial_attribute_set=partial_attribute_set,
+ mapping_ctr=pfm)
+
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+
+ found = False
+ for attr in ctr.first_object.object.attribute_ctr.attributes:
+ if attr.attid == drsuapi.DRSUAPI_ATTID_name:
+ found = True
+ break
+
+ self.assertTrue(found, "Ensure we get the name attribute back")
+
+ def test_regular_prefix_map_ex_attid(self):
+ # Request for a regular (non-secret) attid
+ partial_attribute_set = self.get_partial_attribute_set([drsuapi.DRSUAPI_ATTID_name])
+ partial_attribute_set_ex = self.get_partial_attribute_set([drsuapi.DRSUAPI_ATTID_unicodePwd])
+
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ try:
+ pfm = self._samdb_fetch_pfm_and_schi()
+ except KeyError:
+ # On Windows, prefixMap isn't available over LDAP
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ)
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ pfm = ctr.mapping_ctr
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ partial_attribute_set=partial_attribute_set,
+ partial_attribute_set_ex=partial_attribute_set_ex,
+ mapping_ctr=pfm)
+
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+
+ found = False
+ for attr in ctr.first_object.object.attribute_ctr.attributes:
+ if attr.attid == drsuapi.DRSUAPI_ATTID_name:
+ found = True
+ break
+
+ self.assertTrue(found, "Ensure we get the name attribute back")
+
+ found = False
+ for attr in ctr.first_object.object.attribute_ctr.attributes:
+ if attr.attid == drsuapi.DRSUAPI_ATTID_unicodePwd:
+ found = True
+ break
+
+ self.assertTrue(found, "Ensure we get the unicodePwd attribute back")
+
+ for i, mapping in enumerate(pfm.mappings):
+ # OID: 2.5.4.*
+ # objectClass: 2.5.4.0
+ if mapping.oid.binary_oid == [85, 4]:
+ idx1 = i
+ # OID: 1.2.840.113556.1.4.*
+ # name: 1.2.840.113556.1.4.1
+ # unicodePwd: 1.2.840.113556.1.4.90
+ elif mapping.oid.binary_oid == [42, 134, 72, 134, 247, 20, 1, 4]:
+ idx2 = i
+
+ (pfm.mappings[idx1].id_prefix,
+ pfm.mappings[idx2].id_prefix) = (pfm.mappings[idx2].id_prefix,
+ pfm.mappings[idx1].id_prefix)
+
+ tmp = pfm.mappings
+ tmp[idx1], tmp[idx2] = tmp[idx2], tmp[idx1]
+ pfm.mappings = tmp
+
+ # 1 for name (with new prefix = 0)
+ partial_attribute_set = self.get_partial_attribute_set([1])
+ # 90 for unicodePwd (with new prefix = 0)
+ # HOWEVER: Windows doesn't seem to respect incoming maps for PartialAttrSetEx
+ partial_attribute_set_ex = self.get_partial_attribute_set([drsuapi.DRSUAPI_ATTID_unicodePwd])
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.user,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ partial_attribute_set=partial_attribute_set,
+ partial_attribute_set_ex=partial_attribute_set_ex,
+ mapping_ctr=pfm)
+
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+
+ found = False
+ for attr in ctr.first_object.object.attribute_ctr.attributes:
+ if attr.attid == drsuapi.DRSUAPI_ATTID_name:
+ found = True
+ break
+
+ self.assertTrue(found, "Ensure we get the name attribute back")
+
+ found = False
+ for attr in ctr.first_object.object.attribute_ctr.attributes:
+ if attr.attid == drsuapi.DRSUAPI_ATTID_unicodePwd:
+ found = True
+ break
+
+ self.assertTrue(found, "Ensure we get the unicodePwd attribute back")
+
+ def _samdb_fetch_pfm_and_schi(self):
+ """Fetch prefixMap and schemaInfo stored in SamDB using LDB connection"""
+ samdb = self.ldb_dc1
+ res = samdb.search(base=samdb.get_schema_basedn(), scope=SCOPE_BASE,
+ attrs=["prefixMap", "schemaInfo"])
+
+ pfm = ndr_unpack(drsblobs.prefixMapBlob,
+ res[0]['prefixMap'][0])
+
+ schi = drsuapi.DsReplicaOIDMapping()
+ schi.id_prefix = 0
+ if 'schemaInfo' in res[0]:
+ binary_oid = [x if isinstance(x, int) else ord(x) for x in res[0]['schemaInfo'][0]]
+ schi.oid.length = len(binary_oid)
+ schi.oid.binary_oid = binary_oid
+ else:
+ schema_info = drsblobs.schemaInfoBlob()
+ schema_info.revision = 0
+ schema_info.marker = 0xFF
+ schema_info.invocation_id = misc.GUID(samdb.get_invocation_id())
+
+ binary_oid = [x if isinstance(x, int) else ord(x) for x in ndr_pack(schema_info)]
+ # you have to set the length before setting binary_oid
+ schi.oid.length = len(binary_oid)
+ schi.oid.binary_oid = binary_oid
+
+ pfm.ctr.mappings = pfm.ctr.mappings + [schi]
+ pfm.ctr.num_mappings += 1
+ return pfm.ctr
+
+
+class DrsReplicaSyncSortTestCase(drs_base.DrsBaseTestCase):
+ def setUp(self):
+ super(DrsReplicaSyncSortTestCase, self).setUp()
+ self.base_dn = self.ldb_dc1.get_default_basedn()
+ self.ou = "ou=sort_exop%d,%s" % (random.randint(0, 4294967295),
+ self.base_dn)
+ self.ldb_dc1.add({
+ "dn": self.ou,
+ "objectclass": "organizationalUnit"})
+
+ def tearDown(self):
+ super(DrsReplicaSyncSortTestCase, self).tearDown()
+        # tidy up groups and users
+ try:
+ self.ldb_dc1.delete(self.ou, ["tree_delete:1"])
+ except ldb.LdbError as e4:
+ (enum, string) = e4.args
+            if enum != ldb.ERR_NO_SUCH_OBJECT:
+                raise
+
+ def add_linked_attribute(self, src, dest, attr='member'):
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, src)
+ m[attr] = ldb.MessageElement(dest, ldb.FLAG_MOD_ADD, attr)
+ self.ldb_dc1.modify(m)
+
+ def remove_linked_attribute(self, src, dest, attr='member'):
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, src)
+ m[attr] = ldb.MessageElement(dest, ldb.FLAG_MOD_DELETE, attr)
+ self.ldb_dc1.modify(m)
+
+ def test_sort_behaviour_single_object(self):
+ """Testing sorting behaviour on single objects"""
+
+ user1_dn = "cn=test_user1,%s" % self.ou
+ user2_dn = "cn=test_user2,%s" % self.ou
+ user3_dn = "cn=test_user3,%s" % self.ou
+ group_dn = "cn=test_group,%s" % self.ou
+
+ self.ldb_dc1.add({"dn": user1_dn, "objectclass": "user"})
+ self.ldb_dc1.add({"dn": user2_dn, "objectclass": "user"})
+ self.ldb_dc1.add({"dn": user3_dn, "objectclass": "user"})
+ self.ldb_dc1.add({"dn": group_dn, "objectclass": "group"})
+
+ u1_guid = misc.GUID(self.ldb_dc1.search(base=user1_dn,
+ attrs=["objectGUID"])[0]['objectGUID'][0])
+ u2_guid = misc.GUID(self.ldb_dc1.search(base=user2_dn,
+ attrs=["objectGUID"])[0]['objectGUID'][0])
+ u3_guid = misc.GUID(self.ldb_dc1.search(base=user3_dn,
+ attrs=["objectGUID"])[0]['objectGUID'][0])
+ g_guid = misc.GUID(self.ldb_dc1.search(base=group_dn,
+ attrs=["objectGUID"])[0]['objectGUID'][0])
+
+ self.add_linked_attribute(group_dn, user1_dn,
+ attr='member')
+ self.add_linked_attribute(group_dn, user2_dn,
+ attr='member')
+ self.add_linked_attribute(group_dn, user3_dn,
+ attr='member')
+ self.add_linked_attribute(group_dn, user1_dn,
+ attr='managedby')
+ self.add_linked_attribute(group_dn, user2_dn,
+ attr='nonSecurityMember')
+ self.add_linked_attribute(group_dn, user3_dn,
+ attr='nonSecurityMember')
+
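+        # This link is added as active here; later the test removes the value
+        # and reuses this object with its flags cleared to represent the
+        # now-inactive (tombstoned) link, hence the name.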
+ set_inactive = AbstractLink(drsuapi.DRSUAPI_ATTID_nonSecurityMember,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ g_guid, u3_guid)
+
+ expected_links = set([set_inactive,
+ AbstractLink(drsuapi.DRSUAPI_ATTID_member,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ g_guid,
+ u1_guid),
+ AbstractLink(drsuapi.DRSUAPI_ATTID_member,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ g_guid,
+ u2_guid),
+ AbstractLink(drsuapi.DRSUAPI_ATTID_member,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ g_guid,
+ u3_guid),
+ AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ g_guid,
+ u1_guid),
+ AbstractLink(drsuapi.DRSUAPI_ATTID_nonSecurityMember,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ g_guid,
+ u2_guid),
+ ])
+
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=group_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ)
+
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+
+ no_inactive = []
+ for link in ctr.linked_attributes:
+ target_guid = ndr_unpack(drsuapi.DsReplicaObjectIdentifier3,
+ link.value.blob).guid
+ no_inactive.append((link, target_guid))
+ self.assertTrue(AbstractLink(link.attid, link.flags,
+ link.identifier.guid,
+ target_guid) in expected_links)
+
+ no_inactive.sort(key=cmp_to_key(_linked_attribute_compare))
+
+ # assert the two arrays are the same
+ self.assertEqual(len(expected_links), ctr.linked_attributes_count)
+ self.assertEqual([x[0] for x in no_inactive], ctr.linked_attributes)
+
+ self.remove_linked_attribute(group_dn, user3_dn,
+ attr='nonSecurityMember')
+
+ # Set the link inactive
+ expected_links.remove(set_inactive)
+ set_inactive.flags = 0
+ expected_links.add(set_inactive)
+
+ has_inactive = []
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ for link in ctr.linked_attributes:
+ target_guid = ndr_unpack(drsuapi.DsReplicaObjectIdentifier3,
+ link.value.blob).guid
+ has_inactive.append((link, target_guid))
+ self.assertTrue(AbstractLink(link.attid, link.flags,
+ link.identifier.guid,
+ target_guid) in expected_links)
+
+ has_inactive.sort(key=cmp_to_key(_linked_attribute_compare))
+
+ # assert the two arrays are the same
+ self.assertEqual(len(expected_links), ctr.linked_attributes_count)
+ self.assertEqual([x[0] for x in has_inactive], ctr.linked_attributes)
+
+ def test_sort_behaviour_ncchanges(self):
+ """Testing sorting behaviour on a group of objects."""
+ user1_dn = "cn=test_user1,%s" % self.ou
+ group_dn = "cn=test_group,%s" % self.ou
+ self.ldb_dc1.add({"dn": user1_dn, "objectclass": "user"})
+ self.ldb_dc1.add({"dn": group_dn, "objectclass": "group"})
+
+ self.add_linked_attribute(group_dn, user1_dn,
+ attr='member')
+
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ # Make sure the max objects count is high enough
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.base_dn,
+ replica_flags=0,
+ max_objects=100,
+ exop=drsuapi.DRSUAPI_EXOP_NONE)
+
+ # Loop until we get linked attributes, or we get to the end.
+ # Samba sends linked attributes at the end, unlike Windows.
+ while True:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ if ctr.more_data == 0 or ctr.linked_attributes_count != 0:
+ break
+ req8.highwatermark = ctr.new_highwatermark
+
+ self.assertTrue(ctr.linked_attributes_count != 0)
+
+ no_inactive = []
+ for link in ctr.linked_attributes:
+ try:
+ target_guid = ndr_unpack(drsuapi.DsReplicaObjectIdentifier3,
+ link.value.blob).guid
+            except Exception:
+                # fall back to the binary-DN form of the link identifier
+ target_guid = ndr_unpack(drsuapi.DsReplicaObjectIdentifier3Binary,
+ link.value.blob).guid
+ no_inactive.append((link, target_guid))
+
+ no_inactive.sort(key=cmp_to_key(_linked_attribute_compare))
+
+ # assert the two arrays are the same
+ self.assertEqual([x[0] for x in no_inactive], ctr.linked_attributes)
diff --git a/source4/torture/drs/python/getnc_schema.py b/source4/torture/drs/python/getnc_schema.py
new file mode 100644
index 0000000..5b67b29
--- /dev/null
+++ b/source4/torture/drs/python/getnc_schema.py
@@ -0,0 +1,308 @@
+import drs_base
+import ldb
+import time
+import random
+import os
+
+break_me = os.getenv("PLEASE_BREAK_MY_WINDOWS") == "1"
+assert break_me, ("This test breaks Windows active directory after "
+ "a few runs. Set PLEASE_BREAK_MY_WINDOWS=1 to run.")
+
+# This test runs against Windows. To run, set up two Windows AD DCs, join one
+# to the other, and make sure the passwords are the same. SMB_CONF_PATH must
+# also be set to any smb.conf file. Set DC1 to the PDC's hostname, and DC2 to
+# the joined DC's hostname. Example:
+# PLEASE_BREAK_MY_WINDOWS=1
+# DC1=pdc DC2=joindc
+# SMB_CONF_PATH=st/ad_dc/etc/smb.conf
+# PYTHONPATH=$PYTHONPATH:./source4/torture/drs/python
+# python3 ./source4/scripting/bin/subunitrun getnc_schema
+# -UAdministrator%Password
+
+class SchemaReplicationTests(drs_base.DrsBaseTestCase):
+
+ def setUp(self):
+ super(SchemaReplicationTests, self).setUp()
+ self.creds = self.get_credentials()
+ self.cmdline_auth = "-U{}%{}".format(self.creds.get_username(),
+ self.creds.get_password())
+
+ self.from_ldb = self.ldb_dc1
+ self.dest_ldb = self.ldb_dc2
+ self._disable_inbound_repl(self.url_dc1)
+ self._disable_all_repl(self.url_dc1)
+ self.free_offset = 0
+
+ def tearDown(self):
+ self._enable_inbound_repl(self.url_dc1)
+        self._enable_all_repl(self.url_dc1)
+        super(SchemaReplicationTests, self).tearDown()
+
+ def do_repl(self, partition_dn):
+ self._enable_inbound_repl(self.url_dc1)
+ self._enable_all_repl(self.url_dc1)
+
+ samba_tool_cmd = ["drs", "replicate", self.url_dc2, self.url_dc1]
+ samba_tool_cmd += [partition_dn]
+ username = self.creds.get_username()
+ password = self.creds.get_password()
+ samba_tool_cmd += ["-U{0}%{1}".format(username, password)]
+
+ (result, out, err) = self.runsubcmd(*samba_tool_cmd)
+
+ try:
+ self.assertCmdSuccess(result, out, err)
+ except AssertionError:
+ print("Failed repl, retrying in 10s")
+ time.sleep(10)
+ (result, out, err) = self.runsubcmd(*samba_tool_cmd)
+
+ self._disable_inbound_repl(self.url_dc1)
+ self._disable_all_repl(self.url_dc1)
+
+ self.assertCmdSuccess(result, out, err)
+
+ # Get a unique prefix for some search expression like "([att]=[pref]{i}*)"
+ def get_unique(self, expr_templ):
+ found = True
+ while found:
+ i = random.randint(0, 65535)
+ res = self.from_ldb.search(base=self.schema_dn,
+ scope=ldb.SCOPE_SUBTREE,
+ expression=expr_templ.format(i=i))
+ found = len(res) > 0
+
+ return str(i)
+
+ def unique_gov_id_prefix(self):
+ prefix = "1.3.6.1.4.1.7165.4.6.2.8."
+ return prefix + self.get_unique("(governsId=" + prefix + "{i}.*)")
+
+ def unique_cn_prefix(self, prefix="testobj"):
+ return prefix + self.get_unique("(cn=" + prefix + "{i}x*)") + "x"
+
+ # Make test schema classes linked to each other in a line, then modify
+ # them in reverse order so when we repl, a link crosses the chunk
+ # boundary. Chunk size is 133 by default so we do 150.
+ def test_poss_superiors_across_chunk(self):
+ num_schema_objects_to_add = 150
+ class_name = self.unique_cn_prefix()
+
+ ldif_template = """
+dn: CN={class_name}{i},{schema_dn}
+objectClass: top
+objectClass: classSchema
+adminDescription: {class_name}{i}
+adminDisplayName: {class_name}{i}
+cn: {class_name}{i}
+governsId: {gov_id}.{i}
+instanceType: 4
+objectClassCategory: 1
+systemFlags: 16
+systemOnly: FALSE
+"""
+
+ ldif_kwargs = {'class_name': class_name,
+ 'schema_dn': self.schema_dn}
+ gov_id = self.unique_gov_id_prefix()
+ ldif = ldif_template.format(i=0, gov_id=gov_id, **ldif_kwargs)
+ self.from_ldb.add_ldif(ldif)
+
+ ldif_template += "systemPossSuperiors: {possSup}\n"
+
+ ids = list(range(num_schema_objects_to_add))
+ got_no_such_attrib = False
+ for i in ids[1:]:
+ last_class_name = class_name + str(i-1)
+ ldif = ldif_template.format(i=i, gov_id=gov_id,
+ possSup=last_class_name,
+ **ldif_kwargs)
+
+ try:
+ self.from_ldb.add_ldif(ldif)
+ if got_no_such_attrib:
+ self.from_ldb.set_schema_update_now()
+ except ldb.LdbError as e:
+ if e.args[0] != ldb.ERR_NO_SUCH_ATTRIBUTE:
+ self.fail(e)
+ if got_no_such_attrib:
+ self.fail(("got NO_SUCH_ATTRIB even after "
+ "setting schemaUpdateNow", str(e)))
+ print("got NO_SUCH_ATTRIB, trying schemaUpdateNow")
+ got_no_such_attrib = True
+ self.from_ldb.set_schema_update_now()
+ self.from_ldb.add_ldif(ldif)
+ self.from_ldb.set_schema_update_now()
+
+ ldif_template = """
+dn: CN={class_name}{i},{schema_dn}
+changetype: modify
+replace: adminDescription
+adminDescription: new_description
+"""
+
+ for i in reversed(ids):
+ ldif = ldif_template.format(i=i, **ldif_kwargs)
+ self.from_ldb.modify_ldif(ldif)
+
+ self.do_repl(self.schema_dn)
+
+ dn_templ = "CN={class_name}{i},{schema_dn}"
+ for i in ids:
+ dn = dn_templ.format(i=i, **ldif_kwargs)
+ res = self.dest_ldb.search(base=dn, scope=ldb.SCOPE_BASE)
+ self.assertEqual(len(res), 1)
+
+    # Test the method of adding linked attributes in the schema partition
+    # that later tests rely on.
+ def test_create_linked_attribute_in_schema(self):
+ # Make an object outside of the schema partition that we can link to
+ user_name = self.unique_cn_prefix("user")
+ user_dn = "CN={},CN=Users,{}".format(user_name, self.domain_dn)
+
+ ldif_template = """
+dn: {user_dn}
+objectClass: person
+objectClass: user"""
+ ldif = ldif_template.format(user_dn=user_dn)
+ self.from_ldb.add_ldif(ldif)
+
+ # Make test class name unique so test can run multiple times
+ class_name = self.unique_cn_prefix("class")
+
+ kwargs = {'class_name': class_name,
+ 'schema_dn': self.schema_dn,
+ 'user_dn': user_dn}
+
+        # Add an auxiliary class (objectClassCategory 3) to the schema and give
+        # it managedBy so we can create schema objects with linked attributes.
+ ldif_template = """
+dn: CN={class_name},{schema_dn}
+objectClass: classSchema
+governsId: {gov_id}.0
+instanceType: 4
+systemFlags: 16
+systemOnly: FALSE
+objectClassCategory: 3
+mayContain: managedBy
+"""
+
+ gov_id = self.unique_gov_id_prefix()
+ ldif = ldif_template.format(gov_id=gov_id, **kwargs)
+ self.from_ldb.add_ldif(ldif)
+
+ # Now make an instance that points back to the user with managedBy,
+ # thus creating an object in the schema with a linked attribute
+ ldif_template = """
+dn: CN=link{class_name},{schema_dn}
+objectClass: classSchema
+objectClass: {class_name}
+instanceType: 4
+governsId: {gov_id}.0
+systemFlags: 16
+managedBy: {user_dn}
+"""
+
+ gov_id = self.unique_gov_id_prefix()
+ ldif = ldif_template.format(gov_id=gov_id, **kwargs)
+ self.from_ldb.add_ldif(ldif)
+
+ # Check link exists on test schema object
+ dn_templ = "CN=link{class_name},{schema_dn}"
+ dn = dn_templ.format(**kwargs)
+ res = self.from_ldb.search(base=dn, scope=ldb.SCOPE_BASE)
+ self.assertEqual(len(res), 1)
+ self.assertIsNotNone(res[0].get("managedBy"))
+ self.assertEqual(str(res[0].get("managedBy")[0]), user_dn)
+
+ # Check backlink on user object
+ res = self.from_ldb.search(base=user_dn, scope=ldb.SCOPE_BASE)
+ self.assertEqual(len(res), 1)
+ managed_objs = res[0].get("managedObjects")
+ self.assertEqual(len(managed_objs), 1)
+ managed_objs = [str(o) for o in managed_objs]
+ self.assertEqual(managed_objs, [dn_templ.format(**kwargs)])
+
+ def test_schema_linked_attributes(self):
+ num_test_objects = 9
+
+ # Make an object outside of the schema partition that we can link to
+ user_name = self.unique_cn_prefix("user")
+ user_dn = "CN={},CN=Users,{}".format(user_name, self.domain_dn)
+
+ ldif_template = """
+dn: {user_dn}
+objectClass: person
+objectClass: user"""
+ ldif = ldif_template.format(user_dn=user_dn)
+ self.from_ldb.add_ldif(ldif)
+
+ self.do_repl(self.domain_dn)
+
+ # Make test object name prefixes unique so test can run multiple times
+ # in a single testenv (can't delete schema objects)
+ class_name = self.unique_cn_prefix("class")
+ link_class_name = self.unique_cn_prefix("linkClass")
+
+ kwargs = {'class_name': class_name,
+ 'schema_dn': self.schema_dn,
+ 'link_class_name': link_class_name,
+ 'user_dn': user_dn}
+
+        # Add an auxiliary class (objectClassCategory 3) to the schema and give
+        # it managedBy so we can create schema objects with linked attributes.
+ ldif_template = """
+dn: CN={class_name},{schema_dn}
+objectClass: classSchema
+governsId: {gov_id}.0
+instanceType: 4
+systemFlags: 16
+systemOnly: FALSE
+objectClassCategory: 3
+mayContain: managedBy
+"""
+
+ gov_id = self.unique_gov_id_prefix()
+ ldif = ldif_template.format(gov_id=gov_id, **kwargs)
+ self.from_ldb.add_ldif(ldif)
+
+ # Now make instances that point back to the user with managedBy,
+ # thus creating objects in the schema with linked attributes
+ ldif_template = """
+dn: CN={link_class_name}{i},{schema_dn}
+objectClass: classSchema
+objectClass: {class_name}
+instanceType: 4
+governsId: {gov_id}.0
+systemFlags: 16
+managedBy: {user_dn}
+"""
+
+ id_range = list(range(num_test_objects))
+ for i in id_range:
+ gov_id = self.unique_gov_id_prefix()
+ ldif = ldif_template.format(i=i, gov_id=gov_id, **kwargs)
+ self.from_ldb.add_ldif(ldif)
+
+ self.do_repl(self.schema_dn)
+
+ # Check link exists in each test schema objects at destination DC
+ dn_templ = "CN={link_class_name}{i},{schema_dn}"
+ for i in id_range:
+ dn = dn_templ.format(i=i, **kwargs)
+ res = self.dest_ldb.search(base=dn, scope=ldb.SCOPE_BASE)
+ self.assertEqual(len(res), 1)
+ self.assertIsNotNone(res[0].get("managedBy"))
+ self.assertEqual(str(res[0].get("managedBy")[0]), user_dn)
+
+ # Check backlinks list on user object contains DNs of test objects.
+ res = self.dest_ldb.search(base=user_dn, scope=ldb.SCOPE_BASE)
+ self.assertEqual(len(res), 1)
+ managed_objs = res[0].get("managedObjects")
+ self.assertIsNotNone(managed_objs)
+ managed_objs_set = {str(el) for el in managed_objs}
+ expected = {dn_templ.format(i=i, **kwargs) for i in id_range}
+ self.assertEqual(managed_objs_set, expected)
diff --git a/source4/torture/drs/python/getnc_unpriv.py b/source4/torture/drs/python/getnc_unpriv.py
new file mode 100644
index 0000000..c53906a
--- /dev/null
+++ b/source4/torture/drs/python/getnc_unpriv.py
@@ -0,0 +1,306 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Tests replication scenarios with different user privileges.
+# We want to test every replication scenario we can think of against:
+# - users with only GET_CHANGES privileges
+# - users with only GET_ALL_CHANGES privileges
+# - users with both GET_CHANGES and GET_ALL_CHANGES privileges
+# - users with no privileges
+#
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc2_dns_name
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN getnc_unpriv -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import drs_base
+import samba.tests
+from samba import werror, WERRORError
+
+from samba import sd_utils
+import ldb
+from ldb import SCOPE_BASE
+import random
+
+from samba.dcerpc import drsuapi, security
+from samba.credentials import DONT_USE_KERBEROS
+
+
+class DrsReplicaSyncUnprivTestCase(drs_base.DrsBaseTestCase):
+ """Confirm the behaviour of DsGetNCChanges for unprivileged users"""
+
+ def setUp(self):
+ super(DrsReplicaSyncUnprivTestCase, self).setUp()
+ self.get_changes_user = "get-changes-user"
+ self.base_dn = self.ldb_dc1.get_default_basedn()
+ self.user_pass = samba.generate_random_password(12, 16)
+
+ # add some randomness to the test OU. (Deletion of the last test's
+ # objects can be slow to replicate out. So the OU created by a previous
+ # testenv may still exist at this point).
+ rand = random.randint(1, 10000000)
+ test_ou = "OU=test_getnc_unpriv%d" % rand
+ self.ou = "%s,%s" % (test_ou, self.base_dn)
+ self.ldb_dc1.add({
+ "dn": self.ou,
+ "objectclass": "organizationalUnit"})
+ self.ldb_dc1.newuser(self.get_changes_user, self.user_pass,
+ userou=test_ou)
+ (self.drs, self.drs_handle) = self._ds_bind(self.dnsname_dc1)
+
+ self.sd_utils = sd_utils.SDUtils(self.ldb_dc1)
+ self.user_dn = "cn=%s,%s" % (self.get_changes_user, self.ou)
+ user_sid = self.sd_utils.get_object_sid(self.user_dn)
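+        # The ACE strings below use SDDL object-ACE syntax: "OA" is an
+        # object-allowed ACE, "CR" grants a control-access right, the GUID
+        # selects the extended right (DS-Replication-Get-Changes[-All]) and
+        # the trailing SID is the user being granted that right.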
+ self.acl_mod_get_changes = "(OA;;CR;%s;;%s)" % (security.GUID_DRS_GET_CHANGES,
+ str(user_sid))
+ self.acl_mod_get_all_changes = "(OA;;CR;%s;;%s)" % (security.GUID_DRS_GET_ALL_CHANGES,
+ str(user_sid))
+ self.desc_sddl = self.sd_utils.get_sd_as_sddl(self.base_dn)
+
+ # We set DONT_USE_KERBEROS to avoid a race with getting the
+ # user replicated to our selected KDC
+ self.user_creds = self.insta_creds(template=self.get_credentials(),
+ username=self.get_changes_user,
+ userpass=self.user_pass,
+ kerberos_state=DONT_USE_KERBEROS)
+ (self.user_drs, self.user_drs_handle) = self._ds_bind(self.dnsname_dc1,
+ self.user_creds)
+
+ def tearDown(self):
+ self.sd_utils.modify_sd_on_dn(self.base_dn, self.desc_sddl)
+ try:
+ self.ldb_dc1.delete(self.ou, ["tree_delete:1"])
+ except ldb.LdbError as e1:
+ (enum, string) = e1.args
+            if enum != ldb.ERR_NO_SUCH_OBJECT:
+                raise
+ super(DrsReplicaSyncUnprivTestCase, self).tearDown()
+
+ def _test_repl_exop(self, exop, repl_obj, expected_error, dest_dsa=None,
+ partial_attribute_set=None):
+ """
+ Common function to send a replication request and check the result
+ matches what's expected.
+ """
+ req8 = self._exop_req8(dest_dsa=dest_dsa,
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=repl_obj,
+ exop=exop,
+ replica_flags=drsuapi.DRSUAPI_DRS_WRIT_REP,
+ partial_attribute_set=partial_attribute_set)
+
+ if expected_error is None:
+ # user is OK, request should be accepted without throwing an error
+ (level, ctr) = self.user_drs.DsGetNCChanges(self.user_drs_handle,
+ 8, req8)
+ else:
+ # check the request is rejected (with the error we're expecting)
+ try:
+ (level, ctr) = self.user_drs.DsGetNCChanges(self.user_drs_handle,
+ 8, req8)
+ self.fail("Should have failed with user denied access")
+ except WERRORError as e:
+ (enum, estr) = e.args
+ self.assertTrue(enum in expected_error,
+ "Got unexpected error: %s" % estr)
+
+ def _test_repl_single_obj(self, repl_obj, expected_error,
+ partial_attribute_set=None):
+ """
+ Checks that replication on a single object either succeeds or fails as
+ expected (based on the user's access rights)
+ """
+ self._test_repl_exop(exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ repl_obj=repl_obj,
+ expected_error=expected_error,
+ partial_attribute_set=partial_attribute_set)
+
+ def _test_repl_secret(self, repl_obj, expected_error, dest_dsa=None):
+ """
+ Checks that REPL_SECRET on an object either succeeds or fails as
+ expected (based on the user's access rights)
+ """
+ self._test_repl_exop(exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ repl_obj=repl_obj,
+ expected_error=expected_error,
+ dest_dsa=dest_dsa)
+
+ def _test_repl_full(self, expected_error, partial_attribute_set=None):
+ """
+ Checks that a full replication either succeeds or fails as expected
+ (based on the user's access rights)
+ """
+ self._test_repl_exop(exop=drsuapi.DRSUAPI_EXOP_NONE,
+ repl_obj=self.ldb_dc1.get_default_basedn(),
+ expected_error=expected_error,
+ partial_attribute_set=partial_attribute_set)
+
+ def _test_repl_full_on_ou(self, repl_obj, expected_error):
+ """
+ Full replication on a specific OU should always fail (it should be done
+ against a base NC). The error may vary based on the user's access rights
+ """
+ # Just try against the OU created in the test setup
+ self._test_repl_exop(exop=drsuapi.DRSUAPI_EXOP_NONE,
+ repl_obj=repl_obj,
+ expected_error=expected_error)
+
+ def test_repl_getchanges_userpriv(self):
+ """
+ Tests various replication requests made by a user with only GET_CHANGES
+ rights. Some requests will be accepted, but most will be rejected.
+ """
+
+ # Assign the user GET_CHANGES rights
+ self.sd_utils.dacl_add_ace(self.base_dn, self.acl_mod_get_changes)
+
+ self._test_repl_single_obj(repl_obj=self.ou,
+ expected_error=[werror.WERR_DS_DRA_ACCESS_DENIED])
+ bad_ou = "OU=bad_obj,%s" % self.ou
+ self._test_repl_single_obj(repl_obj=bad_ou,
+ expected_error=[werror.WERR_DS_DRA_BAD_DN,
+ werror.WERR_DS_DRA_ACCESS_DENIED])
+
+ self._test_repl_secret(repl_obj=self.ou,
+ expected_error=[werror.WERR_DS_DRA_ACCESS_DENIED])
+ self._test_repl_secret(repl_obj=self.user_dn,
+ expected_error=[werror.WERR_DS_DRA_ACCESS_DENIED])
+ self._test_repl_secret(repl_obj=self.user_dn,
+ dest_dsa=self.ldb_dc1.get_ntds_GUID(),
+ expected_error=[werror.WERR_DS_DRA_ACCESS_DENIED])
+ self._test_repl_secret(repl_obj=bad_ou,
+ expected_error=[werror.WERR_DS_DRA_BAD_DN])
+
+ self._test_repl_full(expected_error=[werror.WERR_DS_DRA_ACCESS_DENIED])
+ self._test_repl_full_on_ou(repl_obj=self.ou,
+ expected_error=[werror.WERR_DS_CANT_FIND_EXPECTED_NC,
+ werror.WERR_DS_DRA_ACCESS_DENIED])
+ self._test_repl_full_on_ou(repl_obj=bad_ou,
+ expected_error=[werror.WERR_DS_DRA_BAD_NC,
+ werror.WERR_DS_DRA_ACCESS_DENIED])
+
+ # Partial Attribute Sets don't require GET_ALL_CHANGES rights, so we
+ # expect the following to succeed
+ self._test_repl_single_obj(repl_obj=self.ou,
+ expected_error=None,
+ partial_attribute_set=self.get_partial_attribute_set())
+ self._test_repl_full(expected_error=None,
+ partial_attribute_set=self.get_partial_attribute_set())
+
+ def test_repl_getallchanges_userpriv(self):
+ """
+ Tests various replication requests made by a user with only
+ GET_ALL_CHANGES rights. Note that assigning these rights is possible,
+ but doesn't make a lot of sense. We test it anyway for consistency.
+ """
+
+ # Assign the user GET_ALL_CHANGES rights
+ self.sd_utils.dacl_add_ace(self.base_dn, self.acl_mod_get_all_changes)
+
+ # We can expect to get the same responses as an unprivileged user,
+ # i.e. we have permission to see the results, but don't have permission
+ # to ask
+ self.test_repl_no_userpriv()
+
+ def test_repl_both_userpriv(self):
+ """
+ Tests various replication requests made by a privileged user (i.e. has
+ both GET_CHANGES and GET_ALL_CHANGES). We expect any valid requests
+ to be accepted.
+ """
+
+ # Assign the user both GET_CHANGES and GET_ALL_CHANGES rights
+ both_rights = self.acl_mod_get_changes + self.acl_mod_get_all_changes
+ self.sd_utils.dacl_add_ace(self.base_dn, both_rights)
+
+ self._test_repl_single_obj(repl_obj=self.ou,
+ expected_error=None)
+ bad_ou = "OU=bad_obj,%s" % self.ou
+ self._test_repl_single_obj(repl_obj=bad_ou,
+ expected_error=[werror.WERR_DS_DRA_BAD_DN])
+
+ # Microsoft returns DB_ERROR, Samba returns ACCESS_DENIED
+ self._test_repl_secret(repl_obj=self.ou,
+ expected_error=[werror.WERR_DS_DRA_DB_ERROR,
+ werror.WERR_DS_DRA_ACCESS_DENIED])
+ self._test_repl_secret(repl_obj=self.user_dn,
+ expected_error=[werror.WERR_DS_DRA_DB_ERROR,
+ werror.WERR_DS_DRA_ACCESS_DENIED])
+ # Note that Windows accepts this but Samba rejects it
+ self._test_repl_secret(repl_obj=self.user_dn,
+ dest_dsa=self.ldb_dc1.get_ntds_GUID(),
+ expected_error=[werror.WERR_DS_DRA_ACCESS_DENIED])
+
+ self._test_repl_secret(repl_obj=bad_ou,
+ expected_error=[werror.WERR_DS_DRA_BAD_DN])
+
+ self._test_repl_full(expected_error=None)
+ self._test_repl_full_on_ou(repl_obj=self.ou,
+ expected_error=[werror.WERR_DS_CANT_FIND_EXPECTED_NC])
+ self._test_repl_full_on_ou(repl_obj=bad_ou,
+ expected_error=[werror.WERR_DS_DRA_BAD_NC,
+ werror.WERR_DS_DRA_BAD_DN])
+
+ self._test_repl_single_obj(repl_obj=self.ou,
+ expected_error=None,
+ partial_attribute_set=self.get_partial_attribute_set())
+ self._test_repl_full(expected_error=None,
+ partial_attribute_set=self.get_partial_attribute_set())
+
+ def test_repl_no_userpriv(self):
+ """
+ Tests various replication requests made by an unprivileged user.
+ We expect all these requests to be rejected.
+ """
+
+ # Microsoft usually returns BAD_DN, Samba returns ACCESS_DENIED
+ usual_error = [werror.WERR_DS_DRA_BAD_DN, werror.WERR_DS_DRA_ACCESS_DENIED]
+
+ self._test_repl_single_obj(repl_obj=self.ou,
+ expected_error=usual_error)
+ bad_ou = "OU=bad_obj,%s" % self.ou
+ self._test_repl_single_obj(repl_obj=bad_ou,
+ expected_error=usual_error)
+
+ self._test_repl_secret(repl_obj=self.ou,
+ expected_error=usual_error)
+ self._test_repl_secret(repl_obj=self.user_dn,
+ expected_error=usual_error)
+ self._test_repl_secret(repl_obj=self.user_dn,
+ dest_dsa=self.ldb_dc1.get_ntds_GUID(),
+ expected_error=usual_error)
+ self._test_repl_secret(repl_obj=bad_ou,
+ expected_error=usual_error)
+
+ self._test_repl_full(expected_error=[werror.WERR_DS_DRA_ACCESS_DENIED])
+ self._test_repl_full_on_ou(repl_obj=self.ou,
+ expected_error=usual_error)
+ self._test_repl_full_on_ou(repl_obj=bad_ou,
+ expected_error=[werror.WERR_DS_DRA_BAD_NC,
+ werror.WERR_DS_DRA_ACCESS_DENIED])
+
+ self._test_repl_single_obj(repl_obj=self.ou,
+ expected_error=usual_error,
+ partial_attribute_set=self.get_partial_attribute_set())
+ self._test_repl_full(expected_error=[werror.WERR_DS_DRA_ACCESS_DENIED],
+ partial_attribute_set=self.get_partial_attribute_set())
diff --git a/source4/torture/drs/python/getncchanges.py b/source4/torture/drs/python/getncchanges.py
new file mode 100644
index 0000000..580d8cc
--- /dev/null
+++ b/source4/torture/drs/python/getncchanges.py
@@ -0,0 +1,1427 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Tests various GetNCChanges replication scenarios
+#
+# Copyright (C) Catalyst.Net Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc2_dns_name
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN \
+# getncchanges -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import drs_base
+import samba.tests
+import ldb
+from ldb import SCOPE_BASE
+import random
+
+from samba.dcerpc import drsuapi, misc
+from samba import WERRORError
+from samba import werror
+
+
+
+class DrsReplicaSyncIntegrityTestCase(drs_base.DrsBaseTestCase):
+ def setUp(self):
+ super(DrsReplicaSyncIntegrityTestCase, self).setUp()
+
+ self.init_test_state()
+
+ # Note that DC2 is the DC with the testenv-specific quirks (e.g. it's
+ # the vampire_dc), so we point this test directly at that DC
+ self.set_test_ldb_dc(self.ldb_dc2)
+
+ self.ou = str(samba.tests.create_test_ou(self.test_ldb_dc,
+ "getncchanges." + self.id().rsplit(".", 1)[1]))
+
+ self.addCleanup(self.ldb_dc2.delete, self.ou, ["tree_delete:1"])
+
+ self.base_dn = self.test_ldb_dc.get_default_basedn()
+
+ self.default_conn = DcConnection(self, self.ldb_dc2, self.dnsname_dc2)
+ self.set_dc_connection(self.default_conn)
+
+ def init_test_state(self):
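+ """Resets the per-test replication state (received DNs/links/GUIDs, last response, GET_ANC/GET_TGT usage)"""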
+ self.rxd_dn_list = []
+ self.rxd_links = []
+ self.rxd_guids = []
+ self.last_ctr = None
+
+ # 100 is the minimum max_objects that Microsoft seems to honour
+ # (the max honoured is 400ish), so we use that in these tests
+ self.max_objects = 100
+
+ # store whether we used GET_TGT/GET_ANC flags in the requests
+ self.used_get_tgt = False
+ self.used_get_anc = False
+
+ def add_object(self, dn, objectclass="organizationalunit"):
+ """Adds an OU object"""
+ self.test_ldb_dc.add({"dn": dn, "objectclass": objectclass})
+ res = self.test_ldb_dc.search(base=dn, scope=SCOPE_BASE)
+ self.assertEqual(len(res), 1)
+
+ def modify_object(self, dn, attr, value):
+ """Modifies an object's USN by adding an attribute value to it"""
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.test_ldb_dc, dn)
+ m[attr] = ldb.MessageElement(value, ldb.FLAG_MOD_ADD, attr)
+ self.test_ldb_dc.modify(m)
+
+ def delete_attribute(self, dn, attr, value):
+ """Deletes an attribute from an object"""
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.test_ldb_dc, dn)
+ m[attr] = ldb.MessageElement(value, ldb.FLAG_MOD_DELETE, attr)
+ self.test_ldb_dc.modify(m)
+
+ def start_new_repl_cycle(self):
+ """Resets enough state info to start a new replication cycle"""
+ # reset rxd_links, but leave rxd_guids and rxd_dn_list alone so we know
+ # whether a parent/target is unknown and needs GET_ANC/GET_TGT to
+ # resolve
+ self.rxd_links = []
+
+ self.used_get_tgt = False
+ self.used_get_anc = False
+ # mostly preserve self.last_ctr, so that we use the last HWM
+ if self.last_ctr is not None:
+ self.last_ctr.more_data = True
+
+ def create_object_range(self, start, end, prefix="",
+ children=None, parent_list=None):
+ """
+ Creates a block of objects. Object names are numbered sequentially,
+ using the optional prefix supplied. If the children parameter is
+ supplied it will create a parent-child hierarchy, returning all the DNs
+ and filling parent_list with the top-level parents.
+ """
+ dn_list = []
+
+ # Use dummy/empty lists if we're not creating a parent/child hierarchy
+ if children is None:
+ children = []
+
+ if parent_list is None:
+ parent_list = []
+
+ # Create the parents first, then the children.
+ # This makes it easier to see in debug when GET_ANC takes effect
+ # because the parent/children become interleaved (by default,
+ # this approach means the objects are organized into blocks of
+ # parents and blocks of children together)
+ for x in range(start, end):
+ ou = "OU=test_ou_%s%d,%s" % (prefix, x, self.ou)
+ self.add_object(ou)
+ dn_list.append(ou)
+
+ # keep track of the top-level parents (if needed)
+ parent_list.append(ou)
+
+ # create the block of children (if needed)
+ for x in range(start, end):
+ for child in children:
+ ou = "OU=test_ou_child%s%d,%s" % (child, x, parent_list[x])
+ self.add_object(ou)
+ dn_list.append(ou)
+
+ return dn_list
+
+ def assert_expected_data(self, expected_list):
+ """
+ Asserts that we received all the DNs that we expected and
+ none are missing.
+ """
+ received_list = self.rxd_dn_list
+
+ # Note that with GET_ANC Windows can end up sending the same parent
+ # object multiple times, so this might be noteworthy but doesn't
+ # warrant failing the test
+ num_received = len(received_list)
+ num_expected = len(expected_list)
+ if num_received != num_expected:
+ print("Note: received %d objects but expected %d" % (num_received,
+ num_expected))
+
+ # Check that we received every object that we were expecting
+ for dn in expected_list:
+ self.assertTrue(dn in received_list,
+ "DN '%s' missing from replication." % dn)
+
+ def test_repl_integrity(self):
+ """
+ Modify the objects being replicated while the replication is still
+ in progress and check that no object loss occurs.
+ """
+
+ # The server behaviour differs between samba and Windows. Samba returns
+ # the objects in the original order (up to the pre-modify HWM). Windows
+ # incorporates the modified objects and returns them in the new order
+ # (i.e. modified objects last), up to the post-modify HWM. The
+ # Microsoft docs state the Windows behaviour is optional.
+
+ # Create a range of objects to replicate.
+ expected_dn_list = self.create_object_range(0, 400)
+ (orig_hwm, unused) = self._get_highest_hwm_utdv(self.test_ldb_dc)
+
+ # We ask for the first page of 100 objects.
+ # For this test, we don't care what order we receive the objects in,
+ # so long as by the end we've received everything
+ self.repl_get_next()
+
+ # Modify some of the second page of objects. This should bump the
+ # highwatermark
+ for x in range(100, 200):
+ self.modify_object(expected_dn_list[x], "displayName", "OU%d" % x)
+
+ (post_modify_hwm, _) = self._get_highest_hwm_utdv(self.test_ldb_dc)
+ self.assertTrue(post_modify_hwm.highest_usn > orig_hwm.highest_usn)
+
+ # Get the remaining blocks of data
+ while not self.replication_complete():
+ self.repl_get_next()
+
+ # Check we still receive all the objects we're expecting
+ self.assert_expected_data(expected_dn_list)
+
+ def is_parent_known(self, dn, known_dn_list):
+ """
+ Returns True if the parent of the dn specified is in known_dn_list
+ """
+
+ # we can sometimes get system objects like the RID Manager returned.
+ # Ignore anything that is not under the test OU we created
+ if self.ou not in dn:
+ return True
+
+ # Remove the child portion from the name to get the parent's DN
+ name_substrings = dn.split(",")
+ del name_substrings[0]
+
+ parent_dn = ",".join(name_substrings)
+
+ # check that either this object is a top-level parent (its parent is
+ # the test OU), or its parent has been seen previously
+ return parent_dn == self.ou or parent_dn in known_dn_list
+
+ def _repl_send_request(self, get_anc=False, get_tgt=False):
+ """
+ Sends a GetNCChanges request for the next block of replication data.
+ """
+
+ # we're just trying to mimic regular client behaviour here, so just
+ # use the highwatermark in the last response we received
+ if self.last_ctr:
+ highwatermark = self.last_ctr.new_highwatermark
+ uptodateness_vector = self.last_ctr.uptodateness_vector
+ else:
+ # this is the first replication chunk
+ highwatermark = None
+ uptodateness_vector = None
+
+ # Ask for the next block of replication data
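+ # (DRSUAPI_DRS_WRIT_REP requests a writable replica, i.e. normal DC-style replication)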
+ replica_flags = drsuapi.DRSUAPI_DRS_WRIT_REP
+ more_flags = 0
+
+ if get_anc:
+ replica_flags |= drsuapi.DRSUAPI_DRS_GET_ANC
+ self.used_get_anc = True
+
+ if get_tgt:
+ more_flags = drsuapi.DRSUAPI_DRS_GET_TGT
+ self.used_get_tgt = True
+
+ # return the response from the DC
+ return self._get_replication(replica_flags,
+ max_objects=self.max_objects,
+ highwatermark=highwatermark,
+ uptodateness_vector=uptodateness_vector,
+ more_flags=more_flags)
+
+ def repl_get_next(self, get_anc=False, get_tgt=False, assert_links=False):
+ """
+ Requests the next block of replication data. This tries to simulate
+ client behaviour - if we receive a replicated object that we don't know
+ the parent of, then re-request the block with the GET_ANC flag set.
+ If we don't know the target object for a linked attribute, then
+ re-request with GET_TGT.
+ """
+
+ # send a request to the DC and get the response
+ ctr6 = self._repl_send_request(get_anc=get_anc, get_tgt=get_tgt)
+
+ # extract the object DNs and their GUIDs from the response
+ rxd_dn_list = self._get_ctr6_dn_list(ctr6)
+ rxd_guid_list = self._get_ctr6_object_guids(ctr6)
+
+ # we'll add new objects as we discover them, so take a copy of the
+ # ones we already know about, so we can modify these lists safely
+ known_objects = self.rxd_dn_list[:]
+ known_guids = self.rxd_guids[:]
+
+ # check that we know the parent for every object received
+ for i in range(0, len(rxd_dn_list)):
+
+ dn = rxd_dn_list[i]
+ guid = rxd_guid_list[i]
+
+ if self.is_parent_known(dn, known_objects):
+
+ # the new DN is now known so add it to the list.
+ # It may be the parent of another child in this block
+ known_objects.append(dn)
+ known_guids.append(guid)
+ else:
+ # If we've already set the GET_ANC flag then it should mean
+ # we receive the parents before the child
+ self.assertFalse(get_anc, "Unknown parent for object %s" % dn)
+
+ print("Unknown parent for %s - try GET_ANC" % dn)
+
+ # try the same thing again with the GET_ANC flag set this time
+ return self.repl_get_next(get_anc=True, get_tgt=get_tgt,
+ assert_links=assert_links)
+
+ # check we know about references to any objects in the linked attrs
+ received_links = self._get_ctr6_links(ctr6)
+
+ # This is so that older versions of Samba fail - we want the links to
+ # be sent roughly with the objects, rather than getting all links at
+ # the end
+ if assert_links:
+ self.assertTrue(len(received_links) > 0,
+ "Links were expected in the GetNCChanges response")
+
+ for link in received_links:
+
+ # skip any links that aren't part of the test
+ if self.ou not in link.targetDN:
+ continue
+
+ # check the source object is known (Windows can actually send links
+ # where we don't know the source object yet). Samba shouldn't ever
+ # hit this case because it gets the links based on the source
+ if link.identifier not in known_guids:
+
+ # If we've already set the GET_ANC flag then it should mean
+ # this case doesn't happen
+ self.assertFalse(get_anc, "Unknown source object for GUID %s"
+ % link.identifier)
+
+ print("Unknown source GUID %s - try GET_ANC" % link.identifier)
+
+ # try the same thing again with the GET_ANC flag set this time
+ return self.repl_get_next(get_anc=True, get_tgt=get_tgt,
+ assert_links=assert_links)
+
+ # check we know the target object
+ if link.targetGUID not in known_guids:
+
+ # If we've already set the GET_TGT flag then we should have
+ # already received any objects we need to know about
+ self.assertFalse(get_tgt, "Unknown linked target for object %s"
+ % link.targetDN)
+
+ print("Unknown target for %s - try GET_TGT" % link.targetDN)
+
+ # try the same thing again with the GET_TGT flag set this time
+ return self.repl_get_next(get_anc=get_anc, get_tgt=True,
+ assert_links=assert_links)
+
+ # store the last successful result so we know what HWM to request next
+ self.last_ctr = ctr6
+
+ # store the objects, GUIDs, and links we received
+ self.rxd_dn_list += self._get_ctr6_dn_list(ctr6)
+ self.rxd_links += self._get_ctr6_links(ctr6)
+ self.rxd_guids += self._get_ctr6_object_guids(ctr6)
+
+ return ctr6
+
+ def replication_complete(self):
+ """Returns True if the current/last replication cycle is complete"""
+
+ if self.last_ctr is None or self.last_ctr.more_data:
+ return False
+ else:
+ return True
+
+ def test_repl_integrity_get_anc(self):
+ """
+ Modify the parent objects being replicated while the replication is
+ still in progress (using GET_ANC) and check that no object loss occurs.
+ """
+
+ # Note that GET_ANC behaviour varies between Windows and Samba.
+ # On Samba GET_ANC results in the replication restarting from the very
+ # beginning. After that, Samba remembers GET_ANC and also sends the
+ # parents in subsequent requests (regardless of whether GET_ANC is
+ # specified in the later request).
+ # Windows only sends the parents if GET_ANC was specified in the last
+ # request. It will also resend a parent, even if it's already sent the
+ # parent in a previous response (whereas Samba doesn't).
+
+ # Create a small block of 50 parents, each with 2 children (A and B)
+ # This is so that we receive some children in the first block, so we
+ # can resend with GET_ANC before we learn too many parents
+ parent_dn_list = []
+ expected_dn_list = self.create_object_range(0, 50, prefix="parent",
+ children=("A", "B"),
+ parent_list=parent_dn_list)
+
+ # create the remaining parents and children
+ expected_dn_list += self.create_object_range(50, 150, prefix="parent",
+ children=("A", "B"),
+ parent_list=parent_dn_list)
+
+ # We've now got objects in the following order:
+ # [50 parents][100 children][100 parents][200 children]
+
+ # Modify the first parent so that it's now ordered last by USN
+ # This means we set the GET_ANC flag pretty much straight away
+ # because we receive the first child before the first parent
+ self.modify_object(parent_dn_list[0], "displayName", "OU0")
+
+ # modify a later block of parents so they also get reordered
+ for x in range(50, 100):
+ self.modify_object(parent_dn_list[x], "displayName", "OU%d" % x)
+
+ # Get the first block of objects - this should resend the request with
+ # GET_ANC set because we won't know about the first child's parent.
+ # On samba GET_ANC essentially starts the sync from scratch again, so
+ # we get this over with early before we learn too many parents
+ self.repl_get_next()
+
+ # modify the last chunk of parents. They should now have a USN higher
+ # than the highwater-mark for the replication cycle
+ for x in range(100, 150):
+ self.modify_object(parent_dn_list[x], "displayName", "OU%d" % x)
+
+ # Get the remaining blocks of data - this will resend the request with
+ # GET_ANC if it encounters an object it doesn't have the parent for.
+ while not self.replication_complete():
+ self.repl_get_next()
+
+ # The way the test objects have been created should force
+ # self.repl_get_next() to use the GET_ANC flag. If this doesn't
+ # actually happen, then the test isn't doing its job properly
+ self.assertTrue(self.used_get_anc,
+ "Test didn't use the GET_ANC flag as expected")
+
+ # Check we get all the objects we're expecting
+ self.assert_expected_data(expected_dn_list)
+
+ def assert_expected_links(self, objects_with_links, link_attr="managedBy",
+ num_expected=None):
+ """
+ Asserts that a GetNCChanges response contains any expected links
+ for the objects it contains.
+ """
+ received_links = self.rxd_links
+
+ if num_expected is None:
+ num_expected = len(objects_with_links)
+
+ self.assertTrue(len(received_links) == num_expected,
+ "Received %d links but expected %d"
+ % (len(received_links), num_expected))
+
+ for dn in objects_with_links:
+ self.assert_object_has_link(dn, link_attr, received_links)
+
+ def assert_object_has_link(self, dn, link_attr, received_links):
+ """
+ Queries the object in the DB and asserts there is a link in the
+ GetNCChanges response that matches.
+ """
+
+ # Look up the link attribute in the DB
+ # The extended_dn option will dump the GUID info for the link
+ # attribute (as a hex blob)
+ res = self.test_ldb_dc.search(ldb.Dn(self.test_ldb_dc, dn),
+ attrs=[link_attr],
+ controls=['extended_dn:1:0'],
+ scope=ldb.SCOPE_BASE)
+
+ # We didn't find the expected link attribute in the DB for the object.
+ # Something has gone wrong somewhere...
+ self.assertTrue(link_attr in res[0],
+ "%s in DB doesn't have attribute %s" % (dn, link_attr))
+
+ # find the received link in the list and assert that the target and
+ # source GUIDs match what's in the DB
+ for val in [str(val) for val in res[0][link_attr]]:
+ # Work out the expected source and target GUIDs for the DB link
+ target_dn = ldb.Dn(self.test_ldb_dc, val)
+ targetGUID_blob = target_dn.get_extended_component("GUID")
+ sourceGUID_blob = res[0].dn.get_extended_component("GUID")
+
+ found = False
+
+ for link in received_links:
+ if link.selfGUID_blob == sourceGUID_blob and \
+ link.targetGUID_blob == targetGUID_blob:
+
+ found = True
+
+ if self._debug:
+ print("Link %s --> %s" % (dn[:25], link.targetDN[:25]))
+ break
+
+ self.assertTrue(found,
+ "Did not receive expected link for DN %s" % dn)
+
+ def test_repl_get_tgt(self):
+ """
+ Creates a scenario where we should receive the linked attribute before
+ we know about the target object, and therefore need to use GET_TGT.
+ Note: Samba currently avoids this problem by sending all its links last
+ """
+
+ # create the test objects
+ reportees = self.create_object_range(0, 100, prefix="reportee")
+ managers = self.create_object_range(0, 100, prefix="manager")
+ all_objects = managers + reportees
+ expected_links = reportees
+
+ # add a link attribute to each reportee object that points to the
+ # corresponding manager object as the target
+ for i in range(0, 100):
+ self.modify_object(reportees[i], "managedBy", managers[i])
+
+ # touch the managers (the link-target objects) again to make sure the
+ # reportees (link source objects) get returned first by the replication
+ for i in range(0, 100):
+ self.modify_object(managers[i], "displayName", "OU%d" % i)
+
+ links_expected = True
+
+ # Get all the replication data - this code should resend the requests
+ # with GET_TGT
+ while not self.replication_complete():
+
+ # get the next block of replication data (this sets GET_TGT
+ # if needed)
+ self.repl_get_next(assert_links=links_expected)
+ links_expected = len(self.rxd_links) < len(expected_links)
+
+ # The way the test objects have been created should force
+ # self.repl_get_next() to use the GET_TGT flag. If this doesn't
+ # actually happen, then the test isn't doing its job properly
+ self.assertTrue(self.used_get_tgt,
+ "Test didn't use the GET_TGT flag as expected")
+
+ # Check we get all the objects we're expecting
+ self.assert_expected_data(all_objects)
+
+ # Check we received links for all the reportees
+ self.assert_expected_links(expected_links)
+
+ def test_repl_get_tgt_chain(self):
+ """
+ Tests the behaviour of GET_TGT with a more complicated scenario.
+ Here we create a chain of objects linked together, so if we follow
+ the link target, then we'd traverse ~200 objects each time.
+ """
+
+ # create the test objects
+ objectsA = self.create_object_range(0, 100, prefix="AAA")
+ objectsB = self.create_object_range(0, 100, prefix="BBB")
+ objectsC = self.create_object_range(0, 100, prefix="CCC")
+
+ # create a complex set of object links:
+ # A0-->B0-->C1-->B2-->C3-->B4-->and so on...
+ # Basically each object-A should link to a circular chain of 200 B/C
+ # objects. We create the links in separate chunks here, as it makes it
+ # clearer what happens with the USN (links on Windows have their own
+ # USN, so this approach means the A->B/B->C links aren't interleaved)
+ for i in range(0, 100):
+ self.modify_object(objectsA[i], "managedBy", objectsB[i])
+
+ for i in range(0, 100):
+ self.modify_object(objectsB[i], "managedBy",
+ objectsC[(i + 1) % 100])
+
+ for i in range(0, 100):
+ self.modify_object(objectsC[i], "managedBy",
+ objectsB[(i + 1) % 100])
+
+ all_objects = objectsA + objectsB + objectsC
+ expected_links = all_objects
+
+ # the default order the objects now get returned in should be:
+ # [A0-A99][B0-B99][C0-C99]
+
+ links_expected = True
+
+ # Get all the replication data - this code should resend the requests
+ # with GET_TGT
+ while not self.replication_complete():
+
+ # get the next block of replication data (this sets GET_TGT
+ # if needed)
+ self.repl_get_next(assert_links=links_expected)
+ links_expected = len(self.rxd_links) < len(expected_links)
+
+ # The way the test objects have been created should force
+ # self.repl_get_next() to use the GET_TGT flag. If this doesn't
+ # actually happen, then the test isn't doing its job properly
+ self.assertTrue(self.used_get_tgt,
+ "Test didn't use the GET_TGT flag as expected")
+
+ # Check we get all the objects we're expecting
+ self.assert_expected_data(all_objects)
+
+ # Check we received links for all the reportees
+ self.assert_expected_links(expected_links)
+
+ def test_repl_integrity_link_attr(self):
+ """
+ Tests adding links to new objects while a replication is in progress.
+ """
+
+ # create some source objects for the linked attributes, sandwiched
+ # between 2 blocks of filler objects
+ filler = self.create_object_range(0, 100, prefix="filler")
+ reportees = self.create_object_range(0, 100, prefix="reportee")
+ filler += self.create_object_range(100, 200, prefix="filler")
+
+ # Start the replication and get the first block of filler objects
+ # (We're being mean here and setting the GET_TGT flag right from the
+ # start. On earlier Samba versions, if the client encountered an
+ # unknown target object and retried with GET_TGT, it would restart the
+ # replication cycle from scratch, which avoids the problem).
+ self.repl_get_next(get_tgt=True)
+
+ # create the target objects and add the links. These objects should be
+ # outside the scope of the Samba replication cycle, but the links
+ # should still get sent with the source object
+ managers = self.create_object_range(0, 100, prefix="manager")
+
+ for i in range(0, 100):
+ self.modify_object(reportees[i], "managedBy", managers[i])
+
+ expected_objects = managers + reportees + filler
+ expected_links = reportees
+
+ # complete the replication
+ while not self.replication_complete():
+ self.repl_get_next(get_tgt=True)
+
+ # If we didn't receive the most recently created objects in the last
+ # replication cycle, then kick off another replication to get them
+ if len(self.rxd_dn_list) < len(expected_objects):
+ self.repl_get_next()
+
+ while not self.replication_complete():
+ self.repl_get_next()
+
+ # Check we get all the objects we're expecting
+ self.assert_expected_data(expected_objects)
+
+ # Check we received links for all the parents
+ self.assert_expected_links(expected_links)
+
+ def test_repl_get_anc_link_attr(self):
+ """
+ A basic GET_ANC test where the parents have linked attributes
+ """
+
+ # Create a block of 100 parents and 100 children
+ parent_dn_list = []
+ expected_dn_list = self.create_object_range(0, 100, prefix="parent",
+ children=("A"),
+ parent_list=parent_dn_list)
+
+ # Add links from the parents to the children
+ for x in range(0, 100):
+ self.modify_object(parent_dn_list[x], "managedBy",
+ expected_dn_list[x + 100])
+
+ # add some filler objects at the end. This allows us to easily see
+ # which chunk the links get sent in
+ expected_dn_list += self.create_object_range(0, 100, prefix="filler")
+
+ # We've now got objects in the following order:
+ # [100 x children][100 x parents][100 x filler]
+
+ # Get the replication data - because the block of children come first,
+ # this should retry the request with GET_ANC
+ while not self.replication_complete():
+ self.repl_get_next()
+
+ self.assertTrue(self.used_get_anc,
+ "Test didn't use the GET_ANC flag as expected")
+
+ # Check we get all the objects we're expecting
+ self.assert_expected_data(expected_dn_list)
+
+ # Check we received links for all the parents
+ self.assert_expected_links(parent_dn_list)
+
+ def test_repl_get_tgt_and_anc(self):
+ """
+ Check we can resolve an unknown ancestor when fetching the link target,
+ i.e. tests using GET_TGT and GET_ANC in combination
+ """
+
+ # Create some parent/child objects (the child will be the link target)
+ parents = []
+ all_objects = self.create_object_range(0, 100, prefix="parent",
+ children=["la_tgt"],
+ parent_list=parents)
+
+ children = [item for item in all_objects if item not in parents]
+
+ # create the link source objects and link them to the child/target
+ la_sources = self.create_object_range(0, 100, prefix="la_src")
+ all_objects += la_sources
+
+ for i in range(0, 100):
+ self.modify_object(la_sources[i], "managedBy", children[i])
+
+ expected_links = la_sources
+
+ # modify the children/targets so they come after the link source
+ for x in range(0, 100):
+ self.modify_object(children[x], "displayName", "OU%d" % x)
+
+ # modify the parents, so they now come last in the replication
+ for x in range(0, 100):
+ self.modify_object(parents[x], "displayName", "OU%d" % x)
+
+ # We've now got objects in the following order:
+ # [100 la_source][100 la_target][100 parents (of la_target)]
+
+ links_expected = True
+
+ # Get all the replication data - this code should resend the requests
+ # with GET_TGT and GET_ANC
+ while not self.replication_complete():
+
+ # get the next block of replication data (this sets
+ # GET_TGT/GET_ANC)
+ self.repl_get_next(assert_links=links_expected)
+ links_expected = len(self.rxd_links) < len(expected_links)
+
+ # The way the test objects have been created should force
+ # self.repl_get_next() to use the GET_TGT/GET_ANC flags. If this
+ # doesn't actually happen, then the test isn't doing its job properly
+ self.assertTrue(self.used_get_tgt,
+ "Test didn't use the GET_TGT flag as expected")
+ self.assertTrue(self.used_get_anc,
+ "Test didn't use the GET_ANC flag as expected")
+
+ # Check we get all the objects we're expecting
+ self.assert_expected_data(all_objects)
+
+ # Check we received links for all the link sources
+ self.assert_expected_links(expected_links)
+
+ # Second part of test. Add some extra objects and kick off another
+ # replication. The test code will use the HWM from the last replication
+ # so we'll only receive the objects we modify below
+ self.start_new_repl_cycle()
+
+ # add an extra level of grandchildren that hang off a child
+ # that got created last time
+ new_parent = "OU=test_new_parent,%s" % children[0]
+ self.add_object(new_parent)
+ new_children = []
+
+ for x in range(0, 50):
+ dn = "OU=test_new_la_tgt%d,%s" % (x, new_parent)
+ self.add_object(dn)
+ new_children.append(dn)
+
+ # replace half of the links to point to the new children
+ for x in range(0, 50):
+ self.delete_attribute(la_sources[x], "managedBy", children[x])
+ self.modify_object(la_sources[x], "managedBy", new_children[x])
+
+ # add some filler objects to fill up the 1st chunk
+ filler = self.create_object_range(0, 100, prefix="filler")
+
+ # modify the new children/targets so they come after the link source
+ for x in range(0, 50):
+ self.modify_object(new_children[x], "displayName", "OU-%d" % x)
+
+ # modify the parent, so it now comes last in the replication
+ self.modify_object(new_parent, "displayName", "OU%d" % x)
+
+ # We should now get the modified objects in the following order:
+ # [50 links (x 2)][100 filler][50 new children][new parent]
+ # Note that the link sources aren't actually sent (their new linked
+ # attributes are sent, but apart from that, nothing has changed)
+ all_objects = filler + new_children + [new_parent]
+ expected_links = la_sources[:50]
+
+ links_expected = True
+
+ while not self.replication_complete():
+ self.repl_get_next(assert_links=links_expected)
+ links_expected = len(self.rxd_links) < len(expected_links)
+
+ self.assertTrue(self.used_get_tgt,
+ "Test didn't use the GET_TGT flag as expected")
+ self.assertTrue(self.used_get_anc,
+ "Test didn't use the GET_ANC flag as expected")
+
+ # Check we get all the objects we're expecting
+ self.assert_expected_data(all_objects)
+
+ # Check we received links (50 deleted links and 50 new)
+ self.assert_expected_links(expected_links, num_expected=100)
+
+ def _repl_integrity_obj_deletion(self, delete_link_source=True):
+ """
+ Tests deleting link objects while a replication is in progress.
+ """
+
+ # create some objects and link them together, with some filler
+ # object in between the link sources
+ la_sources = self.create_object_range(0, 100, prefix="la_source")
+ la_targets = self.create_object_range(0, 100, prefix="la_targets")
+
+ for i in range(0, 50):
+ self.modify_object(la_sources[i], "managedBy", la_targets[i])
+
+ filler = self.create_object_range(0, 100, prefix="filler")
+
+ for i in range(50, 100):
+ self.modify_object(la_sources[i], "managedBy", la_targets[i])
+
+ # touch the targets so that the sources get replicated first
+ for i in range(0, 100):
+ self.modify_object(la_targets[i], "displayName", "OU%d" % i)
+
+ # objects should now be in the following USN order:
+ # [50 la_source][100 filler][50 la_source][100 la_target]
+
+ # Get the first block containing 50 link sources
+ self.repl_get_next()
+
+ # delete either the link targets or link source objects
+ if delete_link_source:
+ objects_to_delete = la_sources
+ # in GET_TGT testenvs we only receive the first 50 source objects
+ expected_objects = la_sources[:50] + la_targets + filler
+ else:
+ objects_to_delete = la_targets
+ expected_objects = la_sources + filler
+
+ for obj in objects_to_delete:
+ self.ldb_dc2.delete(obj)
+
+ # complete the replication
+ while not self.replication_complete():
+ self.repl_get_next()
+
+ # Check we get all the objects we're expecting
+ self.assert_expected_data(expected_objects)
+
+ # we can't use assert_expected_links() here because it tries to check
+ # against the deleted objects on the DC. (Although we receive some
+ # links from the first block processed, the Samba client should end up
+ # deleting these, as the source/target object involved is deleted)
+ self.assertTrue(len(self.rxd_links) == 50,
+ "Expected 50 links, not %d" % len(self.rxd_links))
+
+ def test_repl_integrity_src_obj_deletion(self):
+ self._repl_integrity_obj_deletion(delete_link_source=True)
+
+ def test_repl_integrity_tgt_obj_deletion(self):
+ self._repl_integrity_obj_deletion(delete_link_source=False)
+
+ def restore_deleted_object(self, guid, new_dn):
+ """Re-animates a deleted object"""
+
+ guid_str = self._GUID_string(guid)
+ res = self.test_ldb_dc.search(base="<GUID=%s>" % guid_str,
+ attrs=["isDeleted"],
+ controls=['show_deleted:1'],
+ scope=ldb.SCOPE_BASE)
+ if len(res) != 1:
+ return
+
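+ # To re-animate the object, remove isDeleted and set distinguishedName
+ # back to the desired DN (show_deleted lets us modify the deleted object)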
+ msg = ldb.Message()
+ msg.dn = res[0].dn
+ msg["isDeleted"] = ldb.MessageElement([], ldb.FLAG_MOD_DELETE,
+ "isDeleted")
+ msg["distinguishedName"] = ldb.MessageElement([new_dn],
+ ldb.FLAG_MOD_REPLACE,
+ "distinguishedName")
+ self.test_ldb_dc.modify(msg, ["show_deleted:1"])
+
+ def sync_DCs(self, nc_dn=None):
+ # make sure DC1 has all the changes we've made to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2,
+ nc_dn=nc_dn)
+
+ def get_object_guid(self, dn):
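+ """Returns the objectGUID (as raw bytes) for the given DN"""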
+ res = self.test_ldb_dc.search(base=dn, attrs=["objectGUID"],
+ scope=ldb.SCOPE_BASE)
+ return res[0]['objectGUID'][0]
+
+ def set_dc_connection(self, conn):
+ """
+ Switches over the connection state info that the underlying drs_base
+ class uses so that we replicate with a different DC.
+ """
+ self.default_hwm = conn.default_hwm
+ self.default_utdv = conn.default_utdv
+ self.drs = conn.drs
+ self.drs_handle = conn.drs_handle
+ self.set_test_ldb_dc(conn.ldb_dc)
+
+ def assert_DCs_replication_is_consistent(self, peer_conn, all_objects,
+ expected_links):
+ """
+ Replicates against both the primary and secondary DCs in the testenv
+ and checks that both return the expected results.
+ """
+ print("Checking replication against primary test DC...")
+
+ # get the replication data from the test DC first
+ while not self.replication_complete():
+ self.repl_get_next()
+
+ # Check we get all the objects and links we're expecting
+ self.assert_expected_data(all_objects)
+ self.assert_expected_links(expected_links)
+
+ # switch over the DC state info so we now talk to the peer DC
+ self.set_dc_connection(peer_conn)
+ self.init_test_state()
+
+ print("Checking replication against secondary test DC...")
+
+ # check that we get the same information from the 2nd DC
+ while not self.replication_complete():
+ self.repl_get_next()
+
+ self.assert_expected_data(all_objects)
+ self.assert_expected_links(expected_links)
+
+ # switch back to using the default connection
+ self.set_dc_connection(self.default_conn)
+
+ def test_repl_integrity_obj_reanimation(self):
+ """
+ Checks receiving links for a re-animated object doesn't lose links.
+ We test this against the peer DC to make sure it doesn't drop links.
+ """
+
+ # This test is a little different in that we're particularly interested
+ # in exercising the replmd client code on the second DC.
+ # First, make sure the peer DC has the base OU, then connect to it (so
+ # we store its initial HWM)
+ self.sync_DCs()
+ peer_conn = DcConnection(self, self.ldb_dc1, self.dnsname_dc1)
+
+ # create the link source/target objects
+ la_sources = self.create_object_range(0, 100, prefix="la_src")
+ la_targets = self.create_object_range(0, 100, prefix="la_tgt")
+
+ # store the target object's GUIDs (we need to know these to
+ # reanimate them)
+ target_guids = []
+
+ for dn in la_targets:
+ target_guids.append(self.get_object_guid(dn))
+
+ # delete the link target
+ for x in range(0, 100):
+ self.ldb_dc2.delete(la_targets[x])
+
+ # sync the DCs, then disable replication. We want the peer DC to get
+ # all the following changes in a single replication cycle
+ self.sync_DCs()
+ self._disable_all_repl(self.dnsname_dc2)
+
+ # restore the target objects for the linked attributes again
+ for x in range(0, 100):
+ self.restore_deleted_object(target_guids[x], la_targets[x])
+
+ # add the links
+ for x in range(0, 100):
+ self.modify_object(la_sources[x], "managedBy", la_targets[x])
+
+ # create some additional filler objects
+ filler = self.create_object_range(0, 100, prefix="filler")
+
+ # modify the targets so they now come last
+ for x in range(0, 100):
+ self.modify_object(la_targets[x], "displayName", "OU-%d" % x)
+
+ # the objects should now be sent in the following order:
+ # [la sources + links][filler][la targets]
+ all_objects = la_sources + la_targets + filler
+ expected_links = la_sources
+
+ # Enable replication again and make sure the 2 DCs are back in sync
+ self._enable_all_repl(self.dnsname_dc2)
+ self.sync_DCs()
+
+ # Get the replication data from each DC in turn.
+ # Check that both give us all the objects and links we're expecting,
+ # i.e. no links were lost
+ self.assert_DCs_replication_is_consistent(peer_conn, all_objects,
+ expected_links)
+
+ def _test_repl_integrity_cross_partition_links(self, get_tgt=False):
+ """
+ Checks that a cross-partition link to an unknown target object does
+ not result in missing links.
+ """
+
+ # check the peer DC is up-to-date, then connect (storing its HWM)
+ self.sync_DCs()
+ peer_conn = DcConnection(self, self.ldb_dc1, self.dnsname_dc1)
+
+ # stop replication so the peer gets the following objects in one go
+ self._disable_all_repl(self.dnsname_dc2)
+
+ # optionally force the client-side to use GET_TGT locally, by adding a
+ # one-way link to a missing/deleted target object
+ if get_tgt:
+ missing_target = "OU=missing_tgt,%s" % self.ou
+ self.add_object(missing_target)
+ get_tgt_source = "CN=get_tgt_src,%s" % self.ou
+ self.add_object(get_tgt_source,
+ objectclass="msExchConfigurationContainer")
+ self.modify_object(get_tgt_source, "addressBookRoots2",
+ missing_target)
+ self.test_ldb_dc.delete(missing_target)
+
+ # create a link source object in the main NC
+ la_source = "OU=cross_nc_src,%s" % self.ou
+ self.add_object(la_source)
+
+ # create the link target (a server object) in the config NC
+ sites_dn = "CN=Sites,%s" % self.config_dn
+ servers_dn = "CN=Servers,CN=Default-First-Site-Name,%s" % sites_dn
+ rand = random.randint(1, 10000000)
+ la_target = "CN=getncchanges-%d,%s" % (rand, servers_dn)
+ self.add_object(la_target, objectclass="server")
+
+ # add a cross-partition link between the two
+ self.modify_object(la_source, "managedBy", la_target)
+
+ # First, sync to the peer the NC containing the link source object
+ self.sync_DCs()
+
+ # Now, before the peer has received the partition containing the target
+ # object, try replicating from the peer. It will only know about half
+ # of the link at this point, but it should be a valid scenario
+ self.set_dc_connection(peer_conn)
+
+ while not self.replication_complete():
+ # pretend we've received other link targets out of order and that's
+ # forced us to use GET_TGT. This checks the peer doesn't fail
+ # trying to fetch a cross-partition target object that doesn't
+ # exist
+ self.repl_get_next(get_tgt=True)
+
+ self.set_dc_connection(self.default_conn)
+
+ # delete the GET_TGT test object. We're not interested in asserting its
+ # links - it was just there to make the client use GET_TGT (and it
+ # creates an inconsistency because one DC correctly ignores the link,
+ # because it points to a deleted object)
+ if get_tgt:
+ self.test_ldb_dc.delete(get_tgt_source)
+
+ self.init_test_state()
+
+ # Now sync across the partition containing the link target object
+ self.sync_DCs(nc_dn=self.config_dn)
+ self._enable_all_repl(self.dnsname_dc2)
+
+ # Get the replication data from each DC in turn.
+ # Check that both return the cross-partition link (note we're not
+ # checking the config domain NC here for simplicity)
+ self.assert_DCs_replication_is_consistent(peer_conn,
+ all_objects=[la_source],
+ expected_links=[la_source])
+
+ # the cross-partition linked attribute has a missing backlink. Check
+ # that we can still delete it successfully
+ self.delete_attribute(la_source, "managedBy", la_target)
+ self.sync_DCs()
+
+ res = self.test_ldb_dc.search(ldb.Dn(self.ldb_dc1, la_source),
+ attrs=["managedBy"],
+ controls=['extended_dn:1:0'],
+ scope=ldb.SCOPE_BASE)
+ self.assertFalse("managedBy" in res[0],
+ "%s in DB still has managedBy attribute" % la_source)
+ res = self.test_ldb_dc.search(ldb.Dn(self.ldb_dc2, la_source),
+ attrs=["managedBy"],
+ controls=['extended_dn:1:0'],
+ scope=ldb.SCOPE_BASE)
+ self.assertFalse("managedBy" in res[0],
+ "%s in DB still has managedBy attribute" % la_source)
+
+ # Check receiving a cross-partition link to a deleted target.
+ # Delete the target and make sure the deletion is sync'd between DCs
+ target_guid = self.get_object_guid(la_target)
+ self.test_ldb_dc.delete(la_target)
+ self.sync_DCs(nc_dn=self.config_dn)
+ self._disable_all_repl(self.dnsname_dc2)
+
+ # re-animate the target
+ self.restore_deleted_object(target_guid, la_target)
+ self.modify_object(la_source, "managedBy", la_target)
+
+ # now sync the link - because the target is in another partition, the
+ # peer DC receives a link for a deleted target, which it should accept
+ self.sync_DCs()
+ res = self.test_ldb_dc.search(ldb.Dn(self.ldb_dc1, la_source),
+ attrs=["managedBy"],
+ controls=['extended_dn:1:0'],
+ scope=ldb.SCOPE_BASE)
+ self.assertTrue("managedBy" in res[0],
+ "%s in DB missing managedBy attribute" % la_source)
+
+ # cleanup the server object we created in the Configuration partition
+ self.test_ldb_dc.delete(la_target)
+ self._enable_all_repl(self.dnsname_dc2)
+
+ def test_repl_integrity_cross_partition_links(self):
+ self._test_repl_integrity_cross_partition_links(get_tgt=False)
+
+ def test_repl_integrity_cross_partition_links_with_tgt(self):
+ self._test_repl_integrity_cross_partition_links(get_tgt=True)
+
+ def test_repl_get_tgt_multivalued_links(self):
+ """Tests replication with multi-valued link attributes."""
+
+ # create the target/source objects and link them together
+ la_targets = self.create_object_range(0, 500, prefix="la_tgt")
+ la_source = "CN=la_src,%s" % self.ou
+ self.add_object(la_source, objectclass="msExchConfigurationContainer")
+
+ for tgt in la_targets:
+ self.modify_object(la_source, "addressBookRoots2", tgt)
+
+ filler = self.create_object_range(0, 100, prefix="filler")
+
+ # We should receive the objects/links in the following order:
+ # [500 targets + 1 source][500 links][100 filler]
+ expected_objects = la_targets + [la_source] + filler
+ link_only_chunk = False
+
+ # First do the replication without needing GET_TGT
+ while not self.replication_complete():
+ ctr6 = self.repl_get_next()
+
+ if ctr6.object_count == 0 and ctr6.linked_attributes_count != 0:
+ link_only_chunk = True
+
+ # we should receive one chunk that contains only links
+ self.assertTrue(link_only_chunk,
+ "Expected to receive a chunk containing only links")
+
+ # check we received all the expected objects/links
+ self.assert_expected_data(expected_objects)
+ self.assert_expected_links([la_source], link_attr="addressBookRoots2",
+ num_expected=500)
+
+ # Do the replication again, forcing the use of GET_TGT this time
+ self.init_test_state()
+
+ for x in range(0, 500):
+ self.modify_object(la_targets[x], "displayName", "OU-%d" % x)
+
+ # The objects/links should get sent in the following order:
+ # [1 source][500 targets][500 links][100 filler]
+
+ while not self.replication_complete():
+ ctr6 = self.repl_get_next()
+
+ self.assertTrue(self.used_get_tgt,
+ "Test didn't use the GET_TGT flag as expected")
+
+ # check we received all the expected objects/links
+ self.assert_expected_data(expected_objects)
+ self.assert_expected_links([la_source], link_attr="addressBookRoots2",
+ num_expected=500)
+
+ def test_InvalidNC_DummyDN_InvalidGUID_full_repl(self):
+ """Test full replication on a totally invalid GUID fails with the right error code"""
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ req8 = self._exop_req8(dest_dsa="9c637462-5b8c-4467-aef2-bdb1f57bc4ef",
+ invocation_id=dc_guid_1,
+ nc_dn_str="DummyDN",
+ nc_guid=misc.GUID("c2d2f745-1610-4e93-964b-d4ba73eb32f8"),
+ exop=drsuapi.DRSUAPI_EXOP_NONE,
+ max_objects=1)
+
+ (drs, drs_handle) = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+ try:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ except WERRORError as e1:
+ (enum, estr) = e1.args
+ self.assertEqual(enum, werror.WERR_DS_DRA_BAD_NC)
+ else:
+ self.fail("DsGetNCChanges with an invalid NC GUID unexpectedly succeeded")
+
+ def test_DummyDN_valid_GUID_full_repl(self):
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ res = self.ldb_dc1.search(base=self.base_dn, scope=SCOPE_BASE,
+ attrs=["objectGUID"])
+
+ guid = misc.GUID(res[0]["objectGUID"][0])
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str="DummyDN",
+ nc_guid=guid,
+ replica_flags=drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_GET_ANC,
+ exop=drsuapi.DRSUAPI_EXOP_NONE,
+ max_objects=1)
+
+ try:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ except WERRORError as e1:
+ (enum, estr) = e1.args
+ self.fail(f"Failed to call GetNCChanges with DummyDN and a GUID: {estr}")
+
+ # The NC should be the first object returned due to GET_ANC
+ self.assertEqual(ctr.first_object.object.identifier.guid, guid)
+
+ def _test_do_full_repl_no_overlap(self, mix=True, get_anc=False):
+ self.default_hwm = drsuapi.DsReplicaHighWaterMark()
+
+ # When get_anc=True is passed, we can also assert that the base DN
+ # will be the first object returned
+ ctr6 = self._repl_send_request(get_anc=get_anc)
+ guid_list_1 = self._get_ctr6_object_guids(ctr6)
+
+ if mix:
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=self.ldb_dc1.get_default_basedn(),
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ)
+
+ (level, ctr_repl_obj) = self.drs.DsGetNCChanges(self.drs_handle, 8, req8)
+
+ self.assertEqual(ctr_repl_obj.extended_ret, drsuapi.DRSUAPI_EXOP_ERR_SUCCESS)
+
+ repl_obj_guid_list = self._get_ctr6_object_guids(ctr_repl_obj)
+
+ self.assertEqual(len(repl_obj_guid_list), 1)
+
+ # This should be the first object in the main replication: either
+ # because of get_anc=True above, or because the NC root must come
+ # first regardless
+ self.assertEqual(repl_obj_guid_list[0], guid_list_1[0])
+
+ self.last_ctr = ctr6
+ ctr6 = self._repl_send_request(get_anc=True)
+ guid_list_2 = self._get_ctr6_object_guids(ctr6)
+
+ self.assertNotEqual(guid_list_1, guid_list_2)
+
+ def test_do_full_repl_no_overlap_get_anc(self):
+ """
+ Make sure that a full replication of an NC runs to completion despite needing multiple passes
+ """
+ self._test_do_full_repl_no_overlap(mix=False, get_anc=True)
+
+ def test_do_full_repl_no_overlap(self):
+ """
+ Make sure that a full replication of an NC runs to completion despite needing multiple passes
+ """
+ self._test_do_full_repl_no_overlap(mix=False)
+
+ def test_do_full_repl_mix_no_overlap(self):
+ """
+ Make sure that a full replication of an NC runs to completion despite needing multiple passes
+
+ Assert this is true even if we do a REPL_OBJ in between the replications
+
+ """
+ self._test_do_full_repl_no_overlap(mix=True)
+
+ def nc_change(self):
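+ """Bumps the uSNChanged of the NC root by modifying its oEMInformation (the original value is restored via addCleanup)"""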
+ old_base_msg = self.default_conn.ldb_dc.search(base=self.base_dn,
+ scope=SCOPE_BASE,
+ attrs=["oEMInformation"])
+ rec_cleanup = {"dn": self.base_dn,
+ "oEMInformation": old_base_msg[0]["oEMInformation"][0]}
+ m_cleanup = ldb.Message.from_dict(self.default_conn.ldb_dc,
+ rec_cleanup,
+ ldb.FLAG_MOD_REPLACE)
+
+ self.addCleanup(self.default_conn.ldb_dc.modify, m_cleanup)
+
+ rec = {"dn": self.base_dn,
+ "oEMInformation": f"Tortured by Samba's getncchanges.py {self.id()} against {self.default_conn.dnsname_dc}"}
+ m = ldb.Message.from_dict(self.default_conn.ldb_dc, rec, ldb.FLAG_MOD_REPLACE)
+ self.default_conn.ldb_dc.modify(m)
+
+ def _test_repl_nc_is_first(self, start_at_zero=True, nc_change=True, ou_change=True, mid_change=False):
+ """Tests that the NC is always replicated first, but does not move the
+ tmp_highest_usn at that point, just like 'early' GET_ANC objects.
+ """
+
+ # create objects, twice more than the page size of 133
+ objs = self.create_object_range(0, 300, prefix="obj")
+
+ if nc_change:
+ self.nc_change()
+
+ if mid_change:
+ # create even more objects
+ objs = self.create_object_range(301, 450, prefix="obj2")
+
+ base_msg = self.default_conn.ldb_dc.search(base=self.base_dn,
+ scope=SCOPE_BASE,
+ attrs=["uSNChanged",
+ "objectGUID"])
+
+ base_guid = misc.GUID(base_msg[0]["objectGUID"][0])
+ base_usn = int(base_msg[0]["uSNChanged"][0])
+
+ if ou_change:
+ # Make one more modification. We want to assert we have
+ # caught up to the base DN, but Windows both promotes the NC
+ # to the front and skips including it in the tmp_highest_usn,
+ # so we make a later modification that will be to show we get
+ # this change.
+ rec = {"dn": self.ou,
+ "postalCode": "0"}
+ m = ldb.Message.from_dict(self.default_conn.ldb_dc, rec, ldb.FLAG_MOD_REPLACE)
+ self.default_conn.ldb_dc.modify(m)
+
+ ou_msg = self.default_conn.ldb_dc.search(base=self.ou,
+ scope=SCOPE_BASE,
+ attrs=["uSNChanged",
+ "objectGUID"])
+
+ ou_guid = misc.GUID(ou_msg[0]["objectGUID"][0])
+ ou_usn = int(ou_msg[0]["uSNChanged"][0])
+
+ # Check some predicates about USN ordering that the below tests will rely on
+ if ou_change and nc_change:
+ self.assertGreater(ou_usn, base_usn)
+ elif not ou_change and nc_change:
+ self.assertGreater(base_usn, ou_usn)
+
+ ctr6 = self.repl_get_next()
+
+ guid_list_1 = self._get_ctr6_object_guids(ctr6)
+ if nc_change or start_at_zero:
+ self.assertEqual(base_guid, misc.GUID(guid_list_1[0]))
+ self.assertIn(str(base_guid), guid_list_1)
+ self.assertNotIn(str(base_guid), guid_list_1[1:])
+ else:
+ self.assertNotEqual(base_guid, misc.GUID(guid_list_1[0]))
+ self.assertNotIn(str(base_guid), guid_list_1)
+
+ self.assertTrue(ctr6.more_data)
+
+ if not ou_change and nc_change:
+ self.assertLess(ctr6.new_highwatermark.tmp_highest_usn, base_usn)
+
+ i = 0
+ while not self.replication_complete():
+ i = i + 1
+ last_tmp_highest_usn = ctr6.new_highwatermark.tmp_highest_usn
+ ctr6 = self.repl_get_next()
+ guid_list_2 = self._get_ctr6_object_guids(ctr6)
+ if len(guid_list_2) > 0:
+ self.assertNotEqual(last_tmp_highest_usn, ctr6.new_highwatermark.tmp_highest_usn)
+
+ if (nc_change or start_at_zero) and base_usn > last_tmp_highest_usn:
+ self.assertEqual(base_guid, misc.GUID(guid_list_2[0]),
+ f"pass={i} more_data={ctr6.more_data} base_usn={base_usn} tmp_highest_usn={ctr6.new_highwatermark.tmp_highest_usn} last_tmp_highest_usn={last_tmp_highest_usn}")
+ self.assertIn(str(base_guid), guid_list_2,
+ f"pass {i}·more_data={ctr6.more_data} base_usn={base_usn} tmp_highest_usn={ctr6.new_highwatermark.tmp_highest_usn} last_tmp_highest_usn={last_tmp_highest_usn}")
+ else:
+ self.assertNotIn(str(base_guid), guid_list_2,
+ f"pass {i}·more_data={ctr6.more_data} base_usn={base_usn} tmp_highest_usn={ctr6.new_highwatermark.tmp_highest_usn} last_tmp_highest_usn={last_tmp_highest_usn}")
+
+ if ou_change:
+ # The modification to the base OU should be in the final chunk
+ self.assertIn(str(ou_guid), guid_list_2)
+ self.assertGreaterEqual(ctr6.new_highwatermark.highest_usn,
+ ou_usn)
+ else:
+ # Show that the NC root change does not show up in the
+ # highest_usn. We either get the change before or after
+ # it.
+ self.assertNotEqual(ctr6.new_highwatermark.highest_usn,
+ base_usn)
+ self.assertEqual(ctr6.new_highwatermark.highest_usn,
+ ctr6.new_highwatermark.tmp_highest_usn)
+
+ self.assertFalse(ctr6.more_data)
+
+ def test_repl_nc_is_first_start_zero_nc_change(self):
+ self.default_hwm = drsuapi.DsReplicaHighWaterMark()
+ self._test_repl_nc_is_first(start_at_zero=True, nc_change=True, ou_change=True)
+
+ def test_repl_nc_is_first_start_zero(self):
+ # Get the NC change in the middle of the replication stream, certainly not at the start or end
+ self.nc_change()
+ self.default_hwm = drsuapi.DsReplicaHighWaterMark()
+ self._test_repl_nc_is_first(start_at_zero=True, nc_change=False, ou_change=False)
+
+ def test_repl_nc_is_first_mid(self):
+ # This is a modification of the next test, that Samba
+ # will pass as it will always include the NC in the
+ # tmp_highest_usn at the point where it belongs
+ self._test_repl_nc_is_first(start_at_zero=False,
+ nc_change=True,
+ ou_change=True,
+ mid_change=True)
+
+ def test_repl_nc_is_first(self):
+ # This is a modification of the next test, that Samba
+ # will pass as it will always include the NC in the
+ # tmp_highest_usn at the point where it belongs
+ self._test_repl_nc_is_first(start_at_zero=False, nc_change=True, ou_change=True)
+
+ def test_repl_nc_is_first_nc_change_only(self):
+ # This shows that the NC change is not reflected in the tmp_highest_usn
+ self._test_repl_nc_is_first(start_at_zero=False, nc_change=True, ou_change=False)
+
+ def test_repl_nc_is_first_no_change(self):
+ # The NC should not be present in this replication
+ self._test_repl_nc_is_first(start_at_zero=False, nc_change=False, ou_change=False)
+
+class DcConnection:
+ """Helper class to track a connection to another DC"""
+
+ def __init__(self, drs_base, ldb_dc, dnsname_dc):
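+ # bind to the DC and record its initial HWM/up-to-dateness vector so tests can replicate against it later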
+ self.ldb_dc = ldb_dc
+ (self.drs, self.drs_handle) = drs_base._ds_bind(dnsname_dc)
+ (self.default_hwm, utdv) = drs_base._get_highest_hwm_utdv(ldb_dc)
+ self.default_utdv = utdv
+ self.dnsname_dc = dnsname_dc
diff --git a/source4/torture/drs/python/link_conflicts.py b/source4/torture/drs/python/link_conflicts.py
new file mode 100644
index 0000000..2c2f9a6
--- /dev/null
+++ b/source4/torture/drs/python/link_conflicts.py
@@ -0,0 +1,763 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Tests replication scenarios that involve conflicting linked attribute
+# information between the 2 DCs.
+#
+# Copyright (C) Catalyst.Net Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc2_dns_name
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN \
+# link_conflicts -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import drs_base
+import samba.tests
+import ldb
+from ldb import SCOPE_BASE
+import random
+import time
+
+from drs_base import AbstractLink
+from samba.dcerpc import drsuapi, misc
+from samba.dcerpc.drsuapi import DRSUAPI_EXOP_ERR_SUCCESS
+
+# specifies the order to sync DCs in
+DC1_TO_DC2 = 1
+DC2_TO_DC1 = 2
+
+
+class DrsReplicaLinkConflictTestCase(drs_base.DrsBaseTestCase):
+ def setUp(self):
+ super(DrsReplicaLinkConflictTestCase, self).setUp()
+
+ self.ou = samba.tests.create_test_ou(self.ldb_dc1,
+ "test_link_conflict")
+ self.base_dn = self.ldb_dc1.get_default_basedn()
+
+ (self.drs, self.drs_handle) = self._ds_bind(self.dnsname_dc1)
+ (self.drs2, self.drs2_handle) = self._ds_bind(self.dnsname_dc2)
+
+ # disable replication for the tests so we can control at what point
+ # the DCs try to replicate
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ def tearDown(self):
+ # re-enable replication
+ self._enable_inbound_repl(self.dnsname_dc1)
+ self._enable_inbound_repl(self.dnsname_dc2)
+ self.ldb_dc1.delete(self.ou, ["tree_delete:1"])
+ super(DrsReplicaLinkConflictTestCase, self).tearDown()
+
+ def get_guid(self, samdb, dn):
+ """Returns an object's GUID (in string format)"""
+ res = samdb.search(base=dn, attrs=["objectGUID"], scope=ldb.SCOPE_BASE)
+ return self._GUID_string(res[0]['objectGUID'][0])
+
+ def add_object(self, samdb, dn, objectclass="organizationalunit"):
+ """Adds an object"""
+ samdb.add({"dn": dn, "objectclass": objectclass})
+ return self.get_guid(samdb, dn)
+
+ def modify_object(self, samdb, dn, attr, value):
+ """Modifies an attribute for an object"""
+ m = ldb.Message()
+ m.dn = ldb.Dn(samdb, dn)
+ m[attr] = ldb.MessageElement(value, ldb.FLAG_MOD_ADD, attr)
+ samdb.modify(m)
+
+ def add_link_attr(self, samdb, source_dn, attr, target_dn):
+ """Adds a linked attribute between 2 objects"""
+ # add the specified attribute to the source object
+ self.modify_object(samdb, source_dn, attr, target_dn)
+
+ def del_link_attr(self, samdb, src, attr, target):
+ m = ldb.Message()
+ m.dn = ldb.Dn(samdb, src)
+ m[attr] = ldb.MessageElement(target, ldb.FLAG_MOD_DELETE, attr)
+ samdb.modify(m)
+
+ def sync_DCs(self, sync_order=DC1_TO_DC2):
+ """Manually syncs the 2 DCs to ensure they're in sync"""
+ if sync_order == DC1_TO_DC2:
+ # sync DC1-->DC2, then DC2-->DC1
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1)
+ self._net_drs_replicate(DC=self.dnsname_dc1,
+ fromDC=self.dnsname_dc2)
+ else:
+ # sync DC2-->DC1, then DC1-->DC2
+ self._net_drs_replicate(DC=self.dnsname_dc1,
+ fromDC=self.dnsname_dc2)
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1)
+
+ def ensure_unique_timestamp(self):
+ """Waits a second to ensure a unique timestamp between 2 objects"""
+ time.sleep(1)
+
+ def unique_dn(self, obj_name):
+ """Returns a unique object DN"""
+ # Because we run each test case twice, we need to create a unique DN so
+ # that the 2nd run doesn't hit objects that already exist. Add some
+ # randomness to the object DN to make it unique
+ rand = random.randint(1, 10000000)
+ return "%s-%d,%s" % (obj_name, rand, self.ou)
+
+ def assert_attrs_match(self, res1, res2, attr, expected_count):
+ """
+ Asserts that the search results contain the expected number of
+ attributes and the results match on both DCs
+ """
+ actual_len = len(res1[0][attr])
+ self.assertTrue(actual_len == expected_count,
+ "Expected %u %s attributes, got %u" % (expected_count,
+ attr,
+ actual_len))
+ actual_len = len(res2[0][attr])
+ self.assertTrue(actual_len == expected_count,
+ "Expected %u %s attributes, got %u" % (expected_count,
+ attr,
+ actual_len))
+
+ # check DCs both agree on the same linked attributes
+ for val in res1[0][attr]:
+ self.assertTrue(val in res2[0][attr],
+ "%s '%s' not found on DC2" % (attr, val))
+
+ def zero_highwatermark(self):
+ """Returns a zeroed highwatermark so that all DRS data gets returned"""
+ hwm = drsuapi.DsReplicaHighWaterMark()
+ hwm.tmp_highest_usn = 0
+ hwm.reserved_usn = 0
+ hwm.highest_usn = 0
+ return hwm
+
+ def _check_replicated_links(self, src_obj_dn, expected_links):
+ """Checks that replication sends back the expected linked attributes"""
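+        # EXOP_REPL_OBJ on the source object returns its linked attribute
+        # values, including inactive (deleted) ones, so we can check both the
+        # winning and the losing link of a conflict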
+ self._check_replication([src_obj_dn],
+ drsuapi.DRSUAPI_DRS_WRIT_REP,
+ dest_dsa=None,
+ drs_error=drsuapi.DRSUAPI_EXOP_ERR_SUCCESS,
+ nc_dn_str=src_obj_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ expected_links=expected_links,
+ highwatermark=self.zero_highwatermark())
+
+ # Check DC2 as well
+ self.set_test_ldb_dc(self.ldb_dc2)
+
+ self._check_replication([src_obj_dn],
+ drsuapi.DRSUAPI_DRS_WRIT_REP,
+ dest_dsa=None,
+ drs_error=drsuapi.DRSUAPI_EXOP_ERR_SUCCESS,
+ nc_dn_str=src_obj_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ expected_links=expected_links,
+ highwatermark=self.zero_highwatermark(),
+ drs=self.drs2, drs_handle=self.drs2_handle)
+ self.set_test_ldb_dc(self.ldb_dc1)
+
+ def _test_conflict_single_valued_link(self, sync_order):
+ """
+ Tests a simple single-value link conflict, i.e. each DC adds a link to
+ the same source object but linking to different targets.
+ """
+ src_ou = self.unique_dn("OU=src")
+ src_guid = self.add_object(self.ldb_dc1, src_ou)
+ self.sync_DCs()
+
+ # create a unique target on each DC
+ target1_ou = self.unique_dn("OU=target1")
+ target2_ou = self.unique_dn("OU=target2")
+
+ target1_guid = self.add_object(self.ldb_dc1, target1_ou)
+ target2_guid = self.add_object(self.ldb_dc2, target2_ou)
+
+ # link the test OU to the respective targets created
+ self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
+ self.ensure_unique_timestamp()
+ self.add_link_attr(self.ldb_dc2, src_ou, "managedBy", target2_ou)
+
+ # sync the 2 DCs
+ self.sync_DCs(sync_order=sync_order)
+
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["managedBy"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["managedBy"])
+
+        # check the object has only one occurrence of the single-valued
+        # attribute and that it matches on both DCs
+ self.assert_attrs_match(res1, res2, "managedBy", 1)
+
+ self.assertTrue(str(res1[0]["managedBy"][0]) == target2_ou,
+ "Expected most recent update to win conflict")
+
+ # we can't query the deleted links over LDAP, but we can check DRS
+ # to make sure the DC kept a copy of the conflicting link
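+        # (the losing link to target1 should be kept but inactive (flags=0),
+        # while the winning link to target2 remains active)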
+ link1 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy, 0,
+ misc.GUID(src_guid), misc.GUID(target1_guid))
+ link2 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ misc.GUID(src_guid), misc.GUID(target2_guid))
+ self._check_replicated_links(src_ou, [link1, link2])
+
+ def test_conflict_single_valued_link(self):
+ # repeat the test twice, to give each DC a chance to resolve
+ # the conflict
+ self._test_conflict_single_valued_link(sync_order=DC1_TO_DC2)
+ self._test_conflict_single_valued_link(sync_order=DC2_TO_DC1)
+
+ def _test_duplicate_single_valued_link(self, sync_order):
+ """
+ Adds the same single-valued link on 2 DCs and checks we don't end up
+ with 2 copies of the link.
+ """
+ # create unique objects for the link
+ target_ou = self.unique_dn("OU=target")
+ self.add_object(self.ldb_dc1, target_ou)
+ src_ou = self.unique_dn("OU=src")
+ src_guid = self.add_object(self.ldb_dc1, src_ou)
+ self.sync_DCs()
+
+ # link the same test OU to the same target on both DCs
+ self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target_ou)
+ self.ensure_unique_timestamp()
+ self.add_link_attr(self.ldb_dc2, src_ou, "managedBy", target_ou)
+
+ # sync the 2 DCs
+ self.sync_DCs(sync_order=sync_order)
+
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["managedBy"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["managedBy"])
+
+        # check the object has only one occurrence of the single-valued
+        # attribute and that it matches on both DCs
+ self.assert_attrs_match(res1, res2, "managedBy", 1)
+
+ def test_duplicate_single_valued_link(self):
+ # repeat the test twice, to give each DC a chance to resolve
+ # the conflict
+ self._test_duplicate_single_valued_link(sync_order=DC1_TO_DC2)
+ self._test_duplicate_single_valued_link(sync_order=DC2_TO_DC1)
+
+ def _test_conflict_multi_valued_link(self, sync_order):
+ """
+ Tests a simple multi-valued link conflict. This adds 2 objects with the
+ same username on 2 different DCs and checks their group membership is
+ preserved after the conflict is resolved.
+ """
+
+ # create a common link source
+ src_dn = self.unique_dn("CN=src")
+ src_guid = self.add_object(self.ldb_dc1, src_dn, objectclass="group")
+ self.sync_DCs()
+
+ # create the same user (link target) on each DC.
+ # Note that the GUIDs will differ between the DCs
+ target_dn = self.unique_dn("CN=target")
+ target1_guid = self.add_object(self.ldb_dc1, target_dn,
+ objectclass="user")
+ self.ensure_unique_timestamp()
+ target2_guid = self.add_object(self.ldb_dc2, target_dn,
+ objectclass="user")
+
+ # link the src group to the respective target created
+ self.add_link_attr(self.ldb_dc1, src_dn, "member", target_dn)
+ self.ensure_unique_timestamp()
+ self.add_link_attr(self.ldb_dc2, src_dn, "member", target_dn)
+
+ # sync the 2 DCs. We expect the more recent target2 object to win
+ self.sync_DCs(sync_order=sync_order)
+
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["member"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["member"])
+ target1_conflict = False
+
+ # we expect exactly 2 members in our test group (both DCs should agree)
+ self.assert_attrs_match(res1, res2, "member", 2)
+
+ for val in [str(val) for val in res1[0]["member"]]:
+ # check the expected conflicting object was renamed
+ self.assertFalse("CNF:%s" % target2_guid in val)
+ if "CNF:%s" % target1_guid in val:
+ target1_conflict = True
+
+ self.assertTrue(target1_conflict,
+ "Expected link to conflicting target object not found")
+
+ def test_conflict_multi_valued_link(self):
+ # repeat the test twice, to give each DC a chance to resolve
+ # the conflict
+ self._test_conflict_multi_valued_link(sync_order=DC1_TO_DC2)
+ self._test_conflict_multi_valued_link(sync_order=DC2_TO_DC1)
+
+ def _test_duplicate_multi_valued_link(self, sync_order):
+ """
+ Adds the same multivalued link on 2 DCs and checks we don't end up
+ with 2 copies of the link.
+ """
+
+ # create the link source/target objects
+ src_dn = self.unique_dn("CN=src")
+ src_guid = self.add_object(self.ldb_dc1, src_dn, objectclass="group")
+ target_dn = self.unique_dn("CN=target")
+ self.add_object(self.ldb_dc1, target_dn, objectclass="user")
+ self.sync_DCs()
+
+ # link the src group to the same target user separately on each DC
+ self.add_link_attr(self.ldb_dc1, src_dn, "member", target_dn)
+ self.ensure_unique_timestamp()
+ self.add_link_attr(self.ldb_dc2, src_dn, "member", target_dn)
+
+ self.sync_DCs(sync_order=sync_order)
+
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["member"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["member"])
+
+ # we expect to still have only 1 member in our test group
+ self.assert_attrs_match(res1, res2, "member", 1)
+
+ def test_duplicate_multi_valued_link(self):
+ # repeat the test twice, to give each DC a chance to resolve
+ # the conflict
+ self._test_duplicate_multi_valued_link(sync_order=DC1_TO_DC2)
+ self._test_duplicate_multi_valued_link(sync_order=DC2_TO_DC1)
+
+ def _test_conflict_backlinks(self, sync_order):
+ """
+ Tests that resolving a source object conflict fixes up any backlinks,
+ e.g. the same user is added to a conflicting group.
+ """
+
+ # create a common link target
+ target_dn = self.unique_dn("CN=target")
+ target_guid = self.add_object(self.ldb_dc1, target_dn,
+ objectclass="user")
+ self.sync_DCs()
+
+ # create the same group (link source) on each DC.
+ # Note that the GUIDs will differ between the DCs
+ src_dn = self.unique_dn("CN=src")
+ src1_guid = self.add_object(self.ldb_dc1, src_dn, objectclass="group")
+ self.ensure_unique_timestamp()
+ src2_guid = self.add_object(self.ldb_dc2, src_dn, objectclass="group")
+
+ # link the src group to the respective target created
+ self.add_link_attr(self.ldb_dc1, src_dn, "member", target_dn)
+ self.ensure_unique_timestamp()
+ self.add_link_attr(self.ldb_dc2, src_dn, "member", target_dn)
+
+ # sync the 2 DCs. We expect the more recent src2 object to win
+ self.sync_DCs(sync_order=sync_order)
+
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % target_guid,
+ scope=SCOPE_BASE, attrs=["memberOf"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % target_guid,
+ scope=SCOPE_BASE, attrs=["memberOf"])
+ src1_backlink = False
+
+ # our test user should still be a member of 2 groups (check both
+ # DCs agree)
+ self.assert_attrs_match(res1, res2, "memberOf", 2)
+
+ for val in [str(val) for val in res1[0]["memberOf"]]:
+ # check the conflicting object was renamed
+ self.assertFalse("CNF:%s" % src2_guid in val)
+ if "CNF:%s" % src1_guid in val:
+ src1_backlink = True
+
+ self.assertTrue(src1_backlink,
+ "Backlink to conflicting source object not found")
+
+ def test_conflict_backlinks(self):
+ # repeat the test twice, to give each DC a chance to resolve
+ # the conflict
+ self._test_conflict_backlinks(sync_order=DC1_TO_DC2)
+ self._test_conflict_backlinks(sync_order=DC2_TO_DC1)
+
+ def _test_link_deletion_conflict(self, sync_order):
+ """
+ Checks that a deleted link conflicting with an active link is
+ resolved correctly.
+ """
+
+ # Add the link objects
+ target_dn = self.unique_dn("CN=target")
+ self.add_object(self.ldb_dc1, target_dn, objectclass="user")
+ src_dn = self.unique_dn("CN=src")
+ src_guid = self.add_object(self.ldb_dc1, src_dn, objectclass="group")
+ self.sync_DCs()
+
+ # add the same link on both DCs, and resolve any conflict
+ self.add_link_attr(self.ldb_dc2, src_dn, "member", target_dn)
+ self.ensure_unique_timestamp()
+ self.add_link_attr(self.ldb_dc1, src_dn, "member", target_dn)
+ self.sync_DCs(sync_order=sync_order)
+
+ # delete and re-add the link on one DC
+ self.del_link_attr(self.ldb_dc1, src_dn, "member", target_dn)
+ self.add_link_attr(self.ldb_dc1, src_dn, "member", target_dn)
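+        # (the delete and re-add bumps the link's version number on DC1)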
+
+ # just delete it on the other DC
+ self.ensure_unique_timestamp()
+ self.del_link_attr(self.ldb_dc2, src_dn, "member", target_dn)
+ # sanity-check the link is gone on this DC
+ res1 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["member"])
+ self.assertFalse("member" in res1[0], "Couldn't delete member attr")
+
+        # sync the 2 DCs. We expect DC1's attribute value to win because it
+        # has a higher version number (even though it is older)
+ self.sync_DCs(sync_order=sync_order)
+
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["member"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["member"])
+
+ # our test user should still be a member of the group (check both
+ # DCs agree)
+ self.assertTrue("member" in res1[0],
+ "Expected member attribute missing")
+ self.assert_attrs_match(res1, res2, "member", 1)
+
+ def test_link_deletion_conflict(self):
+ # repeat the test twice, to give each DC a chance to resolve
+ # the conflict
+ self._test_link_deletion_conflict(sync_order=DC1_TO_DC2)
+ self._test_link_deletion_conflict(sync_order=DC2_TO_DC1)
+
+ def _test_obj_deletion_conflict(self, sync_order, del_target):
+ """
+        Checks that receiving a new link for a deleted object gets
+ resolved correctly.
+ """
+
+ target_dn = self.unique_dn("CN=target")
+ target_guid = self.add_object(self.ldb_dc1, target_dn,
+ objectclass="user")
+ src_dn = self.unique_dn("CN=src")
+ src_guid = self.add_object(self.ldb_dc1, src_dn, objectclass="group")
+
+ self.sync_DCs()
+
+ # delete the object on one DC
+ if del_target:
+ search_guid = src_guid
+ self.ldb_dc2.delete(target_dn)
+ else:
+ search_guid = target_guid
+ self.ldb_dc2.delete(src_dn)
+
+ # add a link on the other DC
+ self.ensure_unique_timestamp()
+ self.add_link_attr(self.ldb_dc1, src_dn, "member", target_dn)
+
+ self.sync_DCs(sync_order=sync_order)
+
+ # the object deletion should trump the link addition.
+ # Check the link no longer exists on the remaining object
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % search_guid,
+ scope=SCOPE_BASE,
+ attrs=["member", "memberOf"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % search_guid,
+ scope=SCOPE_BASE,
+ attrs=["member", "memberOf"])
+
+ self.assertFalse("member" in res1[0], "member attr shouldn't exist")
+ self.assertFalse("member" in res2[0], "member attr shouldn't exist")
+        self.assertFalse("memberOf" in res1[0], "memberOf attr shouldn't exist")
+        self.assertFalse("memberOf" in res2[0], "memberOf attr shouldn't exist")
+
+ def test_obj_deletion_conflict(self):
+ # repeat the test twice, to give each DC a chance to resolve
+ # the conflict
+ self._test_obj_deletion_conflict(sync_order=DC1_TO_DC2,
+ del_target=True)
+ self._test_obj_deletion_conflict(sync_order=DC2_TO_DC1,
+ del_target=True)
+
+ # and also try deleting the source object instead of the link target
+ self._test_obj_deletion_conflict(sync_order=DC1_TO_DC2,
+ del_target=False)
+ self._test_obj_deletion_conflict(sync_order=DC2_TO_DC1,
+ del_target=False)
+
+ def _test_full_sync_link_conflict(self, sync_order):
+ """
+ Checks that doing a full sync doesn't affect how conflicts get resolved
+ """
+
+ # create the objects for the linked attribute
+ src_dn = self.unique_dn("CN=src")
+ src_guid = self.add_object(self.ldb_dc1, src_dn, objectclass="group")
+ target_dn = self.unique_dn("CN=target")
+ self.add_object(self.ldb_dc1, target_dn, objectclass="user")
+ self.sync_DCs()
+
+ # add the same link on both DCs
+ self.add_link_attr(self.ldb_dc2, src_dn, "member", target_dn)
+ self.ensure_unique_timestamp()
+ self.add_link_attr(self.ldb_dc1, src_dn, "member", target_dn)
+
+ # Do a couple of full syncs which should resolve the conflict
+ # (but only for one DC)
+ if sync_order == DC1_TO_DC2:
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ full_sync=True)
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ full_sync=True)
+ else:
+ self._net_drs_replicate(DC=self.dnsname_dc1,
+ fromDC=self.dnsname_dc2,
+ full_sync=True)
+ self._net_drs_replicate(DC=self.dnsname_dc1,
+ fromDC=self.dnsname_dc2,
+ full_sync=True)
+
+ # delete and re-add the link on one DC
+ self.del_link_attr(self.ldb_dc1, src_dn, "member", target_dn)
+ self.ensure_unique_timestamp()
+ self.add_link_attr(self.ldb_dc1, src_dn, "member", target_dn)
+
+ # just delete the link on the 2nd DC
+ self.ensure_unique_timestamp()
+ self.del_link_attr(self.ldb_dc2, src_dn, "member", target_dn)
+
+ # sync the 2 DCs. We expect DC1 to win based on version number
+ self.sync_DCs(sync_order=sync_order)
+
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["member"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["member"])
+
+        # check the membership still exists (and both DCs agree)
+ self.assertTrue("member" in res1[0],
+ "Expected member attribute missing")
+ self.assert_attrs_match(res1, res2, "member", 1)
+
+ def test_full_sync_link_conflict(self):
+ # repeat the test twice, to give each DC a chance to resolve
+ # the conflict
+ self._test_full_sync_link_conflict(sync_order=DC1_TO_DC2)
+ self._test_full_sync_link_conflict(sync_order=DC2_TO_DC1)
+
+ def _singleval_link_conflict_deleted_winner(self, sync_order):
+ """
+ Tests a single-value link conflict where the more-up-to-date link value
+ is deleted.
+ """
+ src_ou = self.unique_dn("OU=src")
+ src_guid = self.add_object(self.ldb_dc1, src_ou)
+ self.sync_DCs()
+
+ # create a unique target on each DC
+ target1_ou = self.unique_dn("OU=target1")
+ target2_ou = self.unique_dn("OU=target2")
+
+ target1_guid = self.add_object(self.ldb_dc1, target1_ou)
+ target2_guid = self.add_object(self.ldb_dc2, target2_ou)
+
+ # add the links for the respective targets, and delete one of the links
+ self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
+ self.add_link_attr(self.ldb_dc2, src_ou, "managedBy", target2_ou)
+ self.ensure_unique_timestamp()
+ self.del_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
+
+ # sync the 2 DCs
+ self.sync_DCs(sync_order=sync_order)
+
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["managedBy"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["managedBy"])
+
+        # Although the deleted link value is the more up-to-date one, it
+        # shouldn't trump DC2's active link
+ self.assert_attrs_match(res1, res2, "managedBy", 1)
+
+ self.assertTrue(str(res1[0]["managedBy"][0]) == target2_ou,
+                        "Expected the active link to win the conflict")
+
+ # we can't query the deleted links over LDAP, but we can check that
+ # the deleted links exist using DRS
+ link1 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy, 0,
+ misc.GUID(src_guid), misc.GUID(target1_guid))
+ link2 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ misc.GUID(src_guid), misc.GUID(target2_guid))
+ self._check_replicated_links(src_ou, [link1, link2])
+
+ def test_conflict_single_valued_link_deleted_winner(self):
+ # repeat the test twice, to give each DC a chance to resolve
+ # the conflict
+ self._singleval_link_conflict_deleted_winner(sync_order=DC1_TO_DC2)
+ self._singleval_link_conflict_deleted_winner(sync_order=DC2_TO_DC1)
+
+ def _singleval_link_conflict_deleted_loser(self, sync_order):
+ """
+ Tests a single-valued link conflict, where the losing link value is
+ deleted.
+ """
+ src_ou = self.unique_dn("OU=src")
+ src_guid = self.add_object(self.ldb_dc1, src_ou)
+ self.sync_DCs()
+
+ # create a unique target on each DC
+ target1_ou = self.unique_dn("OU=target1")
+ target2_ou = self.unique_dn("OU=target2")
+
+ target1_guid = self.add_object(self.ldb_dc1, target1_ou)
+ target2_guid = self.add_object(self.ldb_dc2, target2_ou)
+
+ # add the links - we want the link to end up deleted on DC2, but active
+ # on DC1. DC1 has the better version and DC2 has the better timestamp -
+ # the better version should win
+ self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
+ self.del_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
+ self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
+ self.ensure_unique_timestamp()
+ self.add_link_attr(self.ldb_dc2, src_ou, "managedBy", target2_ou)
+ self.del_link_attr(self.ldb_dc2, src_ou, "managedBy", target2_ou)
+
+ self.sync_DCs(sync_order=sync_order)
+
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["managedBy"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["managedBy"])
+
+        # check the object has only one occurrence of the single-valued
+        # attribute and that it matches on both DCs
+ self.assert_attrs_match(res1, res2, "managedBy", 1)
+
+ self.assertTrue(str(res1[0]["managedBy"][0]) == target1_ou,
+                        "Expected the higher-version link value to win the conflict")
+
+ # we can't query the deleted links over LDAP, but we can check DRS
+ # to make sure the DC kept a copy of the conflicting link
+ link1 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ misc.GUID(src_guid), misc.GUID(target1_guid))
+ link2 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy, 0,
+ misc.GUID(src_guid), misc.GUID(target2_guid))
+ self._check_replicated_links(src_ou, [link1, link2])
+
+ def test_conflict_single_valued_link_deleted_loser(self):
+ # repeat the test twice, to give each DC a chance to resolve
+ # the conflict
+ self._singleval_link_conflict_deleted_loser(sync_order=DC1_TO_DC2)
+ self._singleval_link_conflict_deleted_loser(sync_order=DC2_TO_DC1)
+
+ def _test_conflict_existing_single_valued_link(self, sync_order):
+ """
+ Tests a single-valued link conflict, where the conflicting link value
+ already exists (as inactive) on both DCs.
+ """
+ # create the link objects
+ src_ou = self.unique_dn("OU=src")
+ src_guid = self.add_object(self.ldb_dc1, src_ou)
+
+ target1_ou = self.unique_dn("OU=target1")
+ target2_ou = self.unique_dn("OU=target2")
+ target1_guid = self.add_object(self.ldb_dc1, target1_ou)
+ target2_guid = self.add_object(self.ldb_dc1, target2_ou)
+
+ # add the links, but then delete them
+ self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
+ self.del_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
+ self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target2_ou)
+ self.del_link_attr(self.ldb_dc1, src_ou, "managedBy", target2_ou)
+ self.sync_DCs()
+
+ # re-add the links independently on each DC
+ self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
+ self.ensure_unique_timestamp()
+ self.add_link_attr(self.ldb_dc2, src_ou, "managedBy", target2_ou)
+
+ # try to sync the 2 DCs
+ self.sync_DCs(sync_order=sync_order)
+
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["managedBy"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % src_guid,
+ scope=SCOPE_BASE, attrs=["managedBy"])
+
+        # check the object has only one occurrence of the single-valued
+        # attribute and that it matches on both DCs
+ self.assert_attrs_match(res1, res2, "managedBy", 1)
+
+ # here we expect DC2 to win because it has the more recent link
+ self.assertTrue(str(res1[0]["managedBy"][0]) == target2_ou,
+ "Expected most recent update to win conflict")
+
+ # we can't query the deleted links over LDAP, but we can check DRS
+ # to make sure the DC kept a copy of the conflicting link
+ link1 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy, 0,
+ misc.GUID(src_guid), misc.GUID(target1_guid))
+ link2 = AbstractLink(drsuapi.DRSUAPI_ATTID_managedBy,
+ drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE,
+ misc.GUID(src_guid), misc.GUID(target2_guid))
+ self._check_replicated_links(src_ou, [link1, link2])
+
+ def test_conflict_existing_single_valued_link(self):
+ # repeat the test twice, to give each DC a chance to resolve
+ # the conflict
+ self._test_conflict_existing_single_valued_link(sync_order=DC1_TO_DC2)
+ self._test_conflict_existing_single_valued_link(sync_order=DC2_TO_DC1)
+
+ def test_link_attr_version(self):
+ """
+ Checks the link attribute version starts from the correct value
+ """
+ # create some objects and add a link
+ src_ou = self.unique_dn("OU=src")
+ self.add_object(self.ldb_dc1, src_ou)
+ target1_ou = self.unique_dn("OU=target1")
+ self.add_object(self.ldb_dc1, target1_ou)
+ self.add_link_attr(self.ldb_dc1, src_ou, "managedBy", target1_ou)
+
+ # get the link info via replication
+ ctr6 = self._get_replication(drsuapi.DRSUAPI_DRS_WRIT_REP,
+ dest_dsa=None,
+ drs_error=DRSUAPI_EXOP_ERR_SUCCESS,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ highwatermark=self.zero_highwatermark(),
+ nc_dn_str=src_ou)
+
+ self.assertTrue(ctr6.linked_attributes_count == 1,
+ "DRS didn't return a link")
+ link = ctr6.linked_attributes[0]
+ rcvd_version = link.meta_data.version
+ self.assertTrue(rcvd_version == 1,
+ "Link version started from %u, not 1" % rcvd_version)
diff --git a/source4/torture/drs/python/linked_attributes_drs.py b/source4/torture/drs/python/linked_attributes_drs.py
new file mode 100644
index 0000000..aa815ff
--- /dev/null
+++ b/source4/torture/drs/python/linked_attributes_drs.py
@@ -0,0 +1,176 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Originally based on ./sam.py
+import sys
+import os
+import base64
+import random
+import re
+
+sys.path.insert(0, "bin/python")
+import samba
+from samba.tests.subunitrun import SubunitOptions, TestProgram
+
+import samba.getopt as options
+
+from samba.auth import system_session
+import ldb
+from samba.samdb import SamDB
+
+from samba.dcerpc import drsuapi, misc, drsblobs
+from samba.drs_utils import drs_DsBind
+from samba.ndr import ndr_unpack, ndr_pack
+
+import drs_base
+
+import time
+
+
+class LATestException(Exception):
+ pass
+
+
+class LATests(drs_base.DrsBaseTestCase):
+
+ def setUp(self):
+ super(LATests, self).setUp()
+ # DrsBaseTestCase sets up self.ldb_dc1, self.ldb_dc2
+ # we're only using one
+ self.samdb = self.ldb_dc1
+
+ self.base_dn = self.samdb.domain_dn()
+ self.ou = "OU=la,%s" % self.base_dn
+        try:
+            self.samdb.delete(self.ou, ['tree_delete:1'])
+        except ldb.LdbError:
+            pass
+ self.samdb.add({'objectclass': 'organizationalUnit',
+ 'dn': self.ou})
+
+ self.dc_guid = self.samdb.get_invocation_id()
+ self.drs, self.drs_handle = self._ds_bind(self.dnsname_dc1)
+
+ def tearDown(self):
+ super(LATests, self).tearDown()
+ try:
+ self.samdb.delete(self.ou, ['tree_delete:1'])
+        except ldb.LdbError:
+ pass
+
+ def delete_user(self, user):
+ self.samdb.delete(user['dn'])
+ del self.users[self.users.index(user)]
+
+ def add_object(self, cn, objectclass):
+ dn = "CN=%s,%s" % (cn, self.ou)
+ self.samdb.add({'cn': cn,
+ 'objectclass': objectclass,
+ 'dn': dn})
+
+ return dn
+
+ def add_objects(self, n, objectclass, prefix=None):
+ if prefix is None:
+ prefix = objectclass
+ dns = []
+ for i in range(n):
+ dns.append(self.add_object("%s%d" % (prefix, i + 1),
+ objectclass))
+ return dns
+
+ def add_linked_attribute(self, src, dest, attr='member'):
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.samdb, src)
+ m[attr] = ldb.MessageElement(dest, ldb.FLAG_MOD_ADD, attr)
+ self.samdb.modify(m)
+
+ def remove_linked_attribute(self, src, dest, attr='member'):
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.samdb, src)
+ m[attr] = ldb.MessageElement(dest, ldb.FLAG_MOD_DELETE, attr)
+ self.samdb.modify(m)
+
+ def attr_search(self, obj, expected, attr, scope=ldb.SCOPE_BASE):
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=self.dc_guid,
+ nc_dn_str=obj,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ)
+
+ level, ctr = self.drs.DsGetNCChanges(self.drs_handle, 8, req8)
+ expected_attid = getattr(drsuapi, 'DRSUAPI_ATTID_' + attr)
+
+ links = []
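+        # each link value is an NDR-packed DsReplicaObjectIdentifier3 naming
+        # the target object; the ACTIVE flag tells us whether the link is
+        # currently present or has been deleted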
+ for link in ctr.linked_attributes:
+ if link.attid == expected_attid:
+ unpacked = ndr_unpack(drsuapi.DsReplicaObjectIdentifier3,
+ link.value.blob)
+ active = link.flags & drsuapi.DRSUAPI_DS_LINKED_ATTRIBUTE_FLAG_ACTIVE
+ links.append((str(unpacked.dn), bool(active)))
+
+ return links
+
+ def assert_forward_links(self, obj, expected, attr='member'):
+ results = self.attr_search(obj, expected, attr)
+ self.assertEqual(len(results), len(expected))
+
+ for k, v in results:
+ self.assertTrue(k in expected)
+ self.assertEqual(expected[k], v, "%s active flag should be %d, not %d" %
+ (k, expected[k], v))
+
+ def get_object_guid(self, dn):
+ res = self.samdb.search(dn,
+ scope=ldb.SCOPE_BASE,
+ attrs=['objectGUID'])
+ return str(misc.GUID(res[0]['objectGUID'][0]))
+
+ def test_links_all_delete_group(self):
+ u1, u2 = self.add_objects(2, 'user', 'u_all_del_group')
+ g1, g2 = self.add_objects(2, 'group', 'g_all_del_group')
+ g2guid = self.get_object_guid(g2)
+
+ self.add_linked_attribute(g1, u1)
+ self.add_linked_attribute(g2, u1)
+ self.add_linked_attribute(g2, u2)
+
+ self.samdb.delete(g2)
+ self.assert_forward_links(g1, {u1: True})
+ res = self.samdb.search('<GUID=%s>' % g2guid,
+ scope=ldb.SCOPE_BASE,
+ controls=['show_deleted:1'])
+ new_dn = res[0].dn
+ self.assert_forward_links(new_dn, {})
+
+ def test_la_links_delete_link(self):
+ u1, u2 = self.add_objects(2, 'user', 'u_del_link')
+ g1, g2 = self.add_objects(2, 'group', 'g_del_link')
+
+ self.add_linked_attribute(g1, u1)
+ self.add_linked_attribute(g2, u1)
+ self.add_linked_attribute(g2, u2)
+
+ self.remove_linked_attribute(g2, u1)
+
+ self.assert_forward_links(g1, {u1: True})
+ self.assert_forward_links(g2, {u1: False, u2: True})
+
+ self.add_linked_attribute(g2, u1)
+ self.remove_linked_attribute(g2, u2)
+ self.assert_forward_links(g2, {u1: True, u2: False})
+ self.remove_linked_attribute(g2, u1)
+ self.assert_forward_links(g2, {u1: False, u2: False})
+
+ def test_la_links_delete_user(self):
+ u1, u2 = self.add_objects(2, 'user', 'u_del_user')
+ g1, g2 = self.add_objects(2, 'group', 'g_del_user')
+
+ self.add_linked_attribute(g1, u1)
+ self.add_linked_attribute(g2, u1)
+ self.add_linked_attribute(g2, u2)
+
+ self.samdb.delete(u1)
+
+ self.assert_forward_links(g1, {})
+ self.assert_forward_links(g2, {u2: True})
diff --git a/source4/torture/drs/python/repl_move.py b/source4/torture/drs/python/repl_move.py
new file mode 100644
index 0000000..3827c7c
--- /dev/null
+++ b/source4/torture/drs/python/repl_move.py
@@ -0,0 +1,2593 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Unix SMB/CIFS implementation.
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2010
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2016
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc2_dns_name
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN repl_move -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import time
+import uuid
+import samba.tests
+
+from samba.ndr import ndr_unpack
+from samba.dcerpc import drsblobs
+from samba.dcerpc import misc
+from samba.drs_utils import drs_DsBind
+
+from ldb import (
+ SCOPE_BASE,
+ SCOPE_SUBTREE,
+)
+
+import drs_base
+import ldb
+from samba.dcerpc.drsuapi import *
+
+
+class DrsMoveObjectTestCase(drs_base.DrsBaseTestCase):
+
+ def setUp(self):
+ super(DrsMoveObjectTestCase, self).setUp()
+        # temporarily disable automatic replication
+ self._disable_all_repl(self.dnsname_dc1)
+ self._disable_all_repl(self.dnsname_dc2)
+
+ # make sure DCs are synchronized before the test
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+
+ self.top_ou = samba.tests.create_test_ou(self.ldb_dc1,
+ "replica_move")
+
+ self.ou1_dn = ldb.Dn(self.ldb_dc1, "OU=DrsOU1")
+ self.ou1_dn.add_base(self.top_ou)
+ ou1 = {}
+ ou1["dn"] = self.ou1_dn
+ ou1["objectclass"] = "organizationalUnit"
+ ou1["ou"] = self.ou1_dn.get_component_value(0)
+ self.ldb_dc1.add(ou1)
+
+ self.ou2_dn = ldb.Dn(self.ldb_dc1, "OU=DrsOU2")
+ self.ou2_dn.add_base(self.top_ou)
+ ou2 = {}
+ ou2["dn"] = self.ou2_dn
+ ou2["objectclass"] = "organizationalUnit"
+ ou2["ou"] = self.ou2_dn.get_component_value(0)
+ self.ldb_dc1.add(ou2)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ self.dc1_guid = self.ldb_dc1.get_invocation_id()
+ self.dc2_guid = self.ldb_dc2.get_invocation_id()
+
+ self.drs_dc1 = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+ self.drs_dc2 = self._ds_bind(self.dnsname_dc2, ip=self.url_dc2)
+
+ def tearDown(self):
+ try:
+ self.ldb_dc1.delete(self.top_ou, ["tree_delete:1"])
+ except ldb.LdbError as e:
+ (enum, string) = e.args
+ if enum == ldb.ERR_NO_SUCH_OBJECT:
+ pass
+
+ self._enable_all_repl(self.dnsname_dc1)
+ self._enable_all_repl(self.dnsname_dc2)
+ super(DrsMoveObjectTestCase, self).tearDown()
+
+ def _make_username(self):
+ return "DrsMoveU_" + time.strftime("%s", time.gmtime())
+
+ def _check_metadata(self, user_dn, sam_ldb, drs, metadata, expected):
+ repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob, metadata[0])
+
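+        # each entry in the replPropertyMetaData blob records the attribute
+        # id, the originating DSA's invocation id and the version for one
+        # attribute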
+ self.assertEqual(len(repl.ctr.array), len(expected))
+
+ i = 0
+ for o in repl.ctr.array:
+ e = expected[i]
+ (attid, orig_dsa, version) = e
+ self.assertEqual(attid, o.attid,
+ "(LDAP) Wrong attid "
+ "for expected value %d, wanted 0x%08x got 0x%08x"
+ % (i, attid, o.attid))
+ self.assertEqual(o.originating_invocation_id,
+ misc.GUID(orig_dsa),
+ "(LDAP) Wrong originating_invocation_id "
+ "for expected value %d, attid 0x%08x, wanted %s got %s"
+ % (i, o.attid,
+ misc.GUID(orig_dsa),
+ o.originating_invocation_id))
+ # Allow version to be skipped when it does not matter
+ if version is not None:
+ self.assertEqual(o.version, version,
+ "(LDAP) Wrong version for expected value %d, "
+ "attid 0x%08x, "
+ "wanted %d got %d"
+ % (i, o.attid,
+ version, o.version))
+ i = i + 1
+
+ if drs is None:
+ return
+
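+        # cross-check the same metadata over DRS: fetch just this object
+        # using a version 8 GetNCChanges request with EXOP_REPL_OBJ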
+ req8 = DsGetNCChangesRequest8()
+
+ req8.source_dsa_invocation_id = misc.GUID(sam_ldb.get_invocation_id())
+ req8.naming_context = DsReplicaObjectIdentifier()
+ req8.naming_context.dn = str(user_dn)
+ req8.highwatermark = DsReplicaHighWaterMark()
+ req8.highwatermark.tmp_highest_usn = 0
+ req8.highwatermark.reserved_usn = 0
+ req8.highwatermark.highest_usn = 0
+ req8.uptodateness_vector = None
+ req8.replica_flags = DRSUAPI_DRS_SYNC_FORCED
+ req8.max_object_count = 1
+ req8.max_ndr_size = 402116
+ req8.extended_op = DRSUAPI_EXOP_REPL_OBJ
+ req8.fsmo_info = 0
+ req8.partial_attribute_set = None
+ req8.partial_attribute_set_ex = None
+ req8.mapping_ctr.num_mappings = 0
+ req8.mapping_ctr.mappings = None
+
+ (drs_conn, drs_handle) = drs
+
+ (level, drs_ctr) = drs_conn.DsGetNCChanges(drs_handle, 8, req8)
+ self.assertEqual(level, 6)
+ self.assertEqual(drs_ctr.object_count, 1)
+
+ self.assertEqual(len(drs_ctr.first_object.meta_data_ctr.meta_data), len(expected) - 1)
+ att_idx = 0
+ for o in drs_ctr.first_object.meta_data_ctr.meta_data:
+ i = 0
+ drs_attid = drs_ctr.first_object.object.attribute_ctr.attributes[att_idx]
+ e = expected[i]
+ (attid, orig_dsa, version) = e
+
+ # Skip the RDN from the expected set, it is not sent over DRS
+ if (user_dn.get_rdn_name().upper() == "CN"
+ and attid == DRSUAPI_ATTID_cn) \
+ or (user_dn.get_rdn_name().upper() == "OU"
+ and attid == DRSUAPI_ATTID_ou):
+ i = i + 1
+ e = expected[i]
+ (attid, orig_dsa, version) = e
+
+ self.assertEqual(attid, drs_attid.attid,
+ "(DRS) Wrong attid "
+ "for expected value %d, wanted 0x%08x got 0x%08x"
+ % (i, attid, drs_attid.attid))
+
+ self.assertEqual(o.originating_invocation_id,
+ misc.GUID(orig_dsa),
+ "(DRS) Wrong originating_invocation_id "
+ "for expected value %d, attid 0x%08x, wanted %s got %s"
+ % (i, attid,
+ misc.GUID(orig_dsa),
+ o.originating_invocation_id))
+ # Allow version to be skipped when it does not matter
+ if version is not None:
+ self.assertEqual(o.version, version,
+ "(DRS) Wrong version for expected value %d, "
+ "attid 0x%08x, "
+ "wanted %d got %d"
+ % (i, attid, version, o.version))
+ break
+ i = i + 1
+ att_idx = att_idx + 1
+
+ # now also used to check the group
+ def _check_obj(self, sam_ldb, obj_orig, is_deleted, expected_metadata=None, drs=None):
+ # search the user by guid as it may be deleted
+ guid_str = self._GUID_string(obj_orig["objectGUID"][0])
+ res = sam_ldb.search(base='<GUID=%s>' % guid_str,
+ controls=["show_deleted:1"],
+ attrs=["*", "parentGUID",
+ "replPropertyMetaData"])
+ self.assertEqual(len(res), 1)
+ user_cur = res[0]
+ rdn_orig = str(obj_orig[user_cur.dn.get_rdn_name()][0])
+ rdn_cur = str(user_cur[user_cur.dn.get_rdn_name()][0])
+ name_orig = str(obj_orig["name"][0])
+ name_cur = str(user_cur["name"][0])
+ dn_orig = obj_orig["dn"]
+ dn_cur = user_cur["dn"]
+ # now check properties of the user
+ if is_deleted:
+ self.assertTrue("isDeleted" in user_cur)
+ self.assertEqual(rdn_cur.split('\n')[0], rdn_orig)
+ self.assertEqual(name_cur.split('\n')[0], name_orig)
+ self.assertEqual(dn_cur.get_rdn_value().split('\n')[0],
+ dn_orig.get_rdn_value())
+ self.assertEqual(name_cur, rdn_cur)
+ else:
+ self.assertFalse("isDeleted" in user_cur)
+ self.assertEqual(rdn_cur, rdn_orig)
+ self.assertEqual(name_cur, name_orig)
+ self.assertEqual(dn_cur, dn_orig)
+ self.assertEqual(name_cur, rdn_cur)
+ parent_cur = user_cur["parentGUID"][0]
+ try:
+ parent_orig = obj_orig["parentGUID"][0]
+ self.assertEqual(parent_orig, parent_cur)
+ except KeyError:
+ pass
+ self.assertEqual(name_cur, user_cur.dn.get_rdn_value())
+
+ if expected_metadata is not None:
+ self._check_metadata(dn_cur, sam_ldb, drs, user_cur["replPropertyMetaData"],
+ expected_metadata)
+
+ return user_cur
+
+ def test_ReplicateMoveObject1(self):
+ """Verifies how a moved container with a user inside is replicated between two DCs.
+ This test should verify that:
+ - the OU is replicated properly
+ - the OU is renamed
+        - after replication, the user has the correct DN (under OU2)
+ - the OU is deleted
+ - the OU is modified on DC2
+        - after replication, the user has the correct DN (deleted) and
+          has no description
+
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
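+        # expected replPropertyMetaData entries, as tuples of
+        # (attid, originating invocation id, version); a version of None is
+        # not checked (see _check_metadata)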
+ initial_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_orig, is_deleted=False,
+ expected_metadata=initial_metadata)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+ ldb_res = self.ldb_dc1.search(base=self.ou2_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+ user_moved_dn = ldb_res[0]["dn"]
+
+ moved_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ # check user info on DC1 after rename - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=False,
+ expected_metadata=moved_metadata)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ moved_metadata_dc2 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc2_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, drs=self.drs_dc2,
+ obj_orig=user_moved_orig,
+ is_deleted=False,
+ expected_metadata=moved_metadata_dc2)
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+ deleted_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_moved_orig, is_deleted=True, expected_metadata=deleted_metadata)
+
+        # Modify the description on DC2. This triggers replication of the
+        # description, but not of 'name', and so exercises a bug in Samba
+        # regarding the DN.
+ msg = ldb.Message()
+ msg.dn = new_dn
+ msg["description"] = ldb.MessageElement("User Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
+ modified_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc2_guid, 1),
+ (DRSUAPI_ATTID_description, self.dc2_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, drs=self.drs_dc2,
+ obj_orig=user_moved_orig,
+ is_deleted=False,
+ expected_metadata=modified_metadata)
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ deleted_modified_metadata_dc2 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc2_guid, 2),
+ (DRSUAPI_ATTID_description, self.dc2_guid, 2),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ # check user info on DC2 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, drs=self.drs_dc2,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_modified_metadata_dc2)
+ self.assertFalse("description" in user_cur)
+
+ # trigger replication from DC2 to DC1, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+
+ deleted_modified_metadata_dc1 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_description, self.dc2_guid, 2),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ # check user info on DC1 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_modified_metadata_dc1)
+ self.assertFalse("description" in user_cur)
+
+ def test_ReplicateMoveObject2(self):
+ """Verifies how a moved container with a user inside is not
+        replicated between two DCs, as no replication is triggered.
+ This test should verify that:
+ - the OU is not replicated
+ - the user is not replicated
+
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ initial_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_orig, is_deleted=False,
+ expected_metadata=initial_metadata)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+ ldb_res = self.ldb_dc1.search(base=self.ou2_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_moved_orig = ldb_res[0]
+
+ moved_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ # check user info on DC1 after rename - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=False,
+ expected_metadata=moved_metadata)
+
+ # check user info on DC2 - should not be there, we have not done replication
+ ldb_res = self.ldb_dc2.search(base=self.ou2_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 0)
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+
+ deleted_metadata_dc1 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ # check user info on DC1 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc1)
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ deleted_metadata_dc2 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc2_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ # check user info on DC2 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, drs=self.drs_dc2,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc2)
+
+ # trigger replication from DC2 to DC1, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+
+ # check user info on DC1 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc1)
+
+ def test_ReplicateMoveObject3(self):
+ """Verifies how a moved container with a user inside is replicated between two DCs.
+ This test should verify that:
+ - the OU is created on DC1
+ - the OU is renamed on DC1
+ - We verify that after replication,
+ that the user has the correct DN (under OU2).
+
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ initial_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_orig, is_deleted=False,
+ expected_metadata=initial_metadata)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+ ldb_res = self.ldb_dc1.search(base=self.ou2_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+ user_moved_dn = ldb_res[0]["dn"]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ moved_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ # check user info on DC1 after rename - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=False,
+ expected_metadata=moved_metadata)
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+ deleted_metadata_dc1 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ # check user info on DC1 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc1)
+
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+
+ # check user info on DC1 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc1)
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ deleted_metadata_dc2 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc2_guid, 2),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ # check user info on DC2 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, drs=self.drs_dc2,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc2)
+
+ def test_ReplicateMoveObject3b(self):
+ """Verifies how a moved container with a user inside is replicated between two DCs.
+ This test should verify that:
+ - the OU is created on DC1
+ - the OU is renamed on DC1
+ - We verify that after replication,
+ that the user has the correct DN (under OU2).
+
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ initial_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_orig, is_deleted=False,
+ expected_metadata=initial_metadata)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+ ldb_res = self.ldb_dc1.search(base=self.ou2_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+ user_moved_dn = ldb_res[0]["dn"]
+
+ # trigger replication from DC2 (which has never seen the object) to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
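+ # DC2 has never seen this object, so the pull above should not change
+ # DC1's view; the expected metadata still reflects only the local
+ # create and rename.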
+ moved_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ # check user info on DC1 after rename - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=False,
+ expected_metadata=moved_metadata)
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+ deleted_metadata_dc1 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ # check user info on DC1 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc1)
+
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+
+ # check user info on DC1 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc1)
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ deleted_metadata_dc2 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc2_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ # check user info on DC2 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, drs=self.drs_dc2,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc2)
+
+ def test_ReplicateMoveObject4(self):
+ """Verifies how a moved container with a user inside is replicated between two DCs.
+ This test should verify that:
+ - the OU is replicated properly
+ - the user is modified on DC2
+ - the OU is renamed on DC1
+ - We verify that after replication DC1 -> DC2,
+ that the user has the correct DN (under OU2), and the description
+
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ initial_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_orig, is_deleted=False,
+ expected_metadata=initial_metadata)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ initial_metadata_dc2 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc2_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ # check user info on DC2 - should still be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, drs=self.drs_dc2,
+ obj_orig=user_orig, is_deleted=False,
+ expected_metadata=initial_metadata_dc2)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+ ldb_res = self.ldb_dc1.search(base=self.ou2_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+ user_moved_dn = ldb_res[0]["dn"]
+
+ moved_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ # check user info on DC1 after rename - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=False,
+ expected_metadata=moved_metadata)
+
+ # Modify the description on DC2. This triggers replication of that
+ # attribute, but not of 'name', and so exercises a bug in Samba
+ # regarding the DN.
+ msg = ldb.Message()
+ msg.dn = user_dn
+ msg["description"] = ldb.MessageElement("User Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
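+ # Expected metadata on DC2 after the local modify: the description (and
+ # cn) entries are expected to originate on DC2, while everything else
+ # still originates from DC1.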
+ modified_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc2_guid, 1),
+ (DRSUAPI_ATTID_description, self.dc2_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, drs=self.drs_dc2,
+ obj_orig=user_orig,
+ is_deleted=False,
+ expected_metadata=modified_metadata)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
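+ # After pulling DC1's rename, the test expects 'name' at version 2 from
+ # DC1 and cn at version 2 on DC2, with DC2's description change kept.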
+ modified_renamed_metadata = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc2_guid, 2),
+ (DRSUAPI_ATTID_description, self.dc2_guid, 1),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 1)]
+
+ # check user info on DC2 - should still be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, drs=self.drs_dc2,
+ obj_orig=user_moved_orig,
+ is_deleted=False,
+ expected_metadata=modified_renamed_metadata)
+
+ self.assertTrue("description" in user_cur)
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+ deleted_metadata_dc1 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ # check user info on DC1 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc1)
+
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check user info on DC2 - should still be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, drs=self.drs_dc2,
+ obj_orig=user_moved_orig,
+ is_deleted=False,
+ expected_metadata=modified_renamed_metadata)
+
+ self.assertTrue("description" in user_cur)
+
+ deleted_metadata_dc1 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_description, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ # check user info on DC1 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, drs=self.drs_dc1,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc1)
+
+ self.assertFalse("description" in user_cur)
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ deleted_metadata_dc2 = [
+ (DRSUAPI_ATTID_objectClass, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_cn, self.dc2_guid, 3),
+ (DRSUAPI_ATTID_description, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_instanceType, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_whenCreated, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_isDeleted, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntSecurityDescriptor, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_name, self.dc1_guid, 3),
+ (DRSUAPI_ATTID_userAccountControl, self.dc1_guid, None),
+ (DRSUAPI_ATTID_codePage, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_countryCode, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_dBCSPwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_logonHours, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_unicodePwd, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_ntPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_pwdLastSet, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_primaryGroupID, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_objectSid, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_accountExpires, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lmPwdHistory, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountName, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_sAMAccountType, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_userPrincipalName, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_lastKnownParent, self.dc1_guid, 1),
+ (DRSUAPI_ATTID_objectCategory, self.dc1_guid, 2),
+ (DRSUAPI_ATTID_isRecycled, self.dc1_guid, 1)]
+
+ # check user info on DC2 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, drs=self.drs_dc2,
+ obj_orig=user_moved_orig,
+ is_deleted=True,
+ expected_metadata=deleted_metadata_dc2)
+
+ self.assertFalse("description" in user_cur)
+
+ def test_ReplicateMoveObject5(self):
+ """Verifies how a moved container with a user inside is replicated between two DCs.
+ This test should verify that:
+ - the OU is replicated properly
+ - the user is modified on DC2
+ - the OU is renamed on DC1
+ - We verify that after replication DC2 -> DC1,
+ that the user has the correct DN (under OU2), and the description
+
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should still be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_orig, is_deleted=False)
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=False)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+ ldb_res = self.ldb_dc1.search(base=self.ou2_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+ user_moved_dn = ldb_res[0]["dn"]
+
+ # Modify the description on DC2. This triggers replication of that
+ # attribute, but not of 'name', and so exercises a bug in Samba
+ # regarding the DN.
+ msg = ldb.Message()
+ msg.dn = user_dn
+ msg["description"] = ldb.MessageElement("User Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check user info on DC1 - should still be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_moved_orig, is_deleted=False)
+ self.assertTrue("description" in user_cur)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved_orig, is_deleted=False)
+ self.assertTrue("description" in user_cur)
+
+ # delete user on DC2
+ self.ldb_dc2.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+ # trigger replication from DC2 to DC1 for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+
+ # check user info on DC1 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_moved_orig, is_deleted=True)
+ self.assertFalse("description" in user_cur)
+
+ def test_ReplicateMoveObject6(self):
+ """Verifies how a moved container is replicated between two DCs.
+ This test should verify that:
+ - the OU1 is replicated properly
+ - the OU1 is modified on DC2
+ - the OU1 is renamed on DC1
+ - after replication DC1 -> DC2, OU1 has the correct DN (under OU2)
+ and the description set on DC2.
+
+ """
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_BASE,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ ou_orig = ldb_res[0]
+ ou_dn = ldb_res[0]["dn"]
+
+ # check OU info on DC1
+ print("Testing for %s with GUID %s" % (self.ou1_dn, self._GUID_string(ou_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check OU info on DC1 - should still be a valid OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "OU=%s" % self.ou1_dn.get_component_value(0))
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(ou_dn, new_dn)
+ ldb_res = self.ldb_dc1.search(base=new_dn,
+ scope=SCOPE_BASE,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+
+ ou_moved_orig = ldb_res[0]
+ ou_moved_dn = ldb_res[0]["dn"]
+
+ # Modify the description on DC2. This triggers replication of that
+ # attribute, but not of 'name', and so exercises a bug in Samba
+ # regarding the DN.
+ msg = ldb.Message()
+ msg.dn = ou_dn
+ msg["description"] = ldb.MessageElement("OU Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check OU info on DC2 - should still be a valid OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=ou_moved_orig, is_deleted=False)
+ self.assertTrue("description" in ou_cur)
+
+ # delete OU on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(ou_orig["objectGUID"][0]))
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check OU info on DC1 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_moved_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ # check OU info on DC2 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=ou_moved_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+ def test_ReplicateMoveObject7(self):
+ """Verifies how a moved container is replicated between two DCs.
+ This test should verify that:
+ - the OU1 is replicated properly
+ - the OU1 is modified on DC2
+ - the OU1 is renamed on DC1 to be under OU2
+ - after replication DC2 -> DC1, OU1 has the correct DN (under OU2)
+ and the description set on DC2.
+
+ """
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_BASE,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ ou_orig = ldb_res[0]
+ ou_dn = ldb_res[0]["dn"]
+
+ # check OU info on DC1
+ print("Testing for %s with GUID %s" % (self.ou1_dn, self._GUID_string(ou_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check OU info on DC1 - should still be a valid OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "OU=%s" % self.ou1_dn.get_component_value(0))
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(ou_dn, new_dn)
+ ldb_res = self.ldb_dc1.search(base=new_dn,
+ scope=SCOPE_BASE,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+
+ ou_moved_orig = ldb_res[0]
+ ou_moved_dn = ldb_res[0]["dn"]
+
+ # Modify the description on DC2. This triggers replication of that
+ # attribute, but not of 'name', and so exercises a bug in Samba
+ # regarding the DN.
+ msg = ldb.Message()
+ msg.dn = ou_dn
+ msg["description"] = ldb.MessageElement("OU Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check OU info on DC1 - should still be a valid OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_moved_orig, is_deleted=False)
+ self.assertTrue("description" in ou_cur)
+
+ # delete OU on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(ou_orig["objectGUID"][0]))
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check OU info on DC1 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_moved_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ # check OU info on DC2 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=ou_moved_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+ def test_ReplicateMoveObject8(self):
+ """Verifies how a moved container is replicated between two DCs.
+ This test should verify that:
+ - the OU1 is replicated properly
+ - the OU1 is modified on DC2
+ - the OU1 is renamed on DC1 to OU1-renamed
+ - after replication DC1 -> DC2, OU1 has the correct DN (OU1-renamed)
+ and the description set on DC2.
+
+ """
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_BASE,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ ou_orig = ldb_res[0]
+ ou_dn = ldb_res[0]["dn"]
+
+ # check OU info on DC1
+ print("Testing for %s with GUID %s" % (self.ou1_dn, self._GUID_string(ou_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check OU info on DC1 - should still be a valid OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "OU=%s-renamed" % self.ou1_dn.get_component_value(0))
+ new_dn.add_base(self.ou1_dn.parent())
+ self.ldb_dc1.rename(ou_dn, new_dn)
+ ldb_res = self.ldb_dc1.search(base=new_dn,
+ scope=SCOPE_BASE,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+
+ ou_moved_orig = ldb_res[0]
+ ou_moved_dn = ldb_res[0]["dn"]
+
+ # Modify the description on DC2. This triggers replication of that
+ # attribute, but not of 'name', and so exercises a bug in Samba
+ # regarding the DN.
+ msg = ldb.Message()
+ msg.dn = ou_dn
+ msg["description"] = ldb.MessageElement("OU Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check OU info on DC2 - should still be a valid OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=ou_moved_orig, is_deleted=False)
+ self.assertTrue("description" in ou_cur)
+
+ # delete OU on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(ou_orig["objectGUID"][0]))
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check OU info on DC1 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_moved_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ # check OU info on DC2 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=ou_moved_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+ def test_ReplicateMoveObject9(self):
+ """Verifies how a moved container is replicated between two DCs.
+ This test should verify that:
+ - the OU1 is replicated properly
+ - the OU1 is modified on DC2
+ - the OU1 is renamed on DC1 to OU1-renamed
+ - after replication DC2 -> DC1, OU1 has the correct DN (OU1-renamed)
+ and the description set on DC2.
+
+ """
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_BASE,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ ou_orig = ldb_res[0]
+ ou_dn = ldb_res[0]["dn"]
+
+ # check OU info on DC1
+ print("Testing for %s with GUID %s" % (self.ou1_dn, self._GUID_string(ou_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check OU info on DC1 - should still be a valid OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "OU=%s-renamed" % self.ou1_dn.get_component_value(0))
+ new_dn.add_base(self.ou1_dn.parent())
+ self.ldb_dc1.rename(ou_dn, new_dn)
+ ldb_res = self.ldb_dc1.search(base=new_dn,
+ scope=SCOPE_BASE,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+
+ ou_moved_orig = ldb_res[0]
+ ou_moved_dn = ldb_res[0]["dn"]
+
+ # Modify the description on DC2. This triggers replication of that
+ # attribute, but not of 'name', and so exercises a bug in Samba
+ # regarding the DN.
+ msg = ldb.Message()
+ msg.dn = ou_dn
+ msg["description"] = ldb.MessageElement("OU Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check OU info on DC1 - should still be a valid OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_moved_orig, is_deleted=False)
+ self.assertTrue("description" in ou_cur)
+
+ # delete OU on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(ou_orig["objectGUID"][0]))
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check OU info on DC1 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_moved_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ # check OU info on DC2 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=ou_moved_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+ def test_ReplicateMoveObject10(self):
+ """Verifies how a moved container is replicated between two DCs.
+ This test should verify that:
+ - the OU1 is replicated properly
+ - the OU1 is modified on DC2
+ - the OU1 is deleted on DC1
+ - after replication DC1 -> DC2, OU1 is deleted and the description
+ has gone away.
+
+ """
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_BASE,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ ou_orig = ldb_res[0]
+ ou_dn = ldb_res[0]["dn"]
+
+ # check OU info on DC1
+ print("Testing for %s with GUID %s" % (self.ou1_dn, self._GUID_string(ou_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check OU info on DC1 - should still be a valid OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ # Modify the description on DC2. This triggers replication of that
+ # attribute, but not of 'name', and so exercises a bug in Samba
+ # regarding the DN.
+ msg = ldb.Message()
+ msg.dn = ou_dn
+ msg["description"] = ldb.MessageElement("OU Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
+ # delete OU on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(ou_orig["objectGUID"][0]))
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check OU info on DC2 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=ou_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+ # trigger replication from DC2 to DC1, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+
+ # check OU info on DC1 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+ def test_ReplicateMoveObject11(self):
+ """Verifies how a moved container is replicated between two DCs.
+ This test should verify that:
+ - the OU1 is replicated properly
+ - the OU1 is modified on DC2
+ - the OU1 is deleted on DC1
+ - after replication DC2 -> DC1, OU1 is deleted and the description
+ has gone away.
+
+ """
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_BASE,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ ou_orig = ldb_res[0]
+ ou_dn = ldb_res[0]["dn"]
+
+ # check OU info on DC1
+ print("Testing for %s with GUID %s" % (self.ou1_dn, self._GUID_string(ou_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check OU info on DC1 - should still be a valid OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=False)
+
+ # Modify the description on DC2. This triggers replication of that
+ # attribute, but not of 'name', and so exercises a bug in Samba
+ # regarding the DN.
+ msg = ldb.Message()
+ msg.dn = ou_dn
+ msg["description"] = ldb.MessageElement("OU Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
+ # delete OU on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(ou_orig["objectGUID"][0]))
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check OU info on DC1 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=ou_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ # check OU info on DC2 - should be a deleted OU
+ ou_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=ou_orig, is_deleted=True)
+ self.assertFalse("description" in ou_cur)
+
+
+class DrsMoveBetweenTreeOfObjectTestCase(drs_base.DrsBaseTestCase):
+
+ def setUp(self):
+ super(DrsMoveBetweenTreeOfObjectTestCase, self).setUp()
+ # temporarily disable automatic replication
+ self._disable_all_repl(self.dnsname_dc1)
+ self._disable_all_repl(self.dnsname_dc2)
+
+ self.top_ou = samba.tests.create_test_ou(self.ldb_dc1,
+ "replica_move")
+
+ # make sure DCs are synchronized before the test
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+
+ self.ou1_dn = ldb.Dn(self.ldb_dc1, "OU=DrsOU1")
+ self.ou1_dn.add_base(self.top_ou)
+ self.ou1 = {}
+ self.ou1["dn"] = self.ou1_dn
+ self.ou1["objectclass"] = "organizationalUnit"
+ self.ou1["ou"] = self.ou1_dn.get_component_value(0)
+
+ self.ou2_dn = ldb.Dn(self.ldb_dc1, "OU=DrsOU2,OU=DrsOU1")
+ self.ou2_dn.add_base(self.top_ou)
+ self.ou2 = {}
+ self.ou2["dn"] = self.ou2_dn
+ self.ou2["objectclass"] = "organizationalUnit"
+ self.ou2["ou"] = self.ou2_dn.get_component_value(0)
+
+ self.ou2b_dn = ldb.Dn(self.ldb_dc1, "OU=DrsOU2B,OU=DrsOU1")
+ self.ou2b_dn.add_base(self.top_ou)
+ self.ou2b = {}
+ self.ou2b["dn"] = self.ou2b_dn
+ self.ou2b["objectclass"] = "organizationalUnit"
+ self.ou2b["ou"] = self.ou2b_dn.get_component_value(0)
+
+ self.ou2c_dn = ldb.Dn(self.ldb_dc1, "OU=DrsOU2C,OU=DrsOU1")
+ self.ou2c_dn.add_base(self.top_ou)
+
+ self.ou3_dn = ldb.Dn(self.ldb_dc1, "OU=DrsOU3,OU=DrsOU2,OU=DrsOU1")
+ self.ou3_dn.add_base(self.top_ou)
+ self.ou3 = {}
+ self.ou3["dn"] = self.ou3_dn
+ self.ou3["objectclass"] = "organizationalUnit"
+ self.ou3["ou"] = self.ou3_dn.get_component_value(0)
+
+ self.ou4_dn = ldb.Dn(self.ldb_dc1, "OU=DrsOU4,OU=DrsOU3,OU=DrsOU2,OU=DrsOU1")
+ self.ou4_dn.add_base(self.top_ou)
+ self.ou4 = {}
+ self.ou4["dn"] = self.ou4_dn
+ self.ou4["objectclass"] = "organizationalUnit"
+ self.ou4["ou"] = self.ou4_dn.get_component_value(0)
+
+ self.ou5_dn = ldb.Dn(self.ldb_dc1, "OU=DrsOU5,OU=DrsOU4,OU=DrsOU3,OU=DrsOU2,OU=DrsOU1")
+ self.ou5_dn.add_base(self.top_ou)
+ self.ou5 = {}
+ self.ou5["dn"] = self.ou5_dn
+ self.ou5["objectclass"] = "organizationalUnit"
+ self.ou5["ou"] = self.ou5_dn.get_component_value(0)
+
+ self.ou6_dn = ldb.Dn(self.ldb_dc1, "OU=DrsOU6,OU=DrsOU5,OU=DrsOU4,OU=DrsOU3,OU=DrsOU2,OU=DrsOU1")
+ self.ou6_dn.add_base(self.top_ou)
+ self.ou6 = {}
+ self.ou6["dn"] = self.ou6_dn
+ self.ou6["objectclass"] = "organizationalUnit"
+ self.ou6["ou"] = self.ou6_dn.get_component_value(0)
+
+ def tearDown(self):
+ self.ldb_dc1.delete(self.top_ou, ["tree_delete:1"])
+ self._enable_all_repl(self.dnsname_dc1)
+ self._enable_all_repl(self.dnsname_dc2)
+ super(DrsMoveBetweenTreeOfObjectTestCase, self).tearDown()
+
+ def _make_username(self):
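+ # time-based suffix keeps the test account name unique across runs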
+ return "DrsTreeU_" + time.strftime("%s", time.gmtime())
+
+ # now also used to check the group
+ def _check_obj(self, sam_ldb, obj_orig, is_deleted):
+ # search the user by guid as it may be deleted
+ guid_str = self._GUID_string(obj_orig["objectGUID"][0])
+ res = sam_ldb.search(base='<GUID=%s>' % guid_str,
+ controls=["show_deleted:1"],
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(res), 1)
+ user_cur = res[0]
+ cn_orig = str(obj_orig["cn"][0])
+ cn_cur = str(user_cur["cn"][0])
+ name_orig = str(obj_orig["name"][0])
+ name_cur = str(user_cur["name"][0])
+ dn_orig = obj_orig["dn"]
+ dn_cur = user_cur["dn"]
+ # now check properties of the user
+ if is_deleted:
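+ # deleted objects get a mangled RDN of the form '<old value>\nDEL:<GUID>',
+ # so compare only the part before the newline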
+ self.assertTrue("isDeleted" in user_cur)
+ self.assertEqual(cn_cur.split('\n')[0], cn_orig)
+ self.assertEqual(name_cur.split('\n')[0], name_orig)
+ self.assertEqual(dn_cur.get_rdn_value().split('\n')[0],
+ dn_orig.get_rdn_value())
+ self.assertEqual(name_cur, cn_cur)
+ else:
+ self.assertFalse("isDeleted" in user_cur)
+ self.assertEqual(cn_cur, cn_orig)
+ self.assertEqual(name_cur, name_orig)
+ self.assertEqual(dn_cur, dn_orig)
+ self.assertEqual(name_cur, cn_cur)
+ self.assertEqual(name_cur, user_cur.dn.get_rdn_value())
+
+ return user_cur
+
+ def test_ReplicateMoveInTree1(self):
+ """Verifies how an object is replicated between two DCs.
+ This test should verify that:
+ - a complex OU tree can be replicated correctly
+ - the user ends up in the correct spot (the place it was renamed
+ into) within the tree on both DCs
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ self.ldb_dc1.add(self.ou1)
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=False)
+
+ self.ldb_dc1.add(self.ou2)
+ self.ldb_dc1.add(self.ou3)
+ self.ldb_dc1.add(self.ou4)
+ self.ldb_dc1.add(self.ou5)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou5_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+ ldb_res = self.ldb_dc1.search(base=self.ou2_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+ user_moved_dn = ldb_res[0]["dn"]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved_orig, is_deleted=False)
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ def test_ReplicateMoveInTree2(self):
+ """Verifies how an object is replicated between two DCs.
+ This test should verify that:
+ - a complex OU tree can be replicated correctly
+ - the user ends up in the correct spot (the place it was renamed
+ into) within the tree on both DCs
+ - that a rename back works correctly, and is replicated
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ self.ldb_dc1.add(self.ou1)
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=False)
+
+ self.ldb_dc1.add(self.ou2)
+ self.ldb_dc1.add(self.ou2b)
+ self.ldb_dc1.add(self.ou3)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou3_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+
+ new_dn3 = ldb.Dn(self.ldb_dc1, "OU=%s" % self.ou3_dn.get_component_value(0))
+ new_dn3.add_base(self.ou2b_dn)
+ self.ldb_dc1.rename(self.ou3_dn, new_dn3)
+
+ ldb_res = self.ldb_dc1.search(base=new_dn3,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+ user_moved_dn = ldb_res[0]["dn"]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved_orig, is_deleted=False)
+
+ # Rename on DC1
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou1_dn)
+ self.ldb_dc1.rename(user_moved_dn, new_dn)
+
+ # Modify description on DC2
+ msg = ldb.Message()
+ msg.dn = user_moved_dn
+ msg["description"] = ldb.MessageElement("User Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+ user_moved_dn = ldb_res[0]["dn"]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved_orig, is_deleted=False)
+ self.assertTrue("description" in user_cur)
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+
+ # trigger replication from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # check user info on DC1 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_moved_orig, is_deleted=True)
+ self.assertFalse("description" in user_cur)
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be deleted user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved_orig, is_deleted=True)
+ self.assertFalse("description" in user_cur)
+
+ def test_ReplicateMoveInTree3(self):
+ """Verifies how an object is replicated between two DCs.
+ This test should verify that:
+ - a complex OU tree can be replicated correctly
+ - the user is in the correct spot (renamed into) within the tree
+ on both DCs
+ - that a rename back works correctly, and is replicated
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ self.ldb_dc1.add(self.ou1)
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=False)
+
+ self.ldb_dc1.add(self.ou2)
+ self.ldb_dc1.add(self.ou2b)
+ self.ldb_dc1.add(self.ou3)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou3_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+
+ new_dn3 = ldb.Dn(self.ldb_dc1, "OU=%s" % self.ou3_dn.get_component_value(0))
+ new_dn3.add_base(self.ou2b_dn)
+ self.ldb_dc1.rename(self.ou3_dn, new_dn3)
+
+ ldb_res = self.ldb_dc1.search(base=new_dn3,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+ user_moved_dn = ldb_res[0]["dn"]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved_orig, is_deleted=False)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_moved_dn, new_dn)
+
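+        # swap ou2 and ou2b, using ou2c as a temporary name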
+ self.ldb_dc1.rename(self.ou2_dn, self.ou2c_dn)
+ self.ldb_dc1.rename(self.ou2b_dn, self.ou2_dn)
+ self.ldb_dc1.rename(self.ou2c_dn, self.ou2b_dn)
+
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+ user_moved_dn = ldb_res[0]["dn"]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved_orig, is_deleted=False)
+
+ self.assertEqual(user_cur["parentGUID"], user_moved_orig["parentGUID"])
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ def test_ReplicateMoveInTree3b(self):
+ """Verifies how an object is replicated between two DCs.
+ This test should verify that:
+ - a complex OU tree can be replicated correctly
+ - the user is in the correct spot (renamed into) within the tree
+ on both DCs
+ - that a rename back works correctly, and is replicated
+        - that a complex rename shuffle, combined with unrelated changes to the object,
+          is replicated correctly. The aim here is to send the objects out of order
+          when sorted by usnChanged.
+        - confirm that the OU tree (and in particular the user DN) is identical between
+          the DCs once this has been replicated.
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ self.ldb_dc1.add(self.ou1)
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=False)
+
+ self.ldb_dc1.add(self.ou2)
+ self.ldb_dc1.add(self.ou2b)
+ self.ldb_dc1.add(self.ou3)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+
+ ldb_res = self.ldb_dc1.search(base=self.ou2_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+ user_moved_dn = ldb_res[0]["dn"]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved_orig, is_deleted=False)
+
+ msg = ldb.Message()
+ msg.dn = new_dn
+ msg["description"] = ldb.MessageElement("User Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc1.modify(msg)
+
+        # The sleep(1) calls here ensure that each renamed object gets a
+        # new (1-second resolution) timestamp on its name attribute, and
+        # so we control which way the conflict resolution goes.
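+        # swap ou2 and ou2b, using ou2c as a temporary name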
+ self.ldb_dc1.rename(self.ou2_dn, self.ou2c_dn)
+ time.sleep(1)
+ self.ldb_dc1.rename(self.ou2b_dn, self.ou2_dn)
+ time.sleep(1)
+ self.ldb_dc1.rename(self.ou2c_dn, self.ou2b_dn)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]), new_dn)
+
+ msg = ldb.Message()
+ msg.dn = self.ou2_dn
+ msg["description"] = ldb.MessageElement("OU2 Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc1.modify(msg)
+
+ msg = ldb.Message()
+ msg.dn = self.ou2b_dn
+ msg["description"] = ldb.MessageElement("OU2b Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc1.modify(msg)
+
+ ldb_res = self.ldb_dc1.search(base=self.ou2_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+ user_moved_dn = ldb_res[0]["dn"]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved_orig, is_deleted=False)
+ self.assertEqual(user_cur["parentGUID"][0], user_moved_orig["parentGUID"][0])
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ def test_ReplicateMoveInTree4(self):
+ """Verifies how an object is replicated between two DCs.
+ This test should verify that:
+ - an OU and user can be replicated correctly, even after a rename
+        - The creation and rename of the OU are combined with unrelated changes to the object;
+          the aim here is to send the objects out of order when sorted by usnChanged.
+        - That is, the OU will be sorted by usnChanged after the user that is within that OU.
+        - That will cause the client to need to fetch the OU first, by use of the GET_ANC flag
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ self.ldb_dc1.add(self.ou1)
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # check user info on DC1
+ print("Testing for %s with GUID %s" % (username, self._GUID_string(user_orig["objectGUID"][0])))
+ self._check_obj(sam_ldb=self.ldb_dc1, obj_orig=user_orig, is_deleted=False)
+
+ self.ldb_dc1.add(self.ou2)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+
+ msg = ldb.Message()
+ msg.dn = self.ou2_dn
+ msg["description"] = ldb.MessageElement("OU2 Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc1.modify(msg)
+
+ ldb_res = self.ldb_dc1.search(base=self.ou2_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username)
+ self.assertEqual(len(ldb_res), 1)
+
+ user_moved_orig = ldb_res[0]
+ user_moved_dn = ldb_res[0]["dn"]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved_orig, is_deleted=False)
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ def test_ReplicateAddInOU(self):
+ """Verifies how an object is replicated between two DCs.
+ This test should verify that:
+ - an OU and user can be replicated correctly
+        - The creation of the OU is combined with unrelated changes to the object;
+          the aim here is to send the objects out of order when sorted by usnChanged.
+        - That is, the OU will be sorted by usnChanged after the user that is within that OU.
+        - That will cause the client to need to fetch the OU first, by use of the GET_ANC flag
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ self.ldb_dc1.add(self.ou1)
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ msg = ldb.Message()
+ msg.dn = self.ou1_dn
+ msg["description"] = ldb.MessageElement("OU1 Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc1.modify(msg)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_orig, is_deleted=False)
+
+ self.assertEqual(user_cur["parentGUID"], user_orig["parentGUID"])
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ def test_ReplicateAddInMovedOU(self):
+ """Verifies how an object is replicated between two DCs.
+ This test should verify that:
+ - an OU and user can be replicated correctly
+        - The creation of the OU is combined with unrelated changes to the object;
+          the aim here is to send the objects out of order when sorted by usnChanged.
+        - That is, the OU will be sorted by usnChanged after the user that is within that OU.
+        - That will cause the client to need to fetch the OU first, by use of the GET_ANC flag
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ self.ldb_dc1.add(self.ou1)
+ self.ldb_dc1.add(self.ou2)
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+
+ self.ldb_dc1.rename(self.ou2_dn, self.ou2b_dn)
+
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_moved = ldb_res[0]
+ user_moved_dn = ldb_res[0]["dn"]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be valid user
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved, is_deleted=False)
+
+ self.assertEqual(user_cur["parentGUID"], user_moved["parentGUID"])
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ def test_ReplicateAddInConflictOU_time(self):
+ """Verifies how an object is replicated between two DCs, when created in an ambigious location
+ This test should verify that:
+ - Without replication, two conflicting objects can be created
+ - force the conflict resolution algorithm so we know which copy will win
+          (by sleeping between creating the objects, therefore increasing the timestamp on 'name')
+ - confirm that the user object, created on DC1, ends up in the right place on DC2
+ - therefore confirm that the conflict algorithm worked correctly, and that parentGUID was used.
+
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ self.ldb_dc1.add(self.ou1)
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+        # Now create two conflicting objects. This gives the user
+ # object something to be under on both DCs.
+
+ # We sleep between the two adds so that DC1 adds second, and
+        # so wins the conflict resolution due to a later creation time
+ # (modification timestamp on the name attribute).
+ self.ldb_dc2.add(self.ou2)
+ time.sleep(1)
+ self.ldb_dc1.add(self.ou2)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+
+        # Now that we have renamed the user (and so bumped the
+ # usnChanged), bump the value on the OUs.
+ msg = ldb.Message()
+ msg.dn = self.ou2_dn
+ msg["description"] = ldb.MessageElement("OU2 Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc1.modify(msg)
+
+ msg = ldb.Message()
+ msg.dn = self.ou2_dn
+ msg["description"] = ldb.MessageElement("OU2 Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_moved = ldb_res[0]
+ user_moved_dn = ldb_res[0]["dn"]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be under the OU2 from DC1
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved, is_deleted=False)
+
+ self.assertEqual(user_cur["parentGUID"], user_moved["parentGUID"])
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ def test_ReplicateAddInConflictOU2(self):
+ """Verifies how an object is replicated between two DCs, when created in an ambigious location
+ This test should verify that:
+ - Without replication, two conflicting objects can be created
+ - force the conflict resolution algorithm so we know which copy will win
+          (by changing the description twice, therefore increasing the version count)
+ - confirm that the user object, created on DC1, ends up in the right place on DC2
+ - therefore confirm that the conflict algorithm worked correctly, and that parentGUID was used.
+ """
+ # work-out unique username to test with
+ username = self._make_username()
+
+ self.ldb_dc1.add(self.ou1)
+
+ # create user on DC1
+ self.ldb_dc1.newuser(username=username,
+ userou="ou=%s,ou=%s"
+ % (self.ou1_dn.get_component_value(0),
+ self.top_ou.get_component_value(0)),
+ password=None, setpassword=False)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_orig = ldb_res[0]
+ user_dn = ldb_res[0]["dn"]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+        # Now create two conflicting objects. This gives the user
+        # object something to be under on both DCs. We create it on
+        # DC1 one second later so that it will win the conflict resolution.
+
+ self.ldb_dc2.add(self.ou2)
+ time.sleep(1)
+ self.ldb_dc1.add(self.ou2)
+
+ new_dn = ldb.Dn(self.ldb_dc1, "CN=%s" % username)
+ new_dn.add_base(self.ou2_dn)
+ self.ldb_dc1.rename(user_dn, new_dn)
+
+        # Now that we have renamed the user (and so bumped the
+ # usnChanged), bump the value on the OUs.
+ msg = ldb.Message()
+ msg.dn = self.ou2_dn
+ msg["description"] = ldb.MessageElement("OU2 Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc1.modify(msg)
+
+ msg = ldb.Message()
+ msg.dn = self.ou2_dn
+ msg["description"] = ldb.MessageElement("OU2 Description", ldb.FLAG_MOD_REPLACE, "description")
+ self.ldb_dc2.modify(msg)
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ ldb_res = self.ldb_dc1.search(base=self.ou1_dn,
+ scope=SCOPE_SUBTREE,
+ expression="(samAccountName=%s)" % username,
+ attrs=["*", "parentGUID"])
+ self.assertEqual(len(ldb_res), 1)
+ user_moved = ldb_res[0]
+ user_moved_dn = ldb_res[0]["dn"]
+
+ # trigger replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ # check user info on DC2 - should be under the OU2 from DC1
+ user_cur = self._check_obj(sam_ldb=self.ldb_dc2, obj_orig=user_moved, is_deleted=False)
+
+ self.assertEqual(user_cur["parentGUID"], user_moved["parentGUID"])
+
+ # delete user on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self._GUID_string(user_orig["objectGUID"][0]))
+
+ # trigger replication from DC1 to DC2, for cleanup
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
diff --git a/source4/torture/drs/python/repl_rodc.py b/source4/torture/drs/python/repl_rodc.py
new file mode 100644
index 0000000..8a45774
--- /dev/null
+++ b/source4/torture/drs/python/repl_rodc.py
@@ -0,0 +1,739 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Test replication scenarios involving an RODC
+#
+# Copyright (C) Catalyst.Net Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc1_dns_name [this is unused for the test, but it'll still try to connect]
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN repl_rodc -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import drs_base
+import samba.tests
+import ldb
+from ldb import SCOPE_BASE
+
+from samba import WERRORError
+from samba.join import DCJoinContext
+from samba.dcerpc import drsuapi, misc, drsblobs, security
+from samba.drs_utils import drs_DsBind, drs_Replicate
+from samba.ndr import ndr_unpack, ndr_pack
+from samba.samdb import dsdb_Dn
+from samba.credentials import Credentials
+
+import random
+import time
+
+
+def drs_get_rodc_partial_attribute_set(samdb, samdb1, exceptions=[]):
+ '''get a list of attributes for RODC replication'''
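+    # Typical use in the tests below: build the set once and pass it to a
+    # level-10 DsGetNCChanges request, e.g. (sketch):
+    #   pas = drs_get_rodc_partial_attribute_set(ldb_dc1, tmp_samdb)
+    #   req10 = self._getnc_req10(..., partial_attribute_set=pas, ...)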
+ partial_attribute_set = drsuapi.DsPartialAttributeSet()
+ partial_attribute_set.version = 1
+
+ attids = []
+
+ # the exact list of attids we send is quite critical. Note that
+ # we do ask for the secret attributes, but set SPECIAL_SECRET_PROCESSING
+ # to zero them out
+ schema_dn = samdb.get_schema_basedn()
+ res = samdb.search(base=schema_dn, scope=ldb.SCOPE_SUBTREE,
+ expression="objectClass=attributeSchema",
+ attrs=["lDAPDisplayName", "systemFlags",
+ "searchFlags"])
+
+ for r in res:
+ ldap_display_name = str(r["lDAPDisplayName"][0])
+ if "systemFlags" in r:
+ system_flags = str(r["systemFlags"][0])
+ if (int(system_flags) & (samba.dsdb.DS_FLAG_ATTR_NOT_REPLICATED |
+ samba.dsdb.DS_FLAG_ATTR_IS_CONSTRUCTED)):
+ continue
+ if "searchFlags" in r:
+ search_flags = str(r["searchFlags"][0])
+ if (int(search_flags) & samba.dsdb.SEARCH_FLAG_RODC_ATTRIBUTE):
+ continue
+ try:
+ attid = samdb1.get_attid_from_lDAPDisplayName(ldap_display_name)
+ if attid not in exceptions:
+ attids.append(int(attid))
+ except:
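+                # not every attribute can be mapped to an ATTID here;
+                # skip the ones that can't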
+ pass
+
+ # the attids do need to be sorted, or windows doesn't return
+ # all the attributes we need
+ attids.sort()
+ partial_attribute_set.attids = attids
+ partial_attribute_set.num_attids = len(attids)
+ return partial_attribute_set
+
+
+class DrsRodcTestCase(drs_base.DrsBaseTestCase):
+ """Intended as a semi-black box test case for replication involving
+ an RODC."""
+
+ def setUp(self):
+ super(DrsRodcTestCase, self).setUp()
+ self.base_dn = self.ldb_dc1.get_default_basedn()
+
+ self.ou = samba.tests.create_test_ou(self.ldb_dc1, "test_drs_rodc")
+ self.allowed_group = "CN=Allowed RODC Password Replication Group,CN=Users,%s" % self.base_dn
+
+ self.site = self.ldb_dc1.server_site_name()
+ self.rodc_name = "TESTRODCDRS%s" % random.randint(1, 10000000)
+ self.rodc_pass = "password12#"
+ self.computer_dn = "CN=%s,OU=Domain Controllers,%s" % (self.rodc_name, self.base_dn)
+
+ self.rodc_ctx = DCJoinContext(server=self.ldb_dc1.host_dns_name(),
+ creds=self.get_credentials(),
+ lp=self.get_loadparm(), site=self.site,
+ netbios_name=self.rodc_name,
+ targetdir=None, domain=None,
+ machinepass=self.rodc_pass)
+ self._create_rodc(self.rodc_ctx)
+ self.rodc_ctx.create_tmp_samdb()
+ self.tmp_samdb = self.rodc_ctx.tmp_samdb
+
+ rodc_creds = Credentials()
+ rodc_creds.guess(self.rodc_ctx.lp)
+ rodc_creds.set_username(self.rodc_name + '$')
+ rodc_creds.set_password(self.rodc_pass)
+ self.rodc_creds = rodc_creds
+
+ (self.drs, self.drs_handle) = self._ds_bind(self.dnsname_dc1)
+ (self.rodc_drs, self.rodc_drs_handle) = self._ds_bind(self.dnsname_dc1, rodc_creds)
+
+ def tearDown(self):
+ self.rodc_ctx.cleanup_old_join()
+ super(DrsRodcTestCase, self).tearDown()
+
+ def test_admin_repl_secrets(self):
+ """
+ When a secret attribute is set to be replicated to an RODC with the
+ admin credentials, it should always replicate regardless of whether
+ or not it's in the Allowed RODC Password Replication Group.
+ """
+ rand = random.randint(1, 10000000)
+ expected_user_attributes = [drsuapi.DRSUAPI_ATTID_lmPwdHistory,
+ drsuapi.DRSUAPI_ATTID_supplementalCredentials,
+ drsuapi.DRSUAPI_ATTID_ntPwdHistory,
+ drsuapi.DRSUAPI_ATTID_unicodePwd,
+ drsuapi.DRSUAPI_ATTID_dBCSPwd]
+
+ user_name = "test_rodcA_%s" % rand
+ user_dn = "CN=%s,%s" % (user_name, self.ou)
+ self.ldb_dc1.add({
+ "dn": user_dn,
+ "objectclass": "user",
+ "sAMAccountName": user_name
+ })
+
+ # Store some secret on this user
+ self.ldb_dc1.setpassword("(sAMAccountName=%s)" % user_name, 'penguin12#', False, user_name)
+
+ req10 = self._getnc_req10(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=user_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ partial_attribute_set=drs_get_rodc_partial_attribute_set(self.ldb_dc1, self.tmp_samdb),
+ max_objects=133,
+ replica_flags=0)
+ (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 10, req10)
+
+ # Check that the user has been added to msDSRevealedUsers
+ self._assert_in_revealed_users(user_dn, expected_user_attributes)
+
+ def test_admin_repl_secrets_DummyDN_GUID(self):
+ """
+ When a secret attribute is set to be replicated to an RODC with the
+ admin credentials, it should always replicate regardless of whether
+        or not it's in the Allowed RODC Password Replication Group.
+        This variant identifies the target object by GUID, passing only
+        the placeholder string "DummyDN" as the DN.
+ """
+ rand = random.randint(1, 10000000)
+ expected_user_attributes = [drsuapi.DRSUAPI_ATTID_lmPwdHistory,
+ drsuapi.DRSUAPI_ATTID_supplementalCredentials,
+ drsuapi.DRSUAPI_ATTID_ntPwdHistory,
+ drsuapi.DRSUAPI_ATTID_unicodePwd,
+ drsuapi.DRSUAPI_ATTID_dBCSPwd]
+
+ user_name = "test_rodcA_%s" % rand
+ user_dn = "CN=%s,%s" % (user_name, self.ou)
+ self.ldb_dc1.add({
+ "dn": user_dn,
+ "objectclass": "user",
+ "sAMAccountName": user_name
+ })
+
+ res = self.ldb_dc1.search(base=user_dn, scope=ldb.SCOPE_BASE,
+ attrs=["objectGUID"])
+
+ user_guid = misc.GUID(res[0]["objectGUID"][0])
+
+ # Store some secret on this user
+ self.ldb_dc1.setpassword("(sAMAccountName=%s)" % user_name, 'penguin12#', False, user_name)
+
+ req10 = self._getnc_req10(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str="DummyDN",
+ nc_guid=user_guid,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ partial_attribute_set=drs_get_rodc_partial_attribute_set(self.ldb_dc1, self.tmp_samdb),
+ max_objects=133,
+ replica_flags=0)
+ try:
+ (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 10, req10)
+ except WERRORError as e1:
+ (enum, estr) = e1.args
+ self.fail(f"DsGetNCChanges failed with {estr}")
+
+ # Check that the user has been added to msDSRevealedUsers
+ self._assert_in_revealed_users(user_dn, expected_user_attributes)
+
+ def test_rodc_repl_secrets(self):
+ """
+        When a secret attribute is set to be replicated to an RODC with
+        the RODC account credentials, it should not replicate while the
+        account is not in the Allowed RODC Password Replication Group.
+        The same request made with admin credentials succeeds regardless.
+ """
+ rand = random.randint(1, 10000000)
+ expected_user_attributes = [drsuapi.DRSUAPI_ATTID_lmPwdHistory,
+ drsuapi.DRSUAPI_ATTID_supplementalCredentials,
+ drsuapi.DRSUAPI_ATTID_ntPwdHistory,
+ drsuapi.DRSUAPI_ATTID_unicodePwd,
+ drsuapi.DRSUAPI_ATTID_dBCSPwd]
+
+ user_name = "test_rodcB_%s" % rand
+ user_dn = "CN=%s,%s" % (user_name, self.ou)
+ self.ldb_dc1.add({
+ "dn": user_dn,
+ "objectclass": "user",
+ "sAMAccountName": user_name
+ })
+
+ # Store some secret on this user
+ self.ldb_dc1.setpassword("(sAMAccountName=%s)" % user_name, 'penguin12#', False, user_name)
+
+ req10 = self._getnc_req10(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=user_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ partial_attribute_set=drs_get_rodc_partial_attribute_set(self.ldb_dc1, self.tmp_samdb),
+ max_objects=133,
+ replica_flags=0)
+
+ try:
+ (level, ctr) = self.rodc_drs.DsGetNCChanges(self.rodc_drs_handle, 10, req10)
+ self.fail("Successfully replicated secrets to an RODC that shouldn't have been replicated.")
+ except WERRORError as e:
+ (enum, estr) = e.args
+ self.assertEqual(enum, 8630) # ERROR_DS_DRA_SECRETS_DENIED
+
+ # send the same request again and we should get the same response
+ try:
+ (level, ctr) = self.rodc_drs.DsGetNCChanges(self.rodc_drs_handle, 10, req10)
+ self.fail("Successfully replicated secrets to an RODC that shouldn't have been replicated.")
+ except WERRORError as e1:
+ (enum, estr) = e1.args
+ self.assertEqual(enum, 8630) # ERROR_DS_DRA_SECRETS_DENIED
+
+ # Retry with Administrator credentials, ignores password replication groups
+ (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 10, req10)
+
+ # Check that the user has been added to msDSRevealedUsers
+ self._assert_in_revealed_users(user_dn, expected_user_attributes)
+
+ def test_rodc_repl_secrets_follow_on_req(self):
+ """
+ Checks that an RODC can't subvert an existing (valid) GetNCChanges
+ request to reveal secrets it shouldn't have access to.
+ """
+
+ # send an acceptable request that will match as many GUIDs as possible.
+ # Here we set the SPECIAL_SECRET_PROCESSING flag so that the request gets accepted.
+ # (On the server, this builds up the getnc_state->guids array)
+ req8 = self._exop_req8(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=self.ldb_dc1.domain_dn(),
+ exop=drsuapi.DRSUAPI_EXOP_NONE,
+ max_objects=1,
+ replica_flags=drsuapi.DRSUAPI_DRS_SPECIAL_SECRET_PROCESSING)
+ (level, ctr) = self.rodc_drs.DsGetNCChanges(self.rodc_drs_handle, 8, req8)
+
+ # Get the next replication chunk, but set REPL_SECRET this time. This
+        # is following on from the previous accepted request, but we've changed
+ # exop to now request secrets. This request should fail
+ try:
+ req8 = self._exop_req8(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=self.ldb_dc1.domain_dn(),
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET)
+ req8.highwatermark = ctr.new_highwatermark
+
+ (level, ctr) = self.rodc_drs.DsGetNCChanges(self.rodc_drs_handle, 8, req8)
+
+ self.fail("Successfully replicated secrets to an RODC that shouldn't have been replicated.")
+ except RuntimeError as e2:
+ (enum, estr) = e2.args
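+            # the attempt to switch this replication cycle over to
+            # EXOP_REPL_SECRET must be rejected; the exact error code is
+            # not asserted here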
+ pass
+
+ def test_msDSRevealedUsers_admin(self):
+ """
+ When a secret attribute is to be replicated to an RODC, the contents
+ of the attribute should be added to the msDSRevealedUsers attribute
+ of the computer object corresponding to the RODC.
+ """
+
+ rand = random.randint(1, 10000000)
+ expected_user_attributes = [drsuapi.DRSUAPI_ATTID_lmPwdHistory,
+ drsuapi.DRSUAPI_ATTID_supplementalCredentials,
+ drsuapi.DRSUAPI_ATTID_ntPwdHistory,
+ drsuapi.DRSUAPI_ATTID_unicodePwd,
+ drsuapi.DRSUAPI_ATTID_dBCSPwd]
+
+ # Add a user on DC1, add it to allowed password replication
+ # group, and replicate to RODC with EXOP_REPL_SECRETS
+ user_name = "test_rodcC_%s" % rand
+ password = "password12#"
+ user_dn = "CN=%s,%s" % (user_name, self.ou)
+ self.ldb_dc1.add({
+ "dn": user_dn,
+ "objectclass": "user",
+ "sAMAccountName": user_name
+ })
+
+ # Store some secret on this user
+ self.ldb_dc1.setpassword("(sAMAccountName=%s)" % user_name, password, False, user_name)
+
+ self.ldb_dc1.add_remove_group_members("Allowed RODC Password Replication Group",
+ [user_name],
+ add_members_operation=True)
+
+ req10 = self._getnc_req10(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=user_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ partial_attribute_set=drs_get_rodc_partial_attribute_set(self.ldb_dc1, self.tmp_samdb),
+ max_objects=133,
+ replica_flags=0)
+ (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 10, req10)
+
+ # Check that the user has been added to msDSRevealedUsers
+ (packed_attrs_1, unpacked_attrs_1) = self._assert_in_revealed_users(user_dn, expected_user_attributes)
+
+ # Change the user's password on DC1
+ self.ldb_dc1.setpassword("(sAMAccountName=%s)" % user_name, password + "1", False, user_name)
+
+ (packed_attrs_2, unpacked_attrs_2) = self._assert_in_revealed_users(user_dn, expected_user_attributes)
+ self._assert_attrlist_equals(unpacked_attrs_1, unpacked_attrs_2)
+
+ # Replicate to RODC again with EXOP_REPL_SECRETS
+ req10 = self._getnc_req10(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=user_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ partial_attribute_set=drs_get_rodc_partial_attribute_set(self.ldb_dc1, self.tmp_samdb),
+ max_objects=133,
+ replica_flags=0)
+ (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 10, req10)
+
+        # This delay matters when running against Windows, because the entry
+        # won't have been updated in time otherwise. Even with this sleep,
+        # it only passes some of the time...
+ time.sleep(5)
+
+ # Check that the entry in msDSRevealedUsers has been updated
+ (packed_attrs_3, unpacked_attrs_3) = self._assert_in_revealed_users(user_dn, expected_user_attributes)
+ self._assert_attrlist_changed(unpacked_attrs_2, unpacked_attrs_3, expected_user_attributes)
+
+ # We should be able to delete the user
+ self.ldb_dc1.deleteuser(user_name)
+
+ res = self.ldb_dc1.search(scope=ldb.SCOPE_BASE, base=self.computer_dn,
+ attrs=["msDS-RevealedUsers"])
+ self.assertFalse("msDS-RevealedUsers" in res[0])
+
+ def test_msDSRevealedUsers(self):
+ """
+ When a secret attribute is to be replicated to an RODC, the contents
+ of the attribute should be added to the msDSRevealedUsers attribute
+ of the computer object corresponding to the RODC.
+ """
+
+ rand = random.randint(1, 10000000)
+ expected_user_attributes = [drsuapi.DRSUAPI_ATTID_lmPwdHistory,
+ drsuapi.DRSUAPI_ATTID_supplementalCredentials,
+ drsuapi.DRSUAPI_ATTID_ntPwdHistory,
+ drsuapi.DRSUAPI_ATTID_unicodePwd,
+ drsuapi.DRSUAPI_ATTID_dBCSPwd]
+
+ # Add a user on DC1, add it to allowed password replication
+ # group, and replicate to RODC with EXOP_REPL_SECRETS
+ user_name = "test_rodcD_%s" % rand
+ password = "password12#"
+ user_dn = "CN=%s,%s" % (user_name, self.ou)
+ self.ldb_dc1.add({
+ "dn": user_dn,
+ "objectclass": "user",
+ "sAMAccountName": user_name
+ })
+
+ # Store some secret on this user
+ self.ldb_dc1.setpassword("(sAMAccountName=%s)" % user_name, password, False, user_name)
+
+ self.ldb_dc1.add_remove_group_members("Allowed RODC Password Replication Group",
+ [user_name],
+ add_members_operation=True)
+
+ req10 = self._getnc_req10(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=user_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ partial_attribute_set=drs_get_rodc_partial_attribute_set(self.ldb_dc1, self.tmp_samdb),
+ max_objects=133,
+ replica_flags=0)
+ (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 10, req10)
+
+ # Check that the user has been added to msDSRevealedUsers
+ (packed_attrs_1, unpacked_attrs_1) = self._assert_in_revealed_users(user_dn, expected_user_attributes)
+
+ # Change the user's password on DC1
+ self.ldb_dc1.setpassword("(sAMAccountName=%s)" % user_name, password + "1", False, user_name)
+
+ (packed_attrs_2, unpacked_attrs_2) = self._assert_in_revealed_users(user_dn, expected_user_attributes)
+ self._assert_attrlist_equals(unpacked_attrs_1, unpacked_attrs_2)
+
+ # Replicate to RODC again with EXOP_REPL_SECRETS
+ req10 = self._getnc_req10(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=user_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ partial_attribute_set=drs_get_rodc_partial_attribute_set(self.ldb_dc1, self.tmp_samdb),
+ max_objects=133,
+ replica_flags=0)
+ (level, ctr) = self.rodc_drs.DsGetNCChanges(self.rodc_drs_handle, 10, req10)
+
+        # This delay matters when running against Windows, because the entry
+        # won't have been updated in time otherwise. Even with this sleep,
+        # it only passes some of the time...
+ time.sleep(5)
+
+ # Check that the entry in msDSRevealedUsers has been updated
+ (packed_attrs_3, unpacked_attrs_3) = self._assert_in_revealed_users(user_dn, expected_user_attributes)
+ self._assert_attrlist_changed(unpacked_attrs_2, unpacked_attrs_3, expected_user_attributes)
+
+ # We should be able to delete the user
+ self.ldb_dc1.deleteuser(user_name)
+
+ res = self.ldb_dc1.search(scope=ldb.SCOPE_BASE, base=self.computer_dn,
+ attrs=["msDS-RevealedUsers"])
+ self.assertFalse("msDS-RevealedUsers" in res[0])
+
+ def test_msDSRevealedUsers_pas(self):
+ """
+        If we provide a Partial Attribute Set when replicating secrets to an
+        RODC, the server should ignore it and replicate all of the secret
+        attributes anyway, recording them in the msDSRevealedUsers attribute.
+ """
+ rand = random.randint(1, 10000000)
+ expected_user_attributes = [drsuapi.DRSUAPI_ATTID_lmPwdHistory,
+ drsuapi.DRSUAPI_ATTID_supplementalCredentials,
+ drsuapi.DRSUAPI_ATTID_ntPwdHistory,
+ drsuapi.DRSUAPI_ATTID_unicodePwd,
+ drsuapi.DRSUAPI_ATTID_dBCSPwd]
+ pas_exceptions = [drsuapi.DRSUAPI_ATTID_lmPwdHistory,
+ drsuapi.DRSUAPI_ATTID_supplementalCredentials,
+ drsuapi.DRSUAPI_ATTID_ntPwdHistory,
+ drsuapi.DRSUAPI_ATTID_dBCSPwd]
+
+ # Add a user on DC1, add it to allowed password replication
+ # group, and replicate to RODC with EXOP_REPL_SECRETS
+ user_name = "test_rodcE_%s" % rand
+ password = "password12#"
+ user_dn = "CN=%s,%s" % (user_name, self.ou)
+ self.ldb_dc1.add({
+ "dn": user_dn,
+ "objectclass": "user",
+ "sAMAccountName": user_name
+ })
+
+ # Store some secret on this user
+ self.ldb_dc1.setpassword("(sAMAccountName=%s)" % user_name, password, False, user_name)
+
+ self.ldb_dc1.add_remove_group_members("Allowed RODC Password Replication Group",
+ [user_name],
+ add_members_operation=True)
+
+ pas = drs_get_rodc_partial_attribute_set(self.ldb_dc1, self.tmp_samdb, exceptions=pas_exceptions)
+ req10 = self._getnc_req10(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=user_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ partial_attribute_set=pas,
+ max_objects=133,
+ replica_flags=0)
+ (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, 10, req10)
+
+ # Make sure that we still replicate the secrets
+ for attribute in ctr.first_object.object.attribute_ctr.attributes:
+ if attribute.attid in pas_exceptions:
+ pas_exceptions.remove(attribute.attid)
+ for attribute in pas_exceptions:
+ self.fail("%d was not replicated even though the partial attribute set should be ignored."
+ % attribute)
+
+ # Check that the user has been added to msDSRevealedUsers
+ (packed_attrs_1, unpacked_attrs_1) = self._assert_in_revealed_users(user_dn, expected_user_attributes)
+
+ def test_msDSRevealedUsers_using_other_RODC(self):
+ """
+ Ensure that the machine account is tied to the destination DSA.
+ """
+ # Create a new identical RODC with just the first letter missing
+ other_rodc_name = self.rodc_name[1:]
+ other_rodc_ctx = DCJoinContext(server=self.ldb_dc1.host_dns_name(),
+ creds=self.get_credentials(),
+ lp=self.get_loadparm(), site=self.site,
+ netbios_name=other_rodc_name,
+ targetdir=None, domain=None,
+ machinepass=self.rodc_pass)
+ self._create_rodc(other_rodc_ctx)
+
+ other_rodc_creds = Credentials()
+ other_rodc_creds.guess(other_rodc_ctx.lp)
+ other_rodc_creds.set_username(other_rodc_name + '$')
+ other_rodc_creds.set_password(self.rodc_pass)
+
+ (other_rodc_drs, other_rodc_drs_handle) = self._ds_bind(self.dnsname_dc1, other_rodc_creds)
+
+ rand = random.randint(1, 10000000)
+ expected_user_attributes = [drsuapi.DRSUAPI_ATTID_lmPwdHistory,
+ drsuapi.DRSUAPI_ATTID_supplementalCredentials,
+ drsuapi.DRSUAPI_ATTID_ntPwdHistory,
+ drsuapi.DRSUAPI_ATTID_unicodePwd,
+ drsuapi.DRSUAPI_ATTID_dBCSPwd]
+
+ user_name = "test_rodcF_%s" % rand
+ user_dn = "CN=%s,%s" % (user_name, self.ou)
+ self.ldb_dc1.add({
+ "dn": user_dn,
+ "objectclass": "user",
+ "sAMAccountName": user_name
+ })
+
+ # Store some secret on this user
+ self.ldb_dc1.setpassword("(sAMAccountName=%s)" % user_name, 'penguin12#', False, user_name)
+ self.ldb_dc1.add_remove_group_members("Allowed RODC Password Replication Group",
+ [user_name],
+ add_members_operation=True)
+
+ req10 = self._getnc_req10(dest_dsa=str(other_rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=user_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ partial_attribute_set=drs_get_rodc_partial_attribute_set(self.ldb_dc1, self.tmp_samdb),
+ max_objects=133,
+ replica_flags=0)
+
+ try:
+ (level, ctr) = self.rodc_drs.DsGetNCChanges(self.rodc_drs_handle, 10, req10)
+ self.fail("Successfully replicated secrets to an RODC that shouldn't have been replicated.")
+ except WERRORError as e3:
+ (enum, estr) = e3.args
+ self.assertEqual(enum, 8630) # ERROR_DS_DRA_SECRETS_DENIED
+
+ req10 = self._getnc_req10(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=user_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ partial_attribute_set=drs_get_rodc_partial_attribute_set(self.ldb_dc1, self.tmp_samdb),
+ max_objects=133,
+ replica_flags=0)
+
+ try:
+ (level, ctr) = other_rodc_drs.DsGetNCChanges(other_rodc_drs_handle, 10, req10)
+ self.fail("Successfully replicated secrets to an RODC that shouldn't have been replicated.")
+ except WERRORError as e4:
+ (enum, estr) = e4.args
+ self.assertEqual(enum, 8630) # ERROR_DS_DRA_SECRETS_DENIED
+
+ def test_msDSRevealedUsers_local_deny_allow(self):
+ """
+        Ensure that deny trumps allow, and that we can modify these
+        attributes directly instead of using the global groups.
+
+ This may fail on Windows due to tokenGroup calculation caching.
+ """
+ rand = random.randint(1, 10000000)
+ expected_user_attributes = [drsuapi.DRSUAPI_ATTID_lmPwdHistory,
+ drsuapi.DRSUAPI_ATTID_supplementalCredentials,
+ drsuapi.DRSUAPI_ATTID_ntPwdHistory,
+ drsuapi.DRSUAPI_ATTID_unicodePwd,
+ drsuapi.DRSUAPI_ATTID_dBCSPwd]
+
+ # Add a user on DC1, add it to allowed password replication
+ # group, and replicate to RODC with EXOP_REPL_SECRETS
+ user_name = "test_rodcF_%s" % rand
+ password = "password12#"
+ user_dn = "CN=%s,%s" % (user_name, self.ou)
+ self.ldb_dc1.add({
+ "dn": user_dn,
+ "objectclass": "user",
+ "sAMAccountName": user_name
+ })
+
+ # Store some secret on this user
+ self.ldb_dc1.setpassword("(sAMAccountName=%s)" % user_name, password, False, user_name)
+
+ req10 = self._getnc_req10(dest_dsa=str(self.rodc_ctx.ntds_guid),
+ invocation_id=self.ldb_dc1.get_invocation_id(),
+ nc_dn_str=user_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
+ partial_attribute_set=drs_get_rodc_partial_attribute_set(self.ldb_dc1, self.tmp_samdb),
+ max_objects=133,
+ replica_flags=0)
+
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, self.computer_dn)
+
+ m["msDS-RevealOnDemandGroup"] = \
+ ldb.MessageElement(user_dn, ldb.FLAG_MOD_ADD,
+ "msDS-RevealOnDemandGroup")
+ self.ldb_dc1.modify(m)
+
+ # In local allow, should be success
+ try:
+ (level, ctr) = self.rodc_drs.DsGetNCChanges(self.rodc_drs_handle, 10, req10)
+ except:
+ self.fail("Should have succeeded when in local allow group")
+
+ self._assert_in_revealed_users(user_dn, expected_user_attributes)
+
+ (self.rodc_drs, self.rodc_drs_handle) = self._ds_bind(self.dnsname_dc1, self.rodc_creds)
+
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, self.computer_dn)
+
+ m["msDS-NeverRevealGroup"] = \
+ ldb.MessageElement(user_dn, ldb.FLAG_MOD_ADD,
+ "msDS-NeverRevealGroup")
+ self.ldb_dc1.modify(m)
+
+ # In local allow and deny, should be failure
+ try:
+ (level, ctr) = self.rodc_drs.DsGetNCChanges(self.rodc_drs_handle, 10, req10)
+ self.fail("Successfully replicated secrets to an RODC that shouldn't have been replicated.")
+ except WERRORError as e5:
+ (enum, estr) = e5.args
+ self.assertEqual(enum, 8630) # ERROR_DS_DRA_SECRETS_DENIED
+
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, self.computer_dn)
+
+ m["msDS-RevealOnDemandGroup"] = \
+ ldb.MessageElement(user_dn, ldb.FLAG_MOD_DELETE,
+ "msDS-RevealOnDemandGroup")
+ self.ldb_dc1.modify(m)
+
+ # In local deny, should be failure
+ (self.rodc_drs, self.rodc_drs_handle) = self._ds_bind(self.dnsname_dc1, self.rodc_creds)
+ try:
+ (level, ctr) = self.rodc_drs.DsGetNCChanges(self.rodc_drs_handle, 10, req10)
+ self.fail("Successfully replicated secrets to an RODC that shouldn't have been replicated.")
+ except WERRORError as e6:
+ (enum, estr) = e6.args
+ self.assertEqual(enum, 8630) # ERROR_DS_DRA_SECRETS_DENIED
+
+ def _assert_in_revealed_users(self, user_dn, attrlist):
+ res = self.ldb_dc1.search(scope=ldb.SCOPE_BASE, base=self.computer_dn,
+ attrs=["msDS-RevealedUsers"])
+ revealed_users = res[0]["msDS-RevealedUsers"]
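+        # each msDS-RevealedUsers value is a binary DN; its binary part is
+        # a packed replPropertyMetaData1 blob describing one attribute that
+        # has been revealed for that user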
+ actual_attrids = []
+ packed_attrs = []
+ unpacked_attrs = []
+ for attribute in revealed_users:
+ attribute = attribute.decode('utf8')
+ dsdb_dn = dsdb_Dn(self.ldb_dc1, attribute)
+ metadata = ndr_unpack(drsblobs.replPropertyMetaData1, dsdb_dn.get_bytes())
+ if user_dn in attribute:
+ unpacked_attrs.append(metadata)
+ packed_attrs.append(dsdb_dn.get_bytes())
+ actual_attrids.append(metadata.attid)
+
+ self.assertEqual(sorted(actual_attrids), sorted(attrlist))
+
+ return (packed_attrs, unpacked_attrs)
+
+ def _assert_attrlist_equals(self, list_1, list_2):
+ return self._assert_attrlist_changed(list_1, list_2, [], num_changes=0, expected_new_usn=False)
+
+ def _assert_attrlist_changed(self, list_1, list_2, changed_attributes, num_changes=1, expected_new_usn=True):
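+        # list_1 and list_2 are parallel lists of replPropertyMetaData1
+        # entries for the same attributes; only the version (and, when a
+        # new originating write happened, the USNs) should have moved on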
+ for i in range(len(list_2)):
+ self.assertEqual(list_1[i].attid, list_2[i].attid)
+ self.assertEqual(list_1[i].originating_invocation_id, list_2[i].originating_invocation_id)
+ self.assertEqual(list_1[i].version + num_changes, list_2[i].version)
+
+ if expected_new_usn:
+ self.assertTrue(list_1[i].originating_usn < list_2[i].originating_usn)
+ self.assertTrue(list_1[i].local_usn < list_2[i].local_usn)
+ else:
+ self.assertEqual(list_1[i].originating_usn, list_2[i].originating_usn)
+ self.assertEqual(list_1[i].local_usn, list_2[i].local_usn)
+
+ if list_1[i].attid in changed_attributes:
+ # We do the changes too quickly, so unless we put sleeps
+                # in between calls, these remain the same. Checking the USNs
+ # is enough.
+ pass
+ #self.assertTrue(list_1[i].originating_change_time < list_2[i].originating_change_time)
+ else:
+ self.assertEqual(list_1[i].originating_change_time, list_2[i].originating_change_time)
+
+ def _create_rodc(self, ctx):
+ ctx.nc_list = [ctx.base_dn, ctx.config_dn, ctx.schema_dn]
+ ctx.full_nc_list = [ctx.base_dn, ctx.config_dn, ctx.schema_dn]
+ ctx.krbtgt_dn = "CN=krbtgt_%s,CN=Users,%s" % (ctx.myname, ctx.base_dn)
+
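+        # these SIDs are used by the join code to populate
+        # msDS-NeverRevealGroup / msDS-RevealOnDemandGroup on the new
+        # RODC machine account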
+ ctx.never_reveal_sid = ["<SID=%s-%s>" % (ctx.domsid, security.DOMAIN_RID_RODC_DENY),
+ "<SID=%s>" % security.SID_BUILTIN_ADMINISTRATORS,
+ "<SID=%s>" % security.SID_BUILTIN_SERVER_OPERATORS,
+ "<SID=%s>" % security.SID_BUILTIN_BACKUP_OPERATORS,
+ "<SID=%s>" % security.SID_BUILTIN_ACCOUNT_OPERATORS]
+ ctx.reveal_sid = "<SID=%s-%s>" % (ctx.domsid, security.DOMAIN_RID_RODC_ALLOW)
+
+ mysid = ctx.get_mysid()
+ admin_dn = "<SID=%s>" % mysid
+ ctx.managedby = admin_dn
+
+ ctx.userAccountControl = (samba.dsdb.UF_WORKSTATION_TRUST_ACCOUNT |
+ samba.dsdb.UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION |
+ samba.dsdb.UF_PARTIAL_SECRETS_ACCOUNT)
+
+ ctx.connection_dn = "CN=RODC Connection (FRS),%s" % ctx.ntds_dn
+ ctx.secure_channel_type = misc.SEC_CHAN_RODC
+ ctx.RODC = True
+ ctx.replica_flags = (drsuapi.DRSUAPI_DRS_INIT_SYNC |
+ drsuapi.DRSUAPI_DRS_PER_SYNC |
+ drsuapi.DRSUAPI_DRS_GET_ANC |
+ drsuapi.DRSUAPI_DRS_NEVER_SYNCED |
+ drsuapi.DRSUAPI_DRS_SPECIAL_SECRET_PROCESSING)
+
+ ctx.join_add_objects()
diff --git a/source4/torture/drs/python/repl_schema.py b/source4/torture/drs/python/repl_schema.py
new file mode 100644
index 0000000..44317ac
--- /dev/null
+++ b/source4/torture/drs/python/repl_schema.py
@@ -0,0 +1,444 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Tests various schema replication scenarios
+#
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2010
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc2_dns_name
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN repl_schema -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import time
+import random
+import ldb
+import drs_base
+
+from ldb import (
+ ERR_NO_SUCH_OBJECT,
+ LdbError,
+ SCOPE_BASE,
+ Message,
+ FLAG_MOD_ADD,
+ FLAG_MOD_REPLACE
+)
+from samba.dcerpc import drsuapi, misc
+from samba.drs_utils import drs_DsBind
+from samba import dsdb
+
+
+class DrsReplSchemaTestCase(drs_base.DrsBaseTestCase):
+
+ # prefix for all objects created
+ obj_prefix = None
+ # current Class or Attribute object id
+ obj_id = 0
+
+ def _exop_req8(self, dest_dsa, invocation_id, nc_dn_str, exop,
+ replica_flags=0, max_objects=0):
+ req8 = drsuapi.DsGetNCChangesRequest8()
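+        # a level-8 GetNCChanges request built from scratch; the
+        # highwatermark below is zeroed so the server treats this as a
+        # request starting from the beginning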
+
+ req8.destination_dsa_guid = misc.GUID(dest_dsa) if dest_dsa else misc.GUID()
+ req8.source_dsa_invocation_id = misc.GUID(invocation_id)
+ req8.naming_context = drsuapi.DsReplicaObjectIdentifier()
+ req8.naming_context.dn = str(nc_dn_str)
+ req8.highwatermark = drsuapi.DsReplicaHighWaterMark()
+ req8.highwatermark.tmp_highest_usn = 0
+ req8.highwatermark.reserved_usn = 0
+ req8.highwatermark.highest_usn = 0
+ req8.uptodateness_vector = None
+ req8.replica_flags = replica_flags
+ req8.max_object_count = max_objects
+ req8.max_ndr_size = 402116
+ req8.extended_op = exop
+ req8.fsmo_info = 0
+ req8.partial_attribute_set = None
+ req8.partial_attribute_set_ex = None
+ req8.mapping_ctr.num_mappings = 0
+ req8.mapping_ctr.mappings = None
+
+ return req8
+
+ def setUp(self):
+ super(DrsReplSchemaTestCase, self).setUp()
+
+        # disable automatic replication temporarily
+ self._disable_all_repl(self.dnsname_dc1)
+ self._disable_all_repl(self.dnsname_dc2)
+
+ # make sure DCs are synchronized before the test
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ # initialize objects prefix if not done yet
+ if self.obj_prefix is None:
+ t = time.strftime("%s", time.gmtime())
+ DrsReplSchemaTestCase.obj_prefix = "DrsReplSchema-%s" % t
+
+ def tearDown(self):
+ self._enable_all_repl(self.dnsname_dc1)
+ self._enable_all_repl(self.dnsname_dc2)
+ super(DrsReplSchemaTestCase, self).tearDown()
+
+ def _make_obj_names(self, base_name):
+ '''Try to create a unique name for an object
+ that is to be added to schema'''
+ self.obj_id += 1
+ obj_name = "%s-%d-%s" % (self.obj_prefix, self.obj_id, base_name)
+ obj_ldn = obj_name.replace("-", "")
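+        # build the DN under the schema NC, then swap the placeholder
+        # RDN value "X" for the real object name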
+ obj_dn = ldb.Dn(self.ldb_dc1, "CN=X")
+ obj_dn.add_base(ldb.Dn(self.ldb_dc1, self.schema_dn))
+ obj_dn.set_component(0, "CN", obj_name)
+ return (obj_dn, obj_name, obj_ldn)
+
+ def _schema_new_class(self, ldb_ctx, base_name, base_int, oc_cat=1, attrs=None):
+ (class_dn, class_name, class_ldn) = self._make_obj_names(base_name)
+ rec = {"dn": class_dn,
+ "objectClass": ["top", "classSchema"],
+ "cn": class_name,
+ "lDAPDisplayName": class_ldn,
+ "governsId": "1.3.6.1.4.1.7165.4.6.2.5."
+ + str((100000 * base_int) + random.randint(1, 100000)) + ".1.5.13",
+ "instanceType": "4",
+ "objectClassCategory": "%d" % oc_cat,
+ "subClassOf": "top",
+ "systemOnly": "FALSE"}
+ # allow overriding/adding attributes
+ if attrs is not None:
+ rec.update(attrs)
+ # add it to the Schema
+ try:
+ ldb_ctx.add(rec)
+ except LdbError as e:
+ (enum, estr) = e.args
+ self.fail("Adding record failed with %d/%s" % (enum, estr))
+
+ self._ldap_schemaUpdateNow(ldb_ctx)
+ return (rec["lDAPDisplayName"], rec["dn"])
+
+ def _schema_new_attr(self, ldb_ctx, base_name, base_int, attrs=None):
+ (attr_dn, attr_name, attr_ldn) = self._make_obj_names(base_name)
+ rec = {"dn": attr_dn,
+ "objectClass": ["top", "attributeSchema"],
+ "cn": attr_name,
+ "lDAPDisplayName": attr_ldn,
+ "attributeId": "1.3.6.1.4.1.7165.4.6.1.5."
+ + str((100000 * base_int) + random.randint(1, 100000)) + ".1.5.13",
+ "attributeSyntax": "2.5.5.12",
+ "omSyntax": "64",
+ "instanceType": "4",
+ "isSingleValued": "TRUE",
+ "systemOnly": "FALSE"}
+ # allow overriding/adding attributes
+ if attrs is not None:
+ rec.update(attrs)
+ # add it to the Schema
+ ldb_ctx.add(rec)
+ self._ldap_schemaUpdateNow(ldb_ctx)
+ return (rec["lDAPDisplayName"], rec["dn"])
+
+ def _check_object(self, obj_dn):
+ '''Check if object obj_dn exists on both DCs'''
+ res_dc1 = self.ldb_dc1.search(base=obj_dn,
+ scope=SCOPE_BASE,
+ attrs=["*"])
+        self.assertEqual(len(res_dc1), 1,
+                         "%s doesn't exist on %s" % (obj_dn, self.dnsname_dc1))
+ try:
+ res_dc2 = self.ldb_dc2.search(base=obj_dn,
+ scope=SCOPE_BASE,
+ attrs=["*"])
+ except LdbError as e1:
+ (enum, estr) = e1.args
+ if enum == ERR_NO_SUCH_OBJECT:
+                self.fail("%s doesn't exist on %s" % (obj_dn, self.dnsname_dc2))
+ raise
+        self.assertEqual(len(res_dc2), 1,
+                         "%s doesn't exist on %s" % (obj_dn, self.dnsname_dc2))
+
+ def test_class(self):
+ """Simple test for classSchema replication"""
+ # add new classSchema object
+ (c_ldn, c_dn) = self._schema_new_class(self.ldb_dc1, "cls-S", 0)
+ # force replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, nc_dn=self.schema_dn, forced=True)
+ # check object is replicated
+ self._check_object(c_dn)
+
+ def test_classInheritance(self):
+ """Test inheritance through subClassOf
+ I think 5 levels of inheritance is pretty decent for now."""
+ # add 5 levels deep hierarchy
+ c_dn_list = []
+ c_ldn_last = None
+ for i in range(1, 6):
+ base_name = "cls-I-%02d" % i
+ (c_ldn, c_dn) = self._schema_new_class(self.ldb_dc1, base_name, i)
+ c_dn_list.append(c_dn)
+ if c_ldn_last:
+ # inherit from last class added
+ m = Message.from_dict(self.ldb_dc1,
+ {"dn": c_dn,
+ "subClassOf": c_ldn_last},
+ FLAG_MOD_REPLACE)
+ self.ldb_dc1.modify(m)
+ # store last class ldapDisplayName
+ c_ldn_last = c_ldn
+ # force replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, nc_dn=self.schema_dn, forced=True)
+ # check objects are replicated
+ for c_dn in c_dn_list:
+ self._check_object(c_dn)
+
+ def test_classWithCustomAttribute(self):
+ """Create new Attribute and a Class,
+ that has value for newly created attribute.
+ This should check code path that searches for
+ AttributeID_id in Schema cache"""
+ # add new attributeSchema object
+ (a_ldn, a_dn) = self._schema_new_attr(self.ldb_dc1, "attr-A", 7)
+ # add a base classSchema class so we can use our new
+ # attribute in class definition in a sibling class
+ (c_ldn, c_dn) = self._schema_new_class(self.ldb_dc1, "cls-A", 8,
+ 1,
+ {"systemMayContain": a_ldn,
+ "subClassOf": "classSchema"})
+ # add new classSchema object with value for a_ldb attribute
+ (c_ldn, c_dn) = self._schema_new_class(self.ldb_dc1, "cls-B", 9,
+ 1,
+ {"objectClass": ["top", "classSchema", c_ldn],
+ a_ldn: "test_classWithCustomAttribute"})
+ # force replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, nc_dn=self.schema_dn, forced=True)
+ # check objects are replicated
+ self._check_object(c_dn)
+ self._check_object(a_dn)
+
+ def test_classWithCustomLinkAttribute(self):
+ """Create new Attribute and a Class,
+ that has value for newly created attribute.
+ This should check code path that searches for
+ AttributeID_id in Schema cache"""
+ # add new attributeSchema object
+ (a_ldn, a_dn) = self._schema_new_attr(self.ldb_dc1, "attr-Link-X", 10,
+ attrs={'linkID': "1.2.840.113556.1.2.50",
+ "attributeSyntax": "2.5.5.1",
+ "omSyntax": "127"})
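+        # Note: passing the attributeID of linkID itself (1.2.840.113556.1.2.50)
+        # as the linkID value asks the DC to auto-generate a new forward link
+        # ID for this attribute; 2.5.5.1 with omSyntax 127 is DN syntax.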
+ # add a base classSchema class so we can use our new
+ # attribute in class definition in a sibling class
+ (c_ldn, c_dn) = self._schema_new_class(self.ldb_dc1, "cls-Link-Y", 11,
+ 1,
+ {"systemMayContain": a_ldn,
+ "subClassOf": "classSchema"})
+ # add new classSchema object with value for a_ldb attribute
+ (c_ldn, c_dn) = self._schema_new_class(self.ldb_dc1, "cls-Link-Z", 12,
+ 1,
+ {"objectClass": ["top", "classSchema", c_ldn],
+ a_ldn: self.schema_dn})
+ # force replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, nc_dn=self.schema_dn, forced=True)
+ # check objects are replicated
+ self._check_object(c_dn)
+ self._check_object(a_dn)
+
+ res = self.ldb_dc1.search(base="",
+ scope=SCOPE_BASE,
+ attrs=["domainFunctionality"])
+
+ if int(res[0]["domainFunctionality"][0]) > dsdb.DS_DOMAIN_FUNCTION_2000:
+ res = self.ldb_dc1.search(base=a_dn,
+ scope=SCOPE_BASE,
+ attrs=["msDS-IntId"])
+ self.assertEqual(1, len(res))
+ self.assertTrue("msDS-IntId" in res[0])
+ int_id = int(res[0]["msDS-IntId"][0])
+ if int_id < 0:
+ int_id += (1 << 32)
+
+ dc_guid_1 = self.ldb_dc1.get_invocation_id()
+
+ drs, drs_handle = self._ds_bind(self.dnsname_dc1, ip=self.url_dc1)
+
+ req8 = self._exop_req8(dest_dsa=None,
+ invocation_id=dc_guid_1,
+ nc_dn_str=c_dn,
+ exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
+ replica_flags=drsuapi.DRSUAPI_DRS_SYNC_FORCED)
+
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+
+ for link in ctr.linked_attributes:
+ self.assertTrue(link.attid != int_id,
+ 'Got %d for both' % link.attid)
+
+ def test_attribute(self):
+ """Simple test for attributeSchema replication"""
+ # add new attributeSchema object
+ (a_ldn, a_dn) = self._schema_new_attr(self.ldb_dc1, "attr-S", 13)
+ # force replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, nc_dn=self.schema_dn, forced=True)
+ # check object is replicated
+ self._check_object(a_dn)
+
+ def test_attribute_on_ou(self):
+ """Simple test having an OU with a custome attribute replicated correctly
+
+ This ensures that the server
+ """
+
+ # add new attributeSchema object
+ (a_ldn, a_dn) = self._schema_new_attr(self.ldb_dc1, "attr-OU-S", 14)
+ (c_ldn, c_dn) = self._schema_new_class(self.ldb_dc1, "cls-OU-A", 15,
+ 3,
+ {"mayContain": a_ldn})
+ ou_dn = ldb.Dn(self.ldb_dc1, "ou=X")
+ ou_dn.add_base(self.ldb_dc1.get_default_basedn())
+ ou_dn.set_component(0, "OU", a_dn.get_component_value(0))
+ rec = {"dn": ou_dn,
+ "objectClass": ["top", "organizationalUnit", c_ldn],
+ "ou": ou_dn.get_component_value(0),
+ a_ldn: "test OU"}
+ self.ldb_dc1.add(rec)
+
+ # force replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, nc_dn=self.domain_dn, forced=True)
+ # check objects are replicated
+ self._check_object(c_dn)
+ self._check_object(a_dn)
+ self._check_object(ou_dn)
+ self.ldb_dc1.delete(ou_dn)
+
+ def test_all(self):
+ """Basic plan is to create bunch of classSchema
+ and attributeSchema objects, replicate Schema NC
+ and then check all objects are replicated correctly"""
+
+ # add new classSchema object
+ (c_ldn, c_dn) = self._schema_new_class(self.ldb_dc1, "cls-A", 16)
+ # add new attributeSchema object
+ (a_ldn, a_dn) = self._schema_new_attr(self.ldb_dc1, "attr-A", 17)
+
+ # add attribute to the class we have
+ m = Message.from_dict(self.ldb_dc1,
+ {"dn": c_dn,
+ "mayContain": a_ldn},
+ FLAG_MOD_ADD)
+ self.ldb_dc1.modify(m)
+
+ # force replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, nc_dn=self.schema_dn, forced=True)
+
+ # check objects are replicated
+ self._check_object(c_dn)
+ self._check_object(a_dn)
+
+ def test_classWithCustomBinaryDNLinkAttribute(self):
+ # Add a new attribute to the schema, which has binary DN syntax (2.5.5.7)
+ (bin_ldn, bin_dn) = self._schema_new_attr(self.ldb_dc1, "attr-Link-Bin", 18,
+ attrs={"linkID": "1.2.840.113556.1.2.50",
+ "attributeSyntax": "2.5.5.7",
+ "omSyntax": "127"})
+
+ (bin_ldn_b, bin_dn_b) = self._schema_new_attr(self.ldb_dc1, "attr-Link-Bin-Back", 19,
+ attrs={"linkID": bin_ldn,
+ "attributeSyntax": "2.5.5.1",
+ "omSyntax": "127"})
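+        # Setting linkID to the lDAPDisplayName of the forward link above
+        # makes this attribute its back link.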
+
+ # Add a new class to the schema which can have the binary DN attribute
+ (c_ldn, c_dn) = self._schema_new_class(self.ldb_dc1, "cls-Link-Bin", 20,
+ 3,
+ {"mayContain": bin_ldn})
+ (c_ldn_b, c_dn_b) = self._schema_new_class(self.ldb_dc1, "cls-Link-Bin-Back", 21,
+ 3,
+ {"mayContain": bin_ldn_b})
+
+ link_end_dn = ldb.Dn(self.ldb_dc1, "ou=X")
+ link_end_dn.add_base(self.ldb_dc1.get_default_basedn())
+ link_end_dn.set_component(0, "OU", bin_dn_b.get_component_value(0))
+
+ ou_dn = ldb.Dn(self.ldb_dc1, "ou=X")
+ ou_dn.add_base(self.ldb_dc1.get_default_basedn())
+ ou_dn.set_component(0, "OU", bin_dn.get_component_value(0))
+
+ # Add an instance of the class to be pointed at
+ rec = {"dn": link_end_dn,
+ "objectClass": ["top", "organizationalUnit", c_ldn_b],
+ "ou": link_end_dn.get_component_value(0)}
+ self.ldb_dc1.add(rec)
+
+        # ... and an instance of the class that holds the link, pointing at the first one
+ rec = {"dn": ou_dn,
+ "objectClass": ["top", "organizationalUnit", c_ldn],
+ "ou": ou_dn.get_component_value(0)}
+ self.ldb_dc1.add(rec)
+
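+        # A DN-Binary (2.5.5.7) value has the form B:<hex-char-count>:<hex>:<DN>,
+        # so B:8:1234ABCD below carries 4 bytes of binary data together with
+        # the DN of the link target.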
+ m = Message.from_dict(self.ldb_dc1,
+ {"dn": ou_dn,
+ bin_ldn: "B:8:1234ABCD:%s" % str(link_end_dn)},
+ FLAG_MOD_ADD)
+ self.ldb_dc1.modify(m)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+ nc_dn=self.schema_dn, forced=True)
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+ nc_dn=self.domain_dn, forced=True)
+
+ self._check_object(c_dn)
+ self._check_object(bin_dn)
+
+ # Make sure we can delete the backlink
+ self.ldb_dc1.delete(link_end_dn)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+ nc_dn=self.schema_dn, forced=True)
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+ nc_dn=self.domain_dn, forced=True)
+
+ def test_rename(self):
+ """Basic plan is to create a classSchema
+ and attributeSchema objects, replicate Schema NC
+ and then check all objects are replicated correctly"""
+
+ # add new classSchema object
+ (c_ldn, c_dn) = self._schema_new_class(self.ldb_dc1, "cls-B", 20)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+ nc_dn=self.schema_dn, forced=True)
+
+ # check objects are replicated
+ self._check_object(c_dn)
+
+ # rename the Class CN
+ c_dn_new = ldb.Dn(self.ldb_dc1, str(c_dn))
+ c_dn_new.set_component(0,
+ "CN",
+ c_dn.get_component_value(0) + "-NEW")
+ try:
+ self.ldb_dc1.rename(c_dn, c_dn_new)
+ except LdbError as e2:
+            (num, estr) = e2.args
+            self.fail("failed to change CN for %s: %s" % (c_dn, estr))
+
+ # force replication from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+ nc_dn=self.schema_dn, forced=True)
+
+ # check objects are replicated
+ self._check_object(c_dn_new)
diff --git a/source4/torture/drs/python/repl_secdesc.py b/source4/torture/drs/python/repl_secdesc.py
new file mode 100644
index 0000000..3c36ba6
--- /dev/null
+++ b/source4/torture/drs/python/repl_secdesc.py
@@ -0,0 +1,400 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Unix SMB/CIFS implementation.
+# Copyright (C) Catalyst.Net Ltd. 2017
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2019
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import drs_base
+import ldb
+import samba
+from samba import sd_utils
+from ldb import LdbError
+
+class ReplAclTestCase(drs_base.DrsBaseTestCase):
+
+ def setUp(self):
+ super(ReplAclTestCase, self).setUp()
+ self.mod = "(A;CIOI;GA;;;SY)"
+ self.mod_becomes = "(A;OICIIO;GA;;;SY)"
+ self.mod_inherits_as = "(A;OICIIOID;GA;;;SY)"
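+        # The ACE grants GENERIC_ALL (GA) to SYSTEM (SY) with the container-
+        # and object-inherit flags.  The tests expect it to be stored back in
+        # the canonical form above (mod_becomes) and, once propagated to child
+        # objects, to carry the ID (inherited) flag (mod_inherits_as).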
+
+ self.sd_utils_dc1 = sd_utils.SDUtils(self.ldb_dc1)
+ self.sd_utils_dc2 = sd_utils.SDUtils(self.ldb_dc2)
+
+ self.ou = samba.tests.create_test_ou(self.ldb_dc1,
+ "test_acl_inherit")
+
+ # disable replication for the tests so we can control at what point
+ # the DCs try to replicate
+ self._disable_all_repl(self.dnsname_dc1)
+ self._disable_all_repl(self.dnsname_dc2)
+
+ # make sure DCs are synchronized before the test
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+
+ def tearDown(self):
+ self.ldb_dc1.delete(self.ou, ["tree_delete:1"])
+
+ # re-enable replication
+ self._enable_all_repl(self.dnsname_dc1)
+ self._enable_all_repl(self.dnsname_dc2)
+
+ super(ReplAclTestCase, self).tearDown()
+
+ def test_acl_inheirt_new_object_1_pass(self):
+ # Set the inherited ACL on the parent OU
+ self.sd_utils_dc1.dacl_add_ace(self.ou, self.mod)
+
+ # Assert ACL set stuck as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc1.get_sd_as_sddl(self.ou))
+
+ # Make a new object
+ dn = ldb.Dn(self.ldb_dc1, "OU=l2,%s" % self.ou)
+ self.ldb_dc1.add({"dn": dn, "objectclass": "organizationalUnit"})
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ # Assert ACL replicated as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc2.get_sd_as_sddl(self.ou))
+
+ # Confirm inherited ACLs are identical and were inherited
+
+ self.assertIn(self.mod_inherits_as,
+ self.sd_utils_dc1.get_sd_as_sddl(dn))
+ self.assertEqual(self.sd_utils_dc1.get_sd_as_sddl(dn),
+ self.sd_utils_dc2.get_sd_as_sddl(dn))
+
+ def test_acl_inheirt_new_object(self):
+ # Set the inherited ACL on the parent OU
+ self.sd_utils_dc1.dacl_add_ace(self.ou, self.mod)
+
+ # Assert ACL set stuck as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc1.get_sd_as_sddl(self.ou))
+
+ # Replicate to DC2
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ # Make a new object
+ dn = ldb.Dn(self.ldb_dc1, "OU=l2,%s" % self.ou)
+ self.ldb_dc1.add({"dn": dn, "objectclass": "organizationalUnit"})
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ # Assert ACL replicated as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc2.get_sd_as_sddl(self.ou))
+
+        # Confirm inherited ACLs are identical and were inherited
+
+ self.assertIn(self.mod_inherits_as,
+ self.sd_utils_dc1.get_sd_as_sddl(dn))
+ self.assertEqual(self.sd_utils_dc1.get_sd_as_sddl(dn),
+ self.sd_utils_dc2.get_sd_as_sddl(dn))
+
+ def test_acl_inherit_existing_object(self):
+ # Make a new object
+ dn = ldb.Dn(self.ldb_dc1, "OU=l2,%s" % self.ou)
+ self.ldb_dc1.add({"dn": dn, "objectclass": "organizationalUnit"})
+
+ try:
+ self.ldb_dc2.search(scope=ldb.SCOPE_BASE,
+ base=dn,
+ attrs=[])
+ self.fail()
+ except LdbError as err:
+ enum = err.args[0]
+ self.assertEqual(enum, ldb.ERR_NO_SUCH_OBJECT)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ # Confirm it is now replicated
+ self.ldb_dc2.search(scope=ldb.SCOPE_BASE,
+ base=dn,
+ attrs=[])
+
+ # Set the inherited ACL on the parent OU
+ self.sd_utils_dc1.dacl_add_ace(self.ou, self.mod)
+
+ # Assert ACL set stuck as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc1.get_sd_as_sddl(self.ou))
+
+ # Replicate to DC2
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ # Confirm inherited ACLs are identical and were inherited
+
+ # Assert ACL replicated as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc2.get_sd_as_sddl(self.ou))
+
+ self.assertIn(self.mod_inherits_as,
+ self.sd_utils_dc1.get_sd_as_sddl(dn))
+ self.assertEqual(self.sd_utils_dc1.get_sd_as_sddl(dn),
+ self.sd_utils_dc2.get_sd_as_sddl(dn))
+
+ def test_acl_inheirt_existing_object_1_pass(self):
+ # Make a new object
+ dn = ldb.Dn(self.ldb_dc1, "OU=l2,%s" % self.ou)
+ self.ldb_dc1.add({"dn": dn, "objectclass": "organizationalUnit"})
+
+ try:
+ self.ldb_dc2.search(scope=ldb.SCOPE_BASE,
+ base=dn,
+ attrs=[])
+ self.fail()
+ except LdbError as err:
+ enum = err.args[0]
+ self.assertEqual(enum, ldb.ERR_NO_SUCH_OBJECT)
+
+ # Set the inherited ACL on the parent OU
+ self.sd_utils_dc1.dacl_add_ace(self.ou, self.mod)
+
+ # Assert ACL set as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc1.get_sd_as_sddl(self.ou))
+
+ # Replicate to DC2
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ # Assert ACL replicated as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc2.get_sd_as_sddl(self.ou))
+
+ # Confirm inherited ACLs are identical and were inherited
+
+ self.assertIn(self.mod_inherits_as,
+ self.sd_utils_dc1.get_sd_as_sddl(dn))
+ self.assertEqual(self.sd_utils_dc1.get_sd_as_sddl(dn),
+ self.sd_utils_dc2.get_sd_as_sddl(dn))
+
+ def test_acl_inheirt_renamed_object(self):
+ # Make a new object
+ new_ou = samba.tests.create_test_ou(self.ldb_dc1,
+ "acl_test_l2")
+
+ sub_ou_dn = ldb.Dn(self.ldb_dc1, "OU=l2,%s" % self.ou)
+
+ try:
+ self.ldb_dc2.search(scope=ldb.SCOPE_BASE,
+ base=new_ou,
+ attrs=[])
+ self.fail()
+ except LdbError as err:
+ enum = err.args[0]
+ self.assertEqual(enum, ldb.ERR_NO_SUCH_OBJECT)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ # Confirm it is now replicated
+ self.ldb_dc2.search(scope=ldb.SCOPE_BASE,
+ base=new_ou,
+ attrs=[])
+
+ # Set the inherited ACL on the parent OU on DC1
+ self.sd_utils_dc1.dacl_add_ace(self.ou, self.mod)
+
+ # Assert ACL set as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc1.get_sd_as_sddl(self.ou))
+
+ # Replicate to DC2
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ # Assert ACL replicated as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc2.get_sd_as_sddl(self.ou))
+
+ # Rename to under self.ou
+
+ self.ldb_dc1.rename(new_ou, sub_ou_dn)
+
+ # Replicate to DC2
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ # Confirm inherited ACLs are identical and were inherited
+ self.assertIn(self.mod_inherits_as,
+ self.sd_utils_dc1.get_sd_as_sddl(sub_ou_dn))
+ self.assertEqual(self.sd_utils_dc1.get_sd_as_sddl(sub_ou_dn),
+ self.sd_utils_dc2.get_sd_as_sddl(sub_ou_dn))
+
+
+ def test_acl_inheirt_renamed_child_object(self):
+ # Make a new OU
+ new_ou = samba.tests.create_test_ou(self.ldb_dc1,
+ "acl_test_l2")
+
+ # Here is where the new OU will end up at the end.
+ sub2_ou_dn_final = ldb.Dn(self.ldb_dc1, "OU=l2,%s" % self.ou)
+
+ sub3_ou_dn = ldb.Dn(self.ldb_dc1, "OU=l3,%s" % new_ou)
+ sub3_ou_dn_final = ldb.Dn(self.ldb_dc1, "OU=l3,%s" % sub2_ou_dn_final)
+
+ self.ldb_dc1.add({"dn": sub3_ou_dn,
+ "objectclass": "organizationalUnit"})
+
+ sub4_ou_dn = ldb.Dn(self.ldb_dc1, "OU=l4,%s" % sub3_ou_dn)
+ sub4_ou_dn_final = ldb.Dn(self.ldb_dc1, "OU=l4,%s" % sub3_ou_dn_final)
+
+ self.ldb_dc1.add({"dn": sub4_ou_dn,
+ "objectclass": "organizationalUnit"})
+
+ try:
+ self.ldb_dc2.search(scope=ldb.SCOPE_BASE,
+ base=new_ou,
+ attrs=[])
+ self.fail()
+ except LdbError as err:
+ enum = err.args[0]
+ self.assertEqual(enum, ldb.ERR_NO_SUCH_OBJECT)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ # Confirm it is now replicated
+ self.ldb_dc2.search(scope=ldb.SCOPE_BASE,
+ base=new_ou,
+ attrs=[])
+
+ #
+ # Given a tree new_ou -> l3 -> l4
+ #
+
+ # Set the inherited ACL on the grandchild OU (l3) on DC1
+ self.sd_utils_dc1.dacl_add_ace(sub3_ou_dn, self.mod)
+
+ # Assert ACL set stuck as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc1.get_sd_as_sddl(sub3_ou_dn))
+
+        # Rename new_ou (l2) to sit under self.ou (this must happen second).
+        # If the inheritance between l3 and l4 were name-based, this could
+        # break.
+
+ # The tree is now self.ou -> l2 -> l3 -> l4
+
+ self.ldb_dc1.rename(new_ou, sub2_ou_dn_final)
+
+ # Assert ACL set remained as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc1.get_sd_as_sddl(sub3_ou_dn_final))
+
+ # Replicate to DC2
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+        # Confirm the explicitly-set ACL (on l3) replicated and is identical on both DCs
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc2.get_sd_as_sddl(sub3_ou_dn_final))
+ self.assertEqual(self.sd_utils_dc1.get_sd_as_sddl(sub3_ou_dn_final),
+ self.sd_utils_dc2.get_sd_as_sddl(sub3_ou_dn_final))
+
+ # Confirm inherited ACLs (from l3 to l4) are identical
+        # and were inherited
+ self.assertIn(self.mod_inherits_as,
+ self.sd_utils_dc1.get_sd_as_sddl(sub4_ou_dn_final))
+ self.assertEqual(self.sd_utils_dc1.get_sd_as_sddl(sub4_ou_dn_final),
+ self.sd_utils_dc2.get_sd_as_sddl(sub4_ou_dn_final))
+
+
+ def test_acl_inheirt_renamed_object_in_conflict(self):
+ # Make a new object to be renamed under self.ou
+ new_ou = samba.tests.create_test_ou(self.ldb_dc1,
+ "acl_test_l2")
+
+ # Make a new OU under self.ou (on DC2)
+ sub_ou_dn = ldb.Dn(self.ldb_dc2, "OU=l2,%s" % self.ou)
+ self.ldb_dc2.add({"dn": sub_ou_dn,
+ "objectclass": "organizationalUnit"})
+
+ # Set the inherited ACL on the parent OU
+ self.sd_utils_dc1.dacl_add_ace(self.ou, self.mod)
+
+ # Assert ACL set stuck as expected
+ self.assertIn(self.mod_becomes,
+ self.sd_utils_dc1.get_sd_as_sddl(self.ou))
+
+ # Replicate to DC2
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ # Rename to under self.ou
+ self.ldb_dc1.rename(new_ou, sub_ou_dn)
+ self.assertIn(self.mod_inherits_as,
+ self.sd_utils_dc1.get_sd_as_sddl(sub_ou_dn))
+
+        # Replicate to DC2 (will cause a conflict; DC1 wins because its
+        # object's version is higher, having been named twice: create + rename)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2,
+ fromDC=self.dnsname_dc1,
+ forced=True)
+
+ children = self.ldb_dc2.search(scope=ldb.SCOPE_ONELEVEL,
+ base=self.ou,
+ attrs=[])
+ for child in children:
+ self.assertIn(self.mod_inherits_as,
+ self.sd_utils_dc2.get_sd_as_sddl(child.dn))
+ self.assertEqual(self.sd_utils_dc1.get_sd_as_sddl(sub_ou_dn),
+ self.sd_utils_dc2.get_sd_as_sddl(child.dn))
+
+ # Replicate back
+ self._net_drs_replicate(DC=self.dnsname_dc1,
+ fromDC=self.dnsname_dc2,
+ forced=True)
+
+ self.assertIn(self.mod_inherits_as,
+ self.sd_utils_dc1.get_sd_as_sddl(sub_ou_dn))
+
+ for child in children:
+ self.assertIn(self.mod_inherits_as,
+ self.sd_utils_dc1.get_sd_as_sddl(child.dn))
+ self.assertEqual(self.sd_utils_dc1.get_sd_as_sddl(child.dn),
+ self.sd_utils_dc2.get_sd_as_sddl(child.dn))
diff --git a/source4/torture/drs/python/replica_sync.py b/source4/torture/drs/python/replica_sync.py
new file mode 100644
index 0000000..cd1c941
--- /dev/null
+++ b/source4/torture/drs/python/replica_sync.py
@@ -0,0 +1,747 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Tests various schema replication scenarios
+#
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc2_dns_name
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN replica_sync -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import drs_base
+import samba.tests
+import time
+import ldb
+
+from ldb import (
+ SCOPE_BASE, LdbError, ERR_NO_SUCH_OBJECT)
+
+
+class DrsReplicaSyncTestCase(drs_base.DrsBaseTestCase):
+ """Intended as a black box test case for DsReplicaSync
+    implementation. It should test the behavior of replication
+    in cases where inbound replication is disabled"""
+
+ def setUp(self):
+ super(DrsReplicaSyncTestCase, self).setUp()
+
+ # This OU avoids this test conflicting with anything
+ # that may already be in the DB
+ self.top_ou = samba.tests.create_test_ou(self.ldb_dc1,
+ "replica_sync")
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+ self.ou1 = None
+ self.ou2 = None
+
+ def tearDown(self):
+ self._cleanup_object(self.ou1)
+ self._cleanup_object(self.ou2)
+ self._cleanup_dn(self.top_ou)
+
+ # re-enable replication
+ self._enable_inbound_repl(self.dnsname_dc1)
+ self._enable_inbound_repl(self.dnsname_dc2)
+
+ super(DrsReplicaSyncTestCase, self).tearDown()
+
+ def _cleanup_dn(self, dn):
+ try:
+ self.ldb_dc2.delete(dn, ["tree_delete:1"])
+ except LdbError as e:
+ (num, _) = e.args
+ self.assertEqual(num, ERR_NO_SUCH_OBJECT)
+ try:
+ self.ldb_dc1.delete(dn, ["tree_delete:1"])
+ except LdbError as e1:
+ (num, _) = e1.args
+ self.assertEqual(num, ERR_NO_SUCH_OBJECT)
+
+ def _cleanup_object(self, guid):
+ """Cleans up a test object, if it still exists"""
+ if guid is not None:
+ self._cleanup_dn('<GUID=%s>' % guid)
+
+ def test_ReplEnabled(self):
+ """Tests we can replicate when replication is enabled"""
+ self._enable_inbound_repl(self.dnsname_dc1)
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=False)
+
+ def test_ReplDisabled(self):
+ """Tests we cann't replicate when replication is disabled"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+
+ ccache_name = self.get_creds_ccache_name()
+
+ # Tunnel the command line credentials down to the
+ # subcommand to avoid a new kinit
+ cmdline_auth = "--use-krb5-ccache=%s" % ccache_name
+
+ # bin/samba-tool drs <drs_command> <cmdline_auth>
+ cmd_list = ["drs", "replicate", cmdline_auth]
+
+ nc_dn = self.domain_dn
+ # bin/samba-tool drs replicate <Dest_DC_NAME> <Src_DC_NAME> <Naming Context>
+ cmd_list += [self.dnsname_dc1, self.dnsname_dc2, nc_dn]
+
+ (result, out, err) = self.runsubcmd(*cmd_list)
+ self.assertCmdFail(result)
+ self.assertTrue('WERR_DS_DRA_SINK_DISABLED' in err)
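+        # WERR_DS_DRA_SINK_DISABLED is returned when inbound replication has
+        # been disabled on the destination DC and the request does not force
+        # replication.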
+
+ def test_ReplDisabledForced(self):
+ """Tests we can force replicate when replication is disabled"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+        self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True)
+
+ def test_ReplLocal(self):
+ """Tests we can replicate direct to the local db"""
+ self._enable_inbound_repl(self.dnsname_dc1)
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=False, local=True, full_sync=True)
+
+ def _create_ou(self, samdb, name):
+ ldif = """
+dn: %s,%s
+objectClass: organizationalUnit
+""" % (name, self.top_ou)
+ samdb.add_ldif(ldif)
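+        # Return the objectGUID rather than the DN so the object can still be
+        # found (and cleaned up) after it has been renamed or conflict-mangled.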
+ res = samdb.search(base="%s,%s" % (name, self.top_ou),
+ scope=SCOPE_BASE, attrs=["objectGUID"])
+ return self._GUID_string(res[0]["objectGUID"][0])
+
+ def _check_deleted(self, sam_ldb, guid):
+        # search for the OU by GUID as it may be deleted
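+        # (the show_deleted control makes the tombstoned object visible to
+        # the search; deleted objects are otherwise filtered out)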
+ res = sam_ldb.search(base='<GUID=%s>' % guid,
+ controls=["show_deleted:1"],
+ attrs=["isDeleted", "objectCategory", "ou"])
+ self.assertEqual(len(res), 1)
+ ou_cur = res[0]
+ # Deleted Object base DN
+ dodn = self._deleted_objects_dn(sam_ldb)
+        # now check properties of the OU
+ name_cur = ou_cur["ou"][0]
+ self.assertEqual(ou_cur["isDeleted"][0], b"TRUE")
+ self.assertTrue(not("objectCategory" in ou_cur))
+ self.assertTrue(dodn in str(ou_cur["dn"]),
+ "OU %s is deleted but it is not located under %s!" % (name_cur, dodn))
+
+ def test_ReplConflictsFullSync(self):
+ """Tests that objects created in conflict become conflict DNs (honour full sync override)"""
+
+ # First confirm local replication (so when we test against windows, this fails fast without creating objects)
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, local=True, forced=True, full_sync=True)
+
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # Create conflicting objects on DC1 and DC2, with DC1 object created first
+ self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Full Sync")
+ # We have to sleep to ensure that the two objects have different timestamps
+ time.sleep(1)
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Full Sync")
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, local=True, forced=True, full_sync=True)
+
+        # Check that DC2 got the DC1 object, and ou1 was made into a conflict
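+        # The losing object's RDN is mangled to
+        # "<original name>\nCNF:<objectGUID>", which is what the CNF:
+        # assertions below look for.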
+ res1 = self.ldb_dc2.search(base="<GUID=%s>" % self.ou1,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % self.ou2,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0]["name"][0])
+ print(res2[0]["name"][0])
+ self.assertFalse('CNF:%s' % self.ou2 in str(res2[0]["name"][0]))
+ self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc2, self.domain_dn) not in str(res1[0].dn))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc2, self.domain_dn) not in str(res2[0].dn))
+ self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
+ self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
+
+ # Delete both objects by GUID on DC2
+
+ self.ldb_dc2.delete('<GUID=%s>' % self.ou1)
+ self.ldb_dc2.delete('<GUID=%s>' % self.ou2)
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=True)
+
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
+
+ def test_ReplConflictsRemoteWin(self):
+ """Tests that objects created in conflict become conflict DNs"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # Create conflicting objects on DC1 and DC2, with DC1 object created first
+ self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Remote Conflict")
+ # We have to sleep to ensure that the two objects have different timestamps
+ time.sleep(1)
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Remote Conflict")
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+        # Check that DC1 got the DC2 object, and ou1 was made into a conflict
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0]["name"][0])
+ print(res2[0]["name"][0])
+ self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
+ self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
+ self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
+
+ # Delete both objects by GUID on DC1
+
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
+
+ def test_ReplConflictsLocalWin(self):
+ """Tests that objects created in conflict become conflict DNs"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # Create conflicting objects on DC1 and DC2, with DC2 object created first
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Local Conflict")
+ # We have to sleep to ensure that the two objects have different timestamps
+ time.sleep(1)
+ self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Local Conflict")
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+        # Check that DC1 got the DC2 object, and ou2 was made into a conflict
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0]["name"][0])
+ print(res2[0]["name"][0])
+ self.assertTrue('CNF:%s' % self.ou2 in str(res2[0]["name"][0]), "Got %s for %s" % (str(res2[0]["name"][0]), self.ou2))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
+ self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
+ self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
+
+ # Delete both objects by GUID on DC1
+
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
+
+ def test_ReplConflictsRemoteWin_with_child(self):
+ """Tests that objects created in conflict become conflict DNs"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # Create conflicting objects on DC1 and DC2, with DC1 object created first
+ self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Parent Remote Conflict")
+ # We have to sleep to ensure that the two objects have different timestamps
+ time.sleep(1)
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Parent Remote Conflict")
+        # Create children on both DCs
+ ou1_child = self._create_ou(self.ldb_dc1, "OU=Test Child,OU=Test Parent Remote Conflict")
+ ou2_child = self._create_ou(self.ldb_dc2, "OU=Test Child,OU=Test Parent Remote Conflict")
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+        # Check that DC1 got the DC2 object, and self.ou1 was made into a conflict
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0]["name"][0])
+ print(res2[0]["name"][0])
+ self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
+ self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
+ self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
+
+ # Delete both objects by GUID on DC1
+
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1, ["tree_delete:1"])
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2, ["tree_delete:1"])
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
+
+ self._check_deleted(self.ldb_dc1, ou1_child)
+ self._check_deleted(self.ldb_dc1, ou2_child)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, ou1_child)
+ self._check_deleted(self.ldb_dc2, ou2_child)
+
+ def test_ReplConflictsRenamedVsNewRemoteWin(self):
+ """Tests resolving a DN conflict between a renamed object and a new object"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # Create an OU and rename it on DC1
+ self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Remote Rename Conflict orig")
+ self.ldb_dc1.rename("<GUID=%s>" % self.ou1, "OU=Test Remote Rename Conflict,%s" % self.top_ou)
+
+ # We have to sleep to ensure that the two objects have different timestamps
+ time.sleep(1)
+
+ # create a conflicting object with the same DN on DC2
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Remote Rename Conflict")
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+        # Check that DC1 got the DC2 object, and self.ou1 was made into a conflict
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0]["name"][0])
+ print(res2[0]["name"][0])
+ self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
+ self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
+ self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
+
+ # Delete both objects by GUID on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
+
+ def test_ReplConflictsRenamedVsNewLocalWin(self):
+ """Tests resolving a DN conflict between a renamed object and a new object"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # Create conflicting objects on DC1 and DC2, where the DC2 object has been renamed
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Rename Local Conflict orig")
+ self.ldb_dc2.rename("<GUID=%s>" % self.ou2, "OU=Test Rename Local Conflict,%s" % self.top_ou)
+ # We have to sleep to ensure that the two objects have different timestamps
+ time.sleep(1)
+ self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Rename Local Conflict")
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+        # Check that DC1 got the DC2 object, and ou2 was made into a conflict
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0]["name"][0])
+ print(res2[0]["name"][0])
+ self.assertTrue('CNF:%s' % self.ou2 in str(res2[0]["name"][0]))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
+ self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
+ self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
+
+ # Delete both objects by GUID on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
+
+ def test_ReplConflictsRenameRemoteWin(self):
+ """Tests that objects created in conflict become conflict DNs"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # Create conflicting objects on DC1 and DC2, with DC1 object created first
+ self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Remote Rename Conflict")
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Remote Rename Conflict 2")
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ self.ldb_dc1.rename("<GUID=%s>" % self.ou1, "OU=Test Remote Rename Conflict 3,%s" % self.top_ou)
+ # We have to sleep to ensure that the two objects have different timestamps
+ time.sleep(1)
+ self.ldb_dc2.rename("<GUID=%s>" % self.ou2, "OU=Test Remote Rename Conflict 3,%s" % self.top_ou)
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+        # Check that DC1 got the DC2 object, and self.ou1 was made into a conflict
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0]["name"][0])
+ print(res2[0]["name"][0])
+ self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
+ self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
+ self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
+
+ # Delete both objects by GUID on DC1
+
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
+
+ def test_ReplConflictsRenameRemoteWin_with_child(self):
+ """Tests that objects created in conflict become conflict DNs"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # Create conflicting objects on DC1 and DC2, with DC1 object created first
+ self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Parent Remote Rename Conflict")
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Parent Remote Rename Conflict 2")
+        # Create children on both DCs
+ ou1_child = self._create_ou(self.ldb_dc1, "OU=Test Child,OU=Test Parent Remote Rename Conflict")
+ ou2_child = self._create_ou(self.ldb_dc2, "OU=Test Child,OU=Test Parent Remote Rename Conflict 2")
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ self.ldb_dc1.rename("<GUID=%s>" % self.ou1, "OU=Test Parent Remote Rename Conflict 3,%s" % self.top_ou)
+ # We have to sleep to ensure that the two objects have different timestamps
+ time.sleep(1)
+ self.ldb_dc2.rename("<GUID=%s>" % self.ou2, "OU=Test Parent Remote Rename Conflict 3,%s" % self.top_ou)
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+        # Check that DC1 got the DC2 object, and self.ou1 was made into a conflict
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0]["name"][0])
+ print(res2[0]["name"][0])
+ self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
+ self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
+ self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
+
+ # Delete both objects by GUID on DC1
+
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1, ["tree_delete:1"])
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2, ["tree_delete:1"])
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
+
+ self._check_deleted(self.ldb_dc1, ou1_child)
+ self._check_deleted(self.ldb_dc1, ou2_child)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, ou1_child)
+ self._check_deleted(self.ldb_dc2, ou2_child)
+
+ def test_ReplConflictsRenameLocalWin(self):
+ """Tests that objects created in conflict become conflict DNs"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # Create conflicting objects on DC1 and DC2, with DC1 object created first
+ self.ou1 = self._create_ou(self.ldb_dc1, "OU=Test Rename Local Conflict")
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Test Rename Local Conflict 2")
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ self.ldb_dc2.rename("<GUID=%s>" % self.ou2, "OU=Test Rename Local Conflict 3,%s" % self.top_ou)
+ # We have to sleep to ensure that the two objects have different timestamps
+ time.sleep(1)
+ self.ldb_dc1.rename("<GUID=%s>" % self.ou1, "OU=Test Rename Local Conflict 3,%s" % self.top_ou)
+
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+        # Check that DC1 got the DC2 object, and ou2 was made into a conflict
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0]["name"][0])
+ print(res2[0]["name"][0])
+ self.assertTrue('CNF:%s' % self.ou2 in str(res2[0]["name"][0]))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res1[0].dn))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) not in str(res2[0].dn))
+ self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
+ self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
+
+ # Delete both objects by GUID on DC1
+
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
+
+ def test_ReplLostAndFound(self):
+ """Tests that objects created under a OU deleted eleswhere end up in lostAndFound"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # Create two OUs on DC2
+ self.ou1 = self._create_ou(self.ldb_dc2, "OU=Deleted parent")
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Deleted parent 2")
+
+ # replicate them from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ # Delete both objects by GUID on DC1
+
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+
+ # Create children on DC2
+ ou1_child = self._create_ou(self.ldb_dc2, "OU=Test Child,OU=Deleted parent")
+ ou2_child = self._create_ou(self.ldb_dc2, "OU=Test Child,OU=Deleted parent 2")
+
+ # Replicate from DC2
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+        # Check the sub-OUs are now in lostAndFound and one of them is a conflict DN
+
+        # Check that DC1 got the DC2 children, and one or the other child was made into a conflict
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % ou1_child,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc1.search(base="<GUID=%s>" % ou2_child,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0]["name"][0])
+ print(res2[0]["name"][0])
+ self.assertTrue('CNF:%s' % ou1_child in str(res1[0]["name"][0]) or 'CNF:%s' % ou2_child in str(res2[0]["name"][0]))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) in str(res1[0].dn))
+ self.assertTrue(self._lost_and_found_dn(self.ldb_dc1, self.domain_dn) in str(res2[0].dn))
+ self.assertEqual(str(res1[0]["name"][0]), res1[0].dn.get_rdn_value())
+ self.assertEqual(str(res2[0]["name"][0]), res2[0].dn.get_rdn_value())
+
+ # Delete all objects by GUID on DC1
+
+ self.ldb_dc1.delete('<GUID=%s>' % ou1_child)
+ self.ldb_dc1.delete('<GUID=%s>' % ou2_child)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ # Check all deleted on DC1
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ self._check_deleted(self.ldb_dc1, ou1_child)
+ self._check_deleted(self.ldb_dc1, ou2_child)
+ # Check all deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
+ self._check_deleted(self.ldb_dc2, ou1_child)
+ self._check_deleted(self.ldb_dc2, ou2_child)
+
+ def test_ReplRenames(self):
+ """Tests that objects created under a OU deleted eleswhere end up in lostAndFound"""
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # Create two OUs on DC2
+ self.ou1 = self._create_ou(self.ldb_dc2, "OU=Original parent")
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Original parent 2")
+
+ # replicate them from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ # Create children on DC1
+ ou1_child = self._create_ou(self.ldb_dc1, "OU=Test Child,OU=Original parent")
+ ou2_child = self._create_ou(self.ldb_dc1, "OU=Test Child 2,OU=Original parent")
+ ou3_child = self._create_ou(self.ldb_dc1, "OU=Test Case Child,OU=Original parent")
+
+ # replicate them from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ self.ldb_dc1.rename("<GUID=%s>" % ou2_child, "OU=Test Child 3,OU=Original parent 2,%s" % self.top_ou)
+ self.ldb_dc1.rename("<GUID=%s>" % ou1_child, "OU=Test Child 2,OU=Original parent 2,%s" % self.top_ou)
+ self.ldb_dc1.rename("<GUID=%s>" % ou2_child, "OU=Test Child,OU=Original parent 2,%s" % self.top_ou)
+ self.ldb_dc1.rename("<GUID=%s>" % ou3_child, "OU=Test CASE Child,OU=Original parent,%s" % self.top_ou)
+ self.ldb_dc2.rename("<GUID=%s>" % self.ou2, "OU=Original parent 3,%s" % self.top_ou)
+ self.ldb_dc2.rename("<GUID=%s>" % self.ou1, "OU=Original parent 2,%s" % self.top_ou)
+
+ # replicate them from DC1 to DC2
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ # Check the sub-OUs are now under Original Parent 3 (original
+        # parent 2 for Test CASE Child), and all have the right names
+
+ # Check that DC2 got the DC1 object, and the renames are all correct
+ res1 = self.ldb_dc2.search(base="<GUID=%s>" % ou1_child,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc2.search(base="<GUID=%s>" % ou2_child,
+ scope=SCOPE_BASE, attrs=["name"])
+ res3 = self.ldb_dc2.search(base="<GUID=%s>" % ou3_child,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0].dn)
+ print(res2[0].dn)
+ print(res3[0].dn)
+ self.assertEqual('Test Child 2', str(res1[0]["name"][0]))
+ self.assertEqual('Test Child', str(res2[0]["name"][0]))
+ self.assertEqual('Test CASE Child', str(res3[0]["name"][0]))
+ self.assertEqual(str(res1[0].dn), "OU=Test Child 2,OU=Original parent 3,%s" % self.top_ou)
+ self.assertEqual(str(res2[0].dn), "OU=Test Child,OU=Original parent 3,%s" % self.top_ou)
+ self.assertEqual(str(res3[0].dn), "OU=Test CASE Child,OU=Original parent 2,%s" % self.top_ou)
+
+ # replicate them from DC2 to DC1
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ # Check that DC1 got the DC2 object, and the renames are all correct
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % ou1_child,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc1.search(base="<GUID=%s>" % ou2_child,
+ scope=SCOPE_BASE, attrs=["name"])
+ res3 = self.ldb_dc1.search(base="<GUID=%s>" % ou3_child,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0].dn)
+ print(res2[0].dn)
+ print(res3[0].dn)
+ self.assertEqual('Test Child 2', str(res1[0]["name"][0]))
+ self.assertEqual('Test Child', str(res2[0]["name"][0]))
+ self.assertEqual('Test CASE Child', str(res3[0]["name"][0]))
+ self.assertEqual(str(res1[0].dn), "OU=Test Child 2,OU=Original parent 3,%s" % self.top_ou)
+ self.assertEqual(str(res2[0].dn), "OU=Test Child,OU=Original parent 3,%s" % self.top_ou)
+ self.assertEqual(str(res3[0].dn), "OU=Test CASE Child,OU=Original parent 2,%s" % self.top_ou)
+
+ # Delete all objects by GUID on DC1
+
+ self.ldb_dc1.delete('<GUID=%s>' % ou1_child)
+ self.ldb_dc1.delete('<GUID=%s>' % ou2_child)
+ self.ldb_dc1.delete('<GUID=%s>' % ou3_child)
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ # Check all deleted on DC1
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ self._check_deleted(self.ldb_dc1, ou1_child)
+ self._check_deleted(self.ldb_dc1, ou2_child)
+ self._check_deleted(self.ldb_dc1, ou3_child)
+ # Check all deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
+ self._check_deleted(self.ldb_dc2, ou1_child)
+ self._check_deleted(self.ldb_dc2, ou2_child)
+ self._check_deleted(self.ldb_dc2, ou3_child)
+
+ def reanimate_object(self, samdb, guid, new_dn):
+ """Re-animates a deleted object"""
+ res = samdb.search(base="<GUID=%s>" % guid, attrs=["isDeleted"],
+ controls=['show_deleted:1'], scope=SCOPE_BASE)
+ if len(res) != 1:
+ return
+
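+        # Undelete (reanimation) is a single modify that removes isDeleted and
+        # rewrites distinguishedName to move the tombstone back to a live DN.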
+ msg = ldb.Message()
+ msg.dn = res[0].dn
+ msg["isDeleted"] = ldb.MessageElement([], ldb.FLAG_MOD_DELETE, "isDeleted")
+ msg["distinguishedName"] = ldb.MessageElement([new_dn], ldb.FLAG_MOD_REPLACE, "distinguishedName")
+ samdb.modify(msg, ["show_deleted:1"])
+
+ def test_ReplReanimationConflict(self):
+ """
+ Checks that if a reanimated object conflicts with a new object, then
+ the conflict is resolved correctly.
+ """
+
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ # create an object, "accidentally" delete it, and replicate the changes to both DCs
+ self.ou1 = self._create_ou(self.ldb_dc2, "OU=Conflict object")
+ self.ldb_dc2.delete('<GUID=%s>' % self.ou1)
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ # Now pretend that the admin for one DC resolves the problem by
+ # re-animating the object...
+ self.reanimate_object(self.ldb_dc1, self.ou1, "OU=Conflict object,%s" % self.top_ou)
+
+ # ...whereas another admin just creates a user with the same name
+ # again on a different DC
+ time.sleep(1)
+ self.ou2 = self._create_ou(self.ldb_dc2, "OU=Conflict object")
+
+ # Now sync the DCs to resolve the conflict
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2, forced=True, full_sync=False)
+
+ # Check the latest change won and SELF.OU1 was made into a conflict
+ res1 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou1,
+ scope=SCOPE_BASE, attrs=["name"])
+ res2 = self.ldb_dc1.search(base="<GUID=%s>" % self.ou2,
+ scope=SCOPE_BASE, attrs=["name"])
+ print(res1[0]["name"][0])
+ print(res2[0]["name"][0])
+ self.assertTrue('CNF:%s' % self.ou1 in str(res1[0]["name"][0]))
+ self.assertFalse('CNF:%s' % self.ou2 in str(res2[0]["name"][0]))
+
+ # Delete both objects by GUID on DC1
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True, full_sync=False)
+
+ self._check_deleted(self.ldb_dc1, self.ou1)
+ self._check_deleted(self.ldb_dc1, self.ou2)
+ # Check deleted on DC2
+ self._check_deleted(self.ldb_dc2, self.ou1)
+ self._check_deleted(self.ldb_dc2, self.ou2)
diff --git a/source4/torture/drs/python/replica_sync_rodc.py b/source4/torture/drs/python/replica_sync_rodc.py
new file mode 100644
index 0000000..cbdcc12
--- /dev/null
+++ b/source4/torture/drs/python/replica_sync_rodc.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Test conflict scenarios on the RODC
+#
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
+# Copyright (C) Catalyst.NET Ltd 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc2_dns_name (RODC)
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN replica_sync_rodc -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import drs_base
+import samba.tests
+import time
+import ldb
+from samba.common import get_string
+
+from ldb import (
+ SCOPE_BASE, LdbError, ERR_NO_SUCH_OBJECT)
+
+
+class DrsReplicaSyncTestCase(drs_base.DrsBaseTestCase):
+ """Intended as a black box test case for DsReplicaSync
+    implementation. It should test the behavior of replication
+    in cases where inbound replication is disabled"""
+
+ def setUp(self):
+ super(DrsReplicaSyncTestCase, self).setUp()
+ self._disable_all_repl(self.dnsname_dc1)
+ self.ou1 = None
+ self.ou2 = None
+
+ def tearDown(self):
+ # re-enable replication
+ self._enable_all_repl(self.dnsname_dc1)
+
+ super(DrsReplicaSyncTestCase, self).tearDown()
+
+ def _create_ou(self, samdb, name):
+ ldif = """
+dn: %s,%s
+objectClass: organizationalUnit
+""" % (name, self.domain_dn)
+ samdb.add_ldif(ldif)
+ res = samdb.search(base="%s,%s" % (name, self.domain_dn),
+ scope=SCOPE_BASE, attrs=["objectGUID"])
+ return get_string(self._GUID_string(res[0]["objectGUID"][0]))
+
+ def _check_deleted(self, sam_ldb, guid):
+        # search for the OU by GUID as it may be deleted
+ res = sam_ldb.search(base='<GUID=%s>' % guid,
+ controls=["show_deleted:1"],
+ attrs=["isDeleted", "objectCategory", "ou"])
+ self.assertEqual(len(res), 1)
+ ou_cur = res[0]
+ # Deleted Object base DN
+ dodn = self._deleted_objects_dn(sam_ldb)
+        # now check properties of the OU
+        name_cur = ou_cur["ou"][0]
+        self.assertEqual(ou_cur["isDeleted"][0], b"TRUE")
+ self.assertTrue(not("objectCategory" in ou_cur))
+ self.assertTrue(dodn in str(ou_cur["dn"]),
+ "OU %s is deleted but it is not located under %s!" % (name_cur, dodn))
+
+ def test_ReplConflictsRODC(self):
+ """Tests that objects created in conflict become conflict DNs"""
+ # Replicate all objects to RODC beforehand
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ # Create conflicting objects on DC1 and DC2, with DC1 object created first
+ name = "OU=Test RODC Conflict"
+ self.ou1 = self._create_ou(self.ldb_dc1, name)
+
+ # Replicate single object
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+ nc_dn="%s,%s" % (name, self.domain_dn),
+ local=True, single=True, forced=True)
+
+ # Delete the object, so another can be added
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+
+ # Create a conflicting DN as it would appear to the RODC
+ self.ou2 = self._create_ou(self.ldb_dc1, name)
+
+ try:
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+ nc_dn="%s,%s" % (name, self.domain_dn),
+ local=True, single=True, forced=True)
+        except Exception:
+ # Cleanup the object
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+ return
+
+        # Replication must not succeed; otherwise the highwater mark (HWM) would be updated incorrectly.
+ self.fail("DRS replicate should have failed.")
+
+ def test_ReplConflictsRODCRename(self):
+ """Tests that objects created in conflict become conflict DNs"""
+ # Replicate all objects to RODC beforehand
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1, forced=True)
+
+ # Create conflicting objects on DC1 and DC2, with DC1 object created first
+ name = "OU=Test RODC Rename Conflict"
+ self.ou1 = self._create_ou(self.ldb_dc1, name)
+
+ # Replicate single object
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+ nc_dn="%s,%s" % (name, self.domain_dn),
+ local=True, single=True, forced=True)
+
+ # Create a non-conflicting DN to rename as conflicting
+ free_name = "OU=Test RODC Rename No Conflict"
+ self.ou2 = self._create_ou(self.ldb_dc1, free_name)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+ nc_dn="%s,%s" % (free_name, self.domain_dn),
+ local=True, single=True, forced=True)
+
+ # Delete the object, so we can rename freely
+ # DO NOT REPLICATE TO THE RODC
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou1)
+
+ # Collide the name from the RODC perspective
+ self.ldb_dc1.rename("<GUID=%s>" % self.ou2, "%s,%s" % (name, self.domain_dn))
+
+ try:
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1,
+ nc_dn="%s,%s" % (name, self.domain_dn),
+ local=True, single=True, forced=True)
+        except Exception:
+ # Cleanup the object
+ self.ldb_dc1.delete('<GUID=%s>' % self.ou2)
+ return
+
+        # Replication must not succeed; otherwise the highwater mark (HWM) would be updated incorrectly.
+ self.fail("DRS replicate should have failed.")
diff --git a/source4/torture/drs/python/ridalloc_exop.py b/source4/torture/drs/python/ridalloc_exop.py
new file mode 100644
index 0000000..0d46eee
--- /dev/null
+++ b/source4/torture/drs/python/ridalloc_exop.py
@@ -0,0 +1,813 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Tests various RID allocation scenarios
+#
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2016
+# Copyright (C) Catalyst IT Ltd. 2016
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+# export DC1=dc1_dns_name
+# export DC2=dc2_dns_name
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/torture/drs/python" $SUBUNITRUN ridalloc_exop -U"$DOMAIN/$DC_USERNAME"%"$DC_PASSWORD"
+#
+
+import drs_base
+import samba.tests
+
+import ldb
+from ldb import SCOPE_BASE
+
+from samba.dcerpc import drsuapi, misc
+from samba.drs_utils import drs_DsBind
+from samba.samdb import SamDB
+
+import shutil
+import tempfile
+import os
+from samba.auth import system_session, admin_session
+from samba.dbchecker import dbcheck
+from samba.ndr import ndr_pack
+from samba.dcerpc import security
+from samba import drs_utils, dsdb
+
+
+class DrsReplicaSyncTestCase(drs_base.DrsBaseTestCase):
+ """Intended as a semi-black box test case for DsGetNCChanges
+ implementation for extended operations. It should be testing
+ how DsGetNCChanges handles different input params (mostly invalid).
+ Final goal is to make DsGetNCChanges as binary compatible to
+ Windows implementation as possible"""
+
+ def setUp(self):
+ super(DrsReplicaSyncTestCase, self).setUp()
+
+ def tearDown(self):
+ super(DrsReplicaSyncTestCase, self).tearDown()
+
+ def _determine_fSMORoleOwner(self, fsmo_obj_dn):
+ """Returns (owner, not_owner) pair where:
+ owner: dns name for FSMO owner
+ not_owner: dns name for DC not owning the FSMO"""
+ # collect info to return later
+ fsmo_info_1 = {"dns_name": self.dnsname_dc1,
+ "invocation_id": self.ldb_dc1.get_invocation_id(),
+ "ntds_guid": self.ldb_dc1.get_ntds_GUID(),
+ "server_dn": self.ldb_dc1.get_serverName()}
+ fsmo_info_2 = {"dns_name": self.dnsname_dc2,
+ "invocation_id": self.ldb_dc2.get_invocation_id(),
+ "ntds_guid": self.ldb_dc2.get_ntds_GUID(),
+ "server_dn": self.ldb_dc2.get_serverName()}
+
+ msgs = self.ldb_dc1.search(scope=ldb.SCOPE_BASE, base=fsmo_info_1["server_dn"], attrs=["serverReference"])
+ fsmo_info_1["server_acct_dn"] = ldb.Dn(self.ldb_dc1, msgs[0]["serverReference"][0].decode('utf8'))
+ fsmo_info_1["rid_set_dn"] = ldb.Dn(self.ldb_dc1, "CN=RID Set") + fsmo_info_1["server_acct_dn"]
+
+ msgs = self.ldb_dc2.search(scope=ldb.SCOPE_BASE, base=fsmo_info_2["server_dn"], attrs=["serverReference"])
+ fsmo_info_2["server_acct_dn"] = ldb.Dn(self.ldb_dc2, msgs[0]["serverReference"][0].decode('utf8'))
+ fsmo_info_2["rid_set_dn"] = ldb.Dn(self.ldb_dc2, "CN=RID Set") + fsmo_info_2["server_acct_dn"]
+
+ # determine the owner dc
+ res = self.ldb_dc1.search(fsmo_obj_dn,
+ scope=SCOPE_BASE, attrs=["fSMORoleOwner"])
+ assert len(res) == 1, "Only one fSMORoleOwner value expected for %s!" % fsmo_obj_dn
+ fsmo_owner = res[0]["fSMORoleOwner"][0]
+ if fsmo_owner == self.info_dc1["dsServiceName"][0]:
+ return (fsmo_info_1, fsmo_info_2)
+ return (fsmo_info_2, fsmo_info_1)
+
+ def _check_exop_failed(self, ctr6, expected_failure):
+ self.assertEqual(ctr6.extended_ret, expected_failure)
+ #self.assertEqual(ctr6.object_count, 0)
+ #self.assertEqual(ctr6.first_object, None)
+ self.assertEqual(ctr6.more_data, False)
+ self.assertEqual(ctr6.nc_object_count, 0)
+ self.assertEqual(ctr6.nc_linked_attributes_count, 0)
+ self.assertEqual(ctr6.linked_attributes_count, 0)
+ self.assertEqual(ctr6.linked_attributes, [])
+ self.assertEqual(ctr6.drs_error[0], 0)
+
+ def test_InvalidDestDSA_ridalloc(self):
+ """Test RID allocation with invalid destination DSA guid"""
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ req8 = self._exop_req8(dest_dsa="9c637462-5b8c-4467-aef2-bdb1f57bc4ef",
+ invocation_id=fsmo_owner["invocation_id"],
+ nc_dn_str=fsmo_dn,
+ exop=drsuapi.DRSUAPI_EXOP_FSMO_RID_ALLOC)
+
+ (drs, drs_handle) = self._ds_bind(fsmo_owner["dns_name"])
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ self.assertEqual(level, 6, "Expected level 6 response!")
+ self._check_exop_failed(ctr, drsuapi.DRSUAPI_EXOP_ERR_UNKNOWN_CALLER)
+ self.assertEqual(ctr.source_dsa_guid, misc.GUID(fsmo_owner["ntds_guid"]))
+ self.assertEqual(ctr.source_dsa_invocation_id, misc.GUID(fsmo_owner["invocation_id"]))
+
+ def test_do_ridalloc(self):
+ """Test doing a RID allocation with a valid destination DSA guid"""
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ req8 = self._exop_req8(dest_dsa=fsmo_not_owner["ntds_guid"],
+ invocation_id=fsmo_owner["invocation_id"],
+ nc_dn_str=fsmo_dn,
+ exop=drsuapi.DRSUAPI_EXOP_FSMO_RID_ALLOC)
+
+ (drs, drs_handle) = self._ds_bind(fsmo_owner["dns_name"])
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ self.assertEqual(level, 6, "Expected level 6 response!")
+ self.assertEqual(ctr.source_dsa_guid, misc.GUID(fsmo_owner["ntds_guid"]))
+ self.assertEqual(ctr.source_dsa_invocation_id, misc.GUID(fsmo_owner["invocation_id"]))
+ ctr6 = ctr
+ self.assertEqual(ctr6.extended_ret, drsuapi.DRSUAPI_EXOP_ERR_SUCCESS)
+ self.assertEqual(ctr6.object_count, 3)
+ self.assertNotEqual(ctr6.first_object, None)
+ self.assertEqual(ldb.Dn(self.ldb_dc1, ctr6.first_object.object.identifier.dn), fsmo_dn)
+ self.assertNotEqual(ctr6.first_object.next_object, None)
+ self.assertNotEqual(ctr6.first_object.next_object.next_object, None)
+ second_object = ctr6.first_object.next_object.object
+ self.assertEqual(ldb.Dn(self.ldb_dc1, second_object.identifier.dn), fsmo_not_owner["rid_set_dn"])
+ third_object = ctr6.first_object.next_object.next_object.object
+ self.assertEqual(ldb.Dn(self.ldb_dc1, third_object.identifier.dn), fsmo_not_owner["server_acct_dn"])
+
+ self.assertEqual(ctr6.more_data, False)
+ self.assertEqual(ctr6.nc_object_count, 0)
+ self.assertEqual(ctr6.nc_linked_attributes_count, 0)
+ self.assertEqual(ctr6.drs_error[0], 0)
+ # We don't check the linked_attributes_count as if the domain
+ # has an RODC, it can gain links on the server account object
+
+ def test_do_ridalloc_get_anc(self):
+ """Test doing a RID allocation with a valid destination DSA guid and GET_ANC flag"""
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ req8 = self._exop_req8(dest_dsa=fsmo_not_owner["ntds_guid"],
+ invocation_id=fsmo_owner["invocation_id"],
+ nc_dn_str=fsmo_dn,
+ exop=drsuapi.DRSUAPI_EXOP_FSMO_RID_ALLOC,
+ replica_flags=drsuapi.DRSUAPI_DRS_GET_ANC)
+
+ (drs, drs_handle) = self._ds_bind(fsmo_owner["dns_name"])
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ self.assertEqual(level, 6, "Expected level 6 response!")
+ self.assertEqual(ctr.source_dsa_guid, misc.GUID(fsmo_owner["ntds_guid"]))
+ self.assertEqual(ctr.source_dsa_invocation_id, misc.GUID(fsmo_owner["invocation_id"]))
+ ctr6 = ctr
+ self.assertEqual(ctr6.extended_ret, drsuapi.DRSUAPI_EXOP_ERR_SUCCESS)
+ self.assertEqual(ctr6.object_count, 3)
+ self.assertNotEqual(ctr6.first_object, None)
+ self.assertEqual(ldb.Dn(self.ldb_dc1, ctr6.first_object.object.identifier.dn), fsmo_dn)
+ self.assertNotEqual(ctr6.first_object.next_object, None)
+ self.assertNotEqual(ctr6.first_object.next_object.next_object, None)
+ second_object = ctr6.first_object.next_object.object
+ self.assertEqual(ldb.Dn(self.ldb_dc1, second_object.identifier.dn), fsmo_not_owner["rid_set_dn"])
+ third_object = ctr6.first_object.next_object.next_object.object
+ self.assertEqual(ldb.Dn(self.ldb_dc1, third_object.identifier.dn), fsmo_not_owner["server_acct_dn"])
+ self.assertEqual(ctr6.more_data, False)
+ self.assertEqual(ctr6.nc_object_count, 0)
+ self.assertEqual(ctr6.nc_linked_attributes_count, 0)
+ self.assertEqual(ctr6.drs_error[0], 0)
+ # We don't check the linked_attributes_count as if the domain
+ # has an RODC, it can gain links on the server account object
+
+ def test_edit_rid_master(self):
+ """Test doing a RID allocation after changing the RID master from the original one.
+ This should set rIDNextRID to 0 on the new RID master."""
+ # 1. a. Transfer role to non-RID master
+ # b. Check that it succeeds correctly
+ #
+ # 2. a. Call the RID alloc against the former master.
+ # b. Check that it succeeds.
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ # 1. Swap RID master role
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb_dc1, "")
+ m["becomeRidMaster"] = ldb.MessageElement("1", ldb.FLAG_MOD_REPLACE,
+ "becomeRidMaster")
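+        # Writing becomeRidMaster to the rootDSE asks that DC to request a
+        # transfer of the RID master FSMO role to itself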
+
+ # Make sure that ldb_dc1 == RID Master
+
+ server_dn = str(ldb.Dn(self.ldb_dc1, self.ldb_dc1.get_dsServiceName()).parent())
+
+ # self.ldb_dc1 == LOCALDC
+ if server_dn == fsmo_owner['server_dn']:
+ # ldb_dc1 == VAMPIREDC
+ ldb_dc1, ldb_dc2 = self.ldb_dc2, self.ldb_dc1
+ else:
+ # Otherwise switch the two
+ ldb_dc1, ldb_dc2 = self.ldb_dc1, self.ldb_dc2
+
+ try:
+ # ldb_dc1 is now RID MASTER (as VAMPIREDC)
+ ldb_dc1.modify(m)
+ except ldb.LdbError as e1:
+ (num, msg) = e1.args
+ self.fail("Failed to reassign RID Master " + msg)
+
+ try:
+ # 2. Perform a RID alloc
+ req8 = self._exop_req8(dest_dsa=fsmo_owner["ntds_guid"],
+ invocation_id=fsmo_not_owner["invocation_id"],
+ nc_dn_str=fsmo_dn,
+ exop=drsuapi.DRSUAPI_EXOP_FSMO_RID_ALLOC)
+
+ (drs, drs_handle) = self._ds_bind(fsmo_not_owner["dns_name"])
+ # 3. Make sure the allocation succeeds
+ try:
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+ except RuntimeError as e:
+ self.fail("RID allocation failed: " + str(e))
+
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+
+ self.assertEqual(level, 6, "Expected level 6 response!")
+ self.assertEqual(ctr.source_dsa_guid, misc.GUID(fsmo_not_owner["ntds_guid"]))
+ self.assertEqual(ctr.source_dsa_invocation_id, misc.GUID(fsmo_not_owner["invocation_id"]))
+ ctr6 = ctr
+ self.assertEqual(ctr6.extended_ret, drsuapi.DRSUAPI_EXOP_ERR_SUCCESS)
+ self.assertEqual(ctr6.object_count, 3)
+ self.assertNotEqual(ctr6.first_object, None)
+ self.assertEqual(ldb.Dn(ldb_dc2, ctr6.first_object.object.identifier.dn), fsmo_dn)
+ self.assertNotEqual(ctr6.first_object.next_object, None)
+ self.assertNotEqual(ctr6.first_object.next_object.next_object, None)
+ second_object = ctr6.first_object.next_object.object
+ self.assertEqual(ldb.Dn(self.ldb_dc1, second_object.identifier.dn), fsmo_owner["rid_set_dn"])
+ third_object = ctr6.first_object.next_object.next_object.object
+ self.assertEqual(ldb.Dn(self.ldb_dc1, third_object.identifier.dn), fsmo_owner["server_acct_dn"])
+ finally:
+ # Swap the RID master back for other tests
+ m = ldb.Message()
+ m.dn = ldb.Dn(ldb_dc2, "")
+ m["becomeRidMaster"] = ldb.MessageElement("1", ldb.FLAG_MOD_REPLACE, "becomeRidMaster")
+ try:
+ ldb_dc2.modify(m)
+ except ldb.LdbError as e:
+ (num, msg) = e.args
+ self.fail("Failed to restore RID Master " + msg)
+
+ def test_offline_samba_tool_seized_ridalloc(self):
+ """Perform a join against the non-RID manager and then seize the RID Manager role"""
+
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ targetdir = self._test_join(fsmo_not_owner['dns_name'], "RIDALLOCTEST1")
+ try:
+ # Connect to the database
+ ldb_url = "tdb://%s" % os.path.join(targetdir, "private/sam.ldb")
+ smbconf = os.path.join(targetdir, "etc/smb.conf")
+
+ lp = self.get_loadparm()
+ new_ldb = SamDB(ldb_url, credentials=self.get_credentials(),
+ session_info=system_session(lp), lp=lp)
+
+ # 1. Get server name
+ res = new_ldb.search(base=ldb.Dn(new_ldb, new_ldb.get_serverName()),
+ scope=ldb.SCOPE_BASE, attrs=["serverReference"])
+ # 2. Get server reference
+ server_ref_dn = ldb.Dn(new_ldb, res[0]['serverReference'][0].decode('utf8'))
+
+ # Assert that no RID Set has been set
+ res = new_ldb.search(base=server_ref_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences'])
+
+ self.assertFalse("rIDSetReferences" in res[0])
+
+ (result, out, err) = self.runsubcmd("fsmo", "seize", "--role", "rid", "-H", ldb_url, "--configfile=%s" % (smbconf), "--force")
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+
+ # 3. Assert we get the RID Set
+ res = new_ldb.search(base=server_ref_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences'])
+
+ self.assertTrue("rIDSetReferences" in res[0])
+ finally:
+ shutil.rmtree(targetdir, ignore_errors=True)
+ self._test_force_demote(fsmo_not_owner['dns_name'], "RIDALLOCTEST1")
+
+ def _test_join(self, server, netbios_name):
+ tmpdir = os.path.join(self.tempdir, "targetdir")
+ creds = self.get_credentials()
+ (result, out, err) = self.runsubcmd("domain", "join",
+ creds.get_realm(),
+ "dc", "-U%s%%%s" % (creds.get_username(),
+ creds.get_password()),
+ '--targetdir=%s' % tmpdir,
+ '--server=%s' % server,
+ "--option=netbios name = %s" % netbios_name)
+ self.assertCmdSuccess(result, out, err)
+ return tmpdir
+
+ def _test_force_demote(self, server, netbios_name):
+ creds = self.get_credentials()
+ (result, out, err) = self.runsubcmd("domain", "demote",
+ "-U%s%%%s" % (creds.get_username(),
+ creds.get_password()),
+ '--server=%s' % server,
+ "--remove-other-dead-server=%s" % netbios_name)
+ self.assertCmdSuccess(result, out, err)
+
+ def test_offline_manual_seized_ridalloc_with_dbcheck(self):
+ """Peform the same actions as test_offline_samba_tool_seized_ridalloc,
+ but do not create the RID set. Confirm that dbcheck correctly creates
+ the RID Set.
+
+ Also check
+ """
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ targetdir = self._test_join(fsmo_not_owner['dns_name'], "RIDALLOCTEST2")
+ try:
+ # Connect to the database
+ ldb_url = "tdb://%s" % os.path.join(targetdir, "private/sam.ldb")
+ lp = self.get_loadparm()
+
+ new_ldb = SamDB(ldb_url, credentials=self.get_credentials(),
+ session_info=system_session(lp), lp=lp)
+
+ serviceName = new_ldb.get_dsServiceName()
+ m = ldb.Message()
+ m.dn = fsmo_dn
+ m["fSMORoleOwner"] = ldb.MessageElement(serviceName,
+ ldb.FLAG_MOD_REPLACE,
+ "fSMORoleOwner")
+ new_ldb.modify(m)
+
+ # 1. Get server name
+ res = new_ldb.search(base=ldb.Dn(new_ldb, new_ldb.get_serverName()),
+ scope=ldb.SCOPE_BASE, attrs=["serverReference"])
+ # 2. Get server reference
+ server_ref_dn = ldb.Dn(new_ldb, res[0]['serverReference'][0].decode('utf8'))
+
+ # Assert that no RID Set has been set
+ res = new_ldb.search(base=server_ref_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences'])
+
+ self.assertFalse("rIDSetReferences" in res[0])
+
+ smbconf = os.path.join(targetdir, "etc/smb.conf")
+
+ chk = dbcheck(new_ldb, verbose=False, fix=True, yes=True, quiet=True)
+
+ self.assertEqual(chk.check_database(DN=server_ref_dn, scope=ldb.SCOPE_BASE), 1, "Should have fixed one error (missing RID Set)")
+
+ # 3. Assert we get the RID Set
+ res = new_ldb.search(base=server_ref_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences'])
+
+ self.assertTrue("rIDSetReferences" in res[0])
+ finally:
+ self._test_force_demote(fsmo_not_owner['dns_name'], "RIDALLOCTEST2")
+ shutil.rmtree(targetdir, ignore_errors=True)
+
+ def test_offline_manual_seized_ridalloc_add_user(self):
+ """Peform the same actions as test_offline_samba_tool_seized_ridalloc,
+ but do not create the RID set. Confirm that user-add correctly creates
+ the RID Set."""
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ targetdir = self._test_join(fsmo_not_owner['dns_name'], "RIDALLOCTEST3")
+ try:
+ # Connect to the database
+ ldb_url = "tdb://%s" % os.path.join(targetdir, "private/sam.ldb")
+ lp = self.get_loadparm()
+
+ new_ldb = SamDB(ldb_url, credentials=self.get_credentials(),
+ session_info=system_session(lp), lp=lp)
+
+ serviceName = new_ldb.get_dsServiceName()
+ m = ldb.Message()
+ m.dn = fsmo_dn
+ m["fSMORoleOwner"] = ldb.MessageElement(serviceName,
+ ldb.FLAG_MOD_REPLACE,
+ "fSMORoleOwner")
+ new_ldb.modify(m)
+
+ # 1. Get server name
+ res = new_ldb.search(base=ldb.Dn(new_ldb, new_ldb.get_serverName()),
+ scope=ldb.SCOPE_BASE, attrs=["serverReference"])
+ # 2. Get server reference
+ server_ref_dn = ldb.Dn(new_ldb, res[0]['serverReference'][0].decode('utf8'))
+
+ # Assert that no RID Set has been set
+ res = new_ldb.search(base=server_ref_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences'])
+
+ self.assertFalse("rIDSetReferences" in res[0])
+
+ smbconf = os.path.join(targetdir, "etc/smb.conf")
+
+ new_ldb.newuser("ridalloctestuser", "P@ssword!")
+
+ # 3. Assert we get the RID Set
+ res = new_ldb.search(base=server_ref_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences'])
+
+ self.assertTrue("rIDSetReferences" in res[0])
+
+ finally:
+ self._test_force_demote(fsmo_not_owner['dns_name'], "RIDALLOCTEST3")
+ shutil.rmtree(targetdir, ignore_errors=True)
+
+ def test_offline_manual_seized_ridalloc_add_user_as_admin(self):
+ """Peform the same actions as test_offline_samba_tool_seized_ridalloc,
+ but do not create the RID set. Confirm that user-add correctly creates
+ the RID Set."""
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ targetdir = self._test_join(fsmo_not_owner['dns_name'], "RIDALLOCTEST4")
+ try:
+ # Connect to the database
+ ldb_url = "tdb://%s" % os.path.join(targetdir, "private/sam.ldb")
+ lp = self.get_loadparm()
+
+ new_ldb = SamDB(ldb_url, credentials=self.get_credentials(),
+ session_info=admin_session(lp, self.ldb_dc1.get_domain_sid()), lp=lp)
+
+ serviceName = new_ldb.get_dsServiceName()
+ m = ldb.Message()
+ m.dn = fsmo_dn
+ m["fSMORoleOwner"] = ldb.MessageElement(serviceName,
+ ldb.FLAG_MOD_REPLACE,
+ "fSMORoleOwner")
+ new_ldb.modify(m)
+
+ # 1. Get server name
+ res = new_ldb.search(base=ldb.Dn(new_ldb, new_ldb.get_serverName()),
+ scope=ldb.SCOPE_BASE, attrs=["serverReference"])
+ # 2. Get server reference
+ server_ref_dn = ldb.Dn(new_ldb, res[0]['serverReference'][0].decode('utf8'))
+
+ # Assert that no RID Set has been set
+ res = new_ldb.search(base=server_ref_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences'])
+
+ self.assertFalse("rIDSetReferences" in res[0])
+
+ smbconf = os.path.join(targetdir, "etc/smb.conf")
+
+ # Create a user to allocate a RID Set for itself (the RID master)
+ new_ldb.newuser("ridalloctestuser", "P@ssword!")
+
+ # 3. Assert we get the RID Set
+ res = new_ldb.search(base=server_ref_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences'])
+
+ self.assertTrue("rIDSetReferences" in res[0])
+
+ finally:
+ self._test_force_demote(fsmo_not_owner['dns_name'], "RIDALLOCTEST4")
+ shutil.rmtree(targetdir, ignore_errors=True)
+
+ def test_join_time_ridalloc(self):
+ """Perform a join against the RID manager and assert we have a RID Set"""
+
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ targetdir = self._test_join(fsmo_owner['dns_name'], "RIDALLOCTEST5")
+ try:
+ # Connect to the database
+ ldb_url = "tdb://%s" % os.path.join(targetdir, "private/sam.ldb")
+ smbconf = os.path.join(targetdir, "etc/smb.conf")
+
+ lp = self.get_loadparm()
+ new_ldb = SamDB(ldb_url, credentials=self.get_credentials(),
+ session_info=system_session(lp), lp=lp)
+
+ # 1. Get server name
+ res = new_ldb.search(base=ldb.Dn(new_ldb, new_ldb.get_serverName()),
+ scope=ldb.SCOPE_BASE, attrs=["serverReference"])
+ # 2. Get server reference
+ server_ref_dn = ldb.Dn(new_ldb, res[0]['serverReference'][0].decode('utf8'))
+
+ # 3. Assert we get the RID Set
+ res = new_ldb.search(base=server_ref_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences'])
+
+ self.assertTrue("rIDSetReferences" in res[0])
+ finally:
+ self._test_force_demote(fsmo_owner['dns_name'], "RIDALLOCTEST5")
+ shutil.rmtree(targetdir, ignore_errors=True)
+
+ def test_rid_set_dbcheck(self):
+ """Perform a join against the RID manager and assert we have a RID Set.
+ Using dbcheck, we assert that we can detect out of range users."""
+
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ targetdir = self._test_join(fsmo_owner['dns_name'], "RIDALLOCTEST6")
+ try:
+ # Connect to the database
+ ldb_url = "tdb://%s" % os.path.join(targetdir, "private/sam.ldb")
+ smbconf = os.path.join(targetdir, "etc/smb.conf")
+
+ lp = self.get_loadparm()
+ new_ldb = SamDB(ldb_url, credentials=self.get_credentials(),
+ session_info=system_session(lp), lp=lp)
+
+ # 1. Get server name
+ res = new_ldb.search(base=ldb.Dn(new_ldb, new_ldb.get_serverName()),
+ scope=ldb.SCOPE_BASE, attrs=["serverReference"])
+ # 2. Get server reference
+ server_ref_dn = ldb.Dn(new_ldb, res[0]['serverReference'][0].decode('utf8'))
+
+ # 3. Assert we get the RID Set
+ res = new_ldb.search(base=server_ref_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences'])
+
+ self.assertTrue("rIDSetReferences" in res[0])
+ rid_set_dn = ldb.Dn(new_ldb, res[0]["rIDSetReferences"][0].decode('utf8'))
+
+ # 4. Add a new user (triggers RID set work)
+ new_ldb.newuser("ridalloctestuser", "P@ssword!")
+
+ # 5. Now fetch the RID SET
+ rid_set_res = new_ldb.search(base=rid_set_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDNextRid',
+ 'rIDAllocationPool'])
+ next_pool = int(rid_set_res[0]["rIDAllocationPool"][0])
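+            # rIDAllocationPool packs two 32-bit RIDs into one 64-bit value:
+            # the upper 32 bits hold the last RID in the allocated pool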
+ last_rid = (0xFFFFFFFF00000000 & next_pool) >> 32
+
+ # 6. Add user above the ridNextRid and at mid-range.
+ #
+ # We can do this with safety because this is an offline DB that will be
+ # destroyed.
+ m = ldb.Message()
+ m.dn = ldb.Dn(new_ldb, "CN=ridsettestuser1,CN=Users")
+ m.dn.add_base(new_ldb.get_default_basedn())
+ m['objectClass'] = ldb.MessageElement('user', ldb.FLAG_MOD_ADD, 'objectClass')
+ m['objectSid'] = ldb.MessageElement(ndr_pack(security.dom_sid(str(new_ldb.get_domain_sid()) + "-%d" % (last_rid - 10))),
+ ldb.FLAG_MOD_ADD,
+ 'objectSid')
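+            # The relax control lets us add the object with an explicit
+            # objectSid, bypassing normal RID allocation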
+ new_ldb.add(m, controls=["relax:0"])
+
+ # 7. Check the RID Set
+ chk = dbcheck(new_ldb, verbose=False, fix=True, yes=True, quiet=True)
+
+ # Should have one error (wrong rIDNextRID)
+ self.assertEqual(chk.check_database(DN=rid_set_dn, scope=ldb.SCOPE_BASE), 1)
+
+            # 8. Assert the re-check doesn't show any other errors
+ chk = dbcheck(new_ldb, verbose=False, fix=False, quiet=True)
+
+ rid_set_res = new_ldb.search(base=rid_set_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDNextRid',
+ 'rIDAllocationPool'])
+ last_allocated_rid = int(rid_set_res[0]["rIDNextRid"][0])
+ self.assertEqual(last_allocated_rid, last_rid - 10)
+
+ # 9. Assert that the range wasn't thrown away
+
+ next_pool = int(rid_set_res[0]["rIDAllocationPool"][0])
+            self.assertEqual(last_rid, (0xFFFFFFFF00000000 & next_pool) >> 32, "rid pool should not have changed")
+ finally:
+ self._test_force_demote(fsmo_owner['dns_name'], "RIDALLOCTEST6")
+ shutil.rmtree(targetdir, ignore_errors=True)
+
+ def test_rid_set_dbcheck_after_seize(self):
+ """Perform a join against the RID manager and assert we have a RID Set.
+ We seize the RID master role, then using dbcheck, we assert that we can
+ detect out of range users (and then bump the RID set as required)."""
+
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ targetdir = self._test_join(fsmo_owner['dns_name'], "RIDALLOCTEST7")
+ try:
+ # Connect to the database
+ ldb_url = "tdb://%s" % os.path.join(targetdir, "private/sam.ldb")
+ smbconf = os.path.join(targetdir, "etc/smb.conf")
+
+ lp = self.get_loadparm()
+ new_ldb = SamDB(ldb_url, credentials=self.get_credentials(),
+ session_info=system_session(lp), lp=lp)
+
+ # 1. Get server name
+ res = new_ldb.search(base=ldb.Dn(new_ldb, new_ldb.get_serverName()),
+ scope=ldb.SCOPE_BASE, attrs=["serverReference"])
+ # 2. Get server reference
+ server_ref_dn = ldb.Dn(new_ldb, res[0]['serverReference'][0].decode('utf8'))
+
+ # 3. Assert we get the RID Set
+ res = new_ldb.search(base=server_ref_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences'])
+
+ self.assertTrue("rIDSetReferences" in res[0])
+ rid_set_dn = ldb.Dn(new_ldb, res[0]["rIDSetReferences"][0].decode('utf8'))
+ # 4. Seize the RID Manager role
+ (result, out, err) = self.runsubcmd("fsmo", "seize", "--role", "rid", "-H", ldb_url, "--configfile=%s" % (smbconf), "--force")
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+
+ # 5. Add a new user (triggers RID set work)
+ new_ldb.newuser("ridalloctestuser", "P@ssword!")
+
+ # 6. Now fetch the RID SET
+ rid_set_res = new_ldb.search(base=rid_set_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDNextRid',
+ 'rIDAllocationPool'])
+ next_pool = int(rid_set_res[0]["rIDAllocationPool"][0])
+ last_rid = (0xFFFFFFFF00000000 & next_pool) >> 32
+
+ # 7. Add user above the ridNextRid and at almost the end of the range.
+ #
+ m = ldb.Message()
+ m.dn = ldb.Dn(new_ldb, "CN=ridsettestuser2,CN=Users")
+ m.dn.add_base(new_ldb.get_default_basedn())
+ m['objectClass'] = ldb.MessageElement('user', ldb.FLAG_MOD_ADD, 'objectClass')
+ m['objectSid'] = ldb.MessageElement(ndr_pack(security.dom_sid(str(new_ldb.get_domain_sid()) + "-%d" % (last_rid - 3))),
+ ldb.FLAG_MOD_ADD,
+ 'objectSid')
+ new_ldb.add(m, controls=["relax:0"])
+
+ # 8. Add user above the ridNextRid and at the end of the range
+ m = ldb.Message()
+ m.dn = ldb.Dn(new_ldb, "CN=ridsettestuser3,CN=Users")
+ m.dn.add_base(new_ldb.get_default_basedn())
+ m['objectClass'] = ldb.MessageElement('user', ldb.FLAG_MOD_ADD, 'objectClass')
+ m['objectSid'] = ldb.MessageElement(ndr_pack(security.dom_sid(str(new_ldb.get_domain_sid()) + "-%d" % last_rid)),
+ ldb.FLAG_MOD_ADD,
+ 'objectSid')
+ new_ldb.add(m, controls=["relax:0"])
+
+ chk = dbcheck(new_ldb, verbose=False, fix=True, yes=True, quiet=True)
+
+ # Should have fixed two errors (wrong ridNextRid)
+ self.assertEqual(chk.check_database(DN=rid_set_dn, scope=ldb.SCOPE_BASE), 2)
+
+            # 9. Assert the re-check doesn't show any other errors
+ chk = dbcheck(new_ldb, verbose=False, fix=False, quiet=True)
+
+ # 10. Add another user (checks RID rollover)
+ # We have seized the role, so we can do that.
+ new_ldb.newuser("ridalloctestuser3", "P@ssword!")
+
+ rid_set_res = new_ldb.search(base=rid_set_dn,
+ scope=ldb.SCOPE_BASE, attrs=['rIDNextRid',
+ 'rIDAllocationPool'])
+ next_pool = int(rid_set_res[0]["rIDAllocationPool"][0])
+ self.assertNotEqual(last_rid, (0xFFFFFFFF00000000 & next_pool) >> 32, "rid pool should have changed")
+ finally:
+ self._test_force_demote(fsmo_owner['dns_name'], "RIDALLOCTEST7")
+ shutil.rmtree(targetdir, ignore_errors=True)
+
+ def test_replicate_against_deleted_objects_transaction(self):
+ """Not related to RID allocation, but uses the infrastructure here.
+ Do a join, create a link between two objects remotely, but
+ remove the target locally. Show that we need to set a magic
+ opaque if there is an outer transaction.
+
+ """
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ test_user4 = "ridalloctestuser4"
+ test_group = "ridalloctestgroup1"
+
+ self.ldb_dc1.newuser(test_user4, "P@ssword!")
+
+ self.addCleanup(self.ldb_dc1.deleteuser, test_user4)
+
+ self.ldb_dc1.newgroup(test_group)
+ self.addCleanup(self.ldb_dc1.deletegroup, test_group)
+
+ targetdir = self._test_join(self.dnsname_dc1, "RIDALLOCTEST8")
+ try:
+ # Connect to the database
+ ldb_url = "tdb://%s" % os.path.join(targetdir, "private/sam.ldb")
+ lp = self.get_loadparm()
+
+ new_ldb = SamDB(ldb_url,
+ session_info=system_session(lp), lp=lp)
+
+ destination_dsa_guid = misc.GUID(new_ldb.get_ntds_GUID())
+
+ repl = drs_utils.drs_Replicate(f'ncacn_ip_tcp:{self.dnsname_dc1}[seal]',
+ lp,
+ self.get_credentials(),
+ new_ldb,
+ destination_dsa_guid)
+
+ source_dsa_invocation_id = misc.GUID(self.ldb_dc1.invocation_id)
+
+ # Add the link on the remote DC
+ self.ldb_dc1.add_remove_group_members(test_group, [test_user4])
+
+            # Starting an outer transaction currently overrides the
+            # logic inside repl.replicate() that retries with GET_TGT,
+            # which in turn tells the repl_meta_data module that the
+            # most up-to-date info is already available
+ new_ldb.transaction_start()
+ repl.replicate(self.ldb_dc1.domain_dn(),
+ source_dsa_invocation_id,
+ destination_dsa_guid)
+
+ # Delete the user locally, before applying the links.
+            # This simulates getting the delete in the replication
+ # stream.
+ new_ldb.deleteuser(test_user4)
+
+ # This fails as the user has been deleted locally but a remote link is sent
+ self.assertRaises(ldb.LdbError, new_ldb.transaction_commit)
+
+ new_ldb.transaction_start()
+ repl.replicate(self.ldb_dc1.domain_dn(),
+ source_dsa_invocation_id,
+ destination_dsa_guid)
+
+ # Delete the user locally (the previous transaction
+ # doesn't apply), before applying the links. This
+            # simulates getting the delete in the replication stream.
+ new_ldb.deleteuser(test_user4)
+
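+            # This is the "magic opaque" mentioned in the docstring: it marks
+            # the full join as completed, so the link to the locally deleted
+            # target is tolerated inside the outer transaction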
+ new_ldb.set_opaque_integer(dsdb.DSDB_FULL_JOIN_REPLICATION_COMPLETED_OPAQUE_NAME,
+ 1)
+
+ # This should now work
+ try:
+ new_ldb.transaction_commit()
+ except ldb.LdbError as e:
+ self.fail(f"Failed to replicate despite setting opaque with {e.args[1]}")
+
+ finally:
+ self._test_force_demote(self.dnsname_dc1, "RIDALLOCTEST8")
+ shutil.rmtree(targetdir, ignore_errors=True)
+
+ def test_replicate_against_deleted_objects_normal(self):
+ """Not related to RID allocation, but uses the infrastructure here.
+ Do a join, create a link between two objects remotely, but
+    remove the target locally.
+
+ """
+ fsmo_dn = ldb.Dn(self.ldb_dc1, "CN=RID Manager$,CN=System," + self.ldb_dc1.domain_dn())
+ (fsmo_owner, fsmo_not_owner) = self._determine_fSMORoleOwner(fsmo_dn)
+
+ test_user5 = "ridalloctestuser5"
+ test_group2 = "ridalloctestgroup2"
+
+ self.ldb_dc1.newuser(test_user5, "P@ssword!")
+ self.addCleanup(self.ldb_dc1.deleteuser, test_user5)
+
+ self.ldb_dc1.newgroup(test_group2)
+ self.addCleanup(self.ldb_dc1.deletegroup, test_group2)
+
+ targetdir = self._test_join(self.dnsname_dc1, "RIDALLOCTEST9")
+ try:
+ # Connect to the database
+ ldb_url = "tdb://%s" % os.path.join(targetdir, "private/sam.ldb")
+ lp = self.get_loadparm()
+
+ new_ldb = SamDB(ldb_url,
+ session_info=system_session(lp), lp=lp)
+
+ destination_dsa_guid = misc.GUID(new_ldb.get_ntds_GUID())
+
+ repl = drs_utils.drs_Replicate(f'ncacn_ip_tcp:{self.dnsname_dc1}[seal]',
+ lp,
+ self.get_credentials(),
+ new_ldb,
+ destination_dsa_guid)
+
+ source_dsa_invocation_id = misc.GUID(self.ldb_dc1.invocation_id)
+
+ # Add the link on the remote DC
+ self.ldb_dc1.add_remove_group_members(test_group2, [test_user5])
+
+ # Delete the user locally
+ new_ldb.deleteuser(test_user5)
+
+ # Confirm replication copes with a link to a locally deleted user
+ repl.replicate(self.ldb_dc1.domain_dn(),
+ source_dsa_invocation_id,
+ destination_dsa_guid)
+
+ finally:
+ self._test_force_demote(self.dnsname_dc1, "RIDALLOCTEST9")
+ shutil.rmtree(targetdir, ignore_errors=True)
diff --git a/source4/torture/drs/python/samba_tool_drs.py b/source4/torture/drs/python/samba_tool_drs.py
new file mode 100644
index 0000000..8538c30
--- /dev/null
+++ b/source4/torture/drs/python/samba_tool_drs.py
@@ -0,0 +1,417 @@
+# Blackbox tests for "samba-tool drs" command
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Blackbox tests for samba-tool drs."""
+
+import samba.tests
+import os
+import ldb
+import drs_base
+
+
+class SambaToolDrsTests(drs_base.DrsBaseTestCase):
+ """Blackbox test case for samba-tool drs."""
+
+ def setUp(self):
+ super(SambaToolDrsTests, self).setUp()
+
+ self.dc1 = samba.tests.env_get_var_value("DC1")
+ self.dc2 = samba.tests.env_get_var_value("DC2")
+
+ creds = self.get_credentials()
+ self.cmdline_creds = "-U%s/%s%%%s" % (creds.get_domain(),
+ creds.get_username(), creds.get_password())
+
+ def tearDown(self):
+ self._enable_inbound_repl(self.dnsname_dc1)
+ self._enable_inbound_repl(self.dnsname_dc2)
+
+ self.rm_files('names.tdb', allow_missing=True)
+ self.rm_dirs('etc', 'msg.lock', 'private', 'state', 'bind-dns',
+ allow_missing=True)
+
+ super(SambaToolDrsTests, self).tearDown()
+
+ def _get_rootDSE(self, dc, ldap_only=True):
+ samdb = samba.tests.connect_samdb(dc, lp=self.get_loadparm(),
+ credentials=self.get_credentials(),
+ ldap_only=ldap_only)
+ return samdb.search(base="", scope=samba.tests.ldb.SCOPE_BASE)[0]
+
+ def test_samba_tool_bind(self):
+ """Tests 'samba-tool drs bind' command."""
+
+ # Output should be like:
+ # Extensions supported:
+ # <list-of-supported-extensions>
+ # Site GUID: <GUID>
+ # Repl epoch: 0
+ out = self.check_output("samba-tool drs bind %s %s" % (self.dc1,
+ self.cmdline_creds))
+ self.assertTrue("Site GUID:" in out.decode('utf8'))
+ self.assertTrue("Repl epoch:" in out.decode('utf8'))
+
+ def test_samba_tool_kcc(self):
+ """Tests 'samba-tool drs kcc' command."""
+
+ # Output should be like 'Consistency check on <DC> successful.'
+ out = self.check_output("samba-tool drs kcc %s %s" % (self.dc1,
+ self.cmdline_creds))
+ self.assertTrue(b"Consistency check on" in out)
+ self.assertTrue(b"successful" in out)
+
+ def test_samba_tool_options(self):
+ """Tests 'samba-tool drs options' command
+ """
+ # Output should be like 'Current DSA options: IS_GC <OTHER_FLAGS>'
+ out = self.check_output("samba-tool drs options %s %s" % (self.dc1,
+ self.cmdline_creds))
+ self.assertTrue(b"Current DSA options:" in out)
+
+ def test_samba_tool_replicate(self):
+ """Tests 'samba-tool drs replicate' command."""
+
+ # Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was successful.'
+ nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
+ out = self.check_output("samba-tool drs replicate %s %s %s %s" % (self.dc1,
+ self.dc2,
+ nc_name,
+ self.cmdline_creds))
+ self.assertTrue(b"Replicate from" in out)
+ self.assertTrue(b"was successful" in out)
+
+ def test_samba_tool_replicate_async(self):
+ """Tests 'samba-tool drs replicate --async-op' command."""
+
+ # Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was started.'
+ nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
+ out = self.check_output("samba-tool drs replicate --async-op %s %s %s %s" % (self.dc1,
+ self.dc2,
+ nc_name,
+ self.cmdline_creds))
+ self.assertTrue(b"Replicate from" in out)
+ self.assertTrue(b"was started" in out)
+
+ def test_samba_tool_replicate_local_online(self):
+ """Tests 'samba-tool drs replicate --local-online' command."""
+
+ # Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was successful.'
+ nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
+ out = self.check_output("samba-tool drs replicate --local-online %s %s %s" % (self.dc1,
+ self.dc2,
+ nc_name))
+ self.assertTrue(b"Replicate from" in out)
+ self.assertTrue(b"was successful" in out)
+
+ def test_samba_tool_replicate_local_online_async(self):
+ """Tests 'samba-tool drs replicate --local-online --async-op' command."""
+
+ # Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was started.'
+ nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
+ out = self.check_output("samba-tool drs replicate --local-online --async-op %s %s %s" % (self.dc1,
+ self.dc2,
+ nc_name))
+ self.assertTrue(b"Replicate from" in out)
+ self.assertTrue(b"was started" in out)
+
+ def test_samba_tool_replicate_local_machine_creds(self):
+ """Tests 'samba-tool drs replicate --local -P' command (uses machine creds)."""
+
+ # Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was successful.'
+ nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
+ out = self.check_output("samba-tool drs replicate -P --local %s %s %s" % (self.dc1,
+ self.dc2,
+ nc_name))
+ self.assertTrue(b"Incremental" in out)
+ self.assertTrue(b"was successful" in out)
+
+ def test_samba_tool_replicate_local(self):
+ """Tests 'samba-tool drs replicate --local' command (uses machine creds)."""
+
+ # Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was successful.'
+ nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
+
+ def get_num_obj_links(output):
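+            # Parse the first two integers in the command output: the number
+            # of objects and the number of links replicated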
+ num_objs = None
+ num_links = None
+ for word in output.decode('utf8').split(" "):
+ try:
+ int(word)
+ if num_objs is None:
+ num_objs = int(word)
+ elif num_links is None:
+ num_links = int(word)
+ except ValueError:
+ pass
+
+ return (num_objs, num_links)
+
+ out = self.check_output("samba-tool drs replicate --local --full-sync %s %s %s %s"
+ % (self.dc1, self.dc2, nc_name, self.cmdline_creds))
+ self.assertTrue(b"was successful" in out)
+ self.assertTrue(b"Full" in out)
+
+ (first_obj, _) = get_num_obj_links(out)
+
+ out = self.check_output("samba-tool drs replicate --local %s %s %s %s"
+ % (self.dc1, self.dc2, nc_name, self.cmdline_creds))
+ self.assertTrue(b"was successful" in out)
+ self.assertTrue(b"Incremental" in out)
+
+ (second_obj, _) = get_num_obj_links(out)
+
+ self.assertTrue(first_obj > second_obj)
+
+ server_rootdse = self._get_rootDSE(self.dc1)
+ server_nc_name = server_rootdse["defaultNamingContext"]
+ server_ds_name = server_rootdse["dsServiceName"]
+ server_ldap_service_name = str(server_rootdse["ldapServiceName"][0])
+ server_realm = server_ldap_service_name.split(":")[0]
+ creds = self.get_credentials()
+
+        # We have to give it a different netbiosname every time it runs,
+        # otherwise the name collision causes strange issues. The name
+        # should also differ between environments.
+ netbiosname = "test" + self.dc2
+ if len(netbiosname) > 15:
+ netbiosname = netbiosname[:15]
+
+ out = self.check_output("samba-tool domain join %s dc --server=%s %s --targetdir=%s --option=netbiosname=%s"
+ % (server_realm, self.dc1, self.cmdline_creds, self.tempdir, netbiosname))
+
+ new_dc_config_file = "%s/etc/smb.conf" % self.tempdir
+
+ self.check_output("samba-tool drs replicate --local %s %s %s %s --configfile=%s"
+ % ("invalid", self.dc1, nc_name,
+ self.cmdline_creds, new_dc_config_file))
+
+ self._disable_inbound_repl(self.dnsname_dc1)
+ self._disable_inbound_repl(self.dnsname_dc2)
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1)
+ self._net_drs_replicate(DC=self.dnsname_dc1, fromDC=self.dnsname_dc2)
+
+ # add an object with link on dc1
+ group_name = "group-repl-local-%s" % self.dc2
+ user_name = "user-repl-local-%s" % self.dc2
+
+ self.check_output("samba-tool group add %s %s -H ldap://%s"
+ % (group_name, self.cmdline_creds, self.dc1))
+ self.check_output("samba-tool user add %s %s --random-password -H ldap://%s"
+ % (user_name, self.cmdline_creds, self.dc1))
+ self.check_output("samba-tool group addmembers %s %s %s -H ldap://%s"
+ % (group_name, user_name, self.cmdline_creds, self.dc1))
+
+ self._net_drs_replicate(DC=self.dnsname_dc2, fromDC=self.dnsname_dc1)
+
+ # pull that change with --local into local db from dc1: should send link and some objects
+ out = self.check_output("samba-tool drs replicate --local %s %s %s %s --configfile=%s"
+ % ("invalid", self.dc1, nc_name,
+ self.cmdline_creds, new_dc_config_file))
+
+ (obj_1, link_1) = get_num_obj_links(out)
+
+ self.assertGreaterEqual(obj_1, 2)
+ self.assertEqual(link_1, 1)
+
+ # pull that change with --local into local db from dc2: shouldn't send link or object
+ # as we sent an up-to-dateness vector showing that we had already synced with DC1
+ out = self.check_output("samba-tool drs replicate --local %s %s %s %s --configfile=%s"
+ % ("invalid", self.dc2, nc_name,
+ self.cmdline_creds, new_dc_config_file))
+
+ (obj_2, link_2) = get_num_obj_links(out)
+
+ self.assertEqual(obj_2, 0)
+ self.assertEqual(link_2, 0)
+
+ self.check_output("samba-tool domain demote --remove-other-dead-server=%s -H ldap://%s %s --configfile=%s"
+ % (netbiosname, self.dc1, self.cmdline_creds, new_dc_config_file))
+
+ def test_samba_tool_replicate_machine_creds_P(self):
+ """Tests 'samba-tool drs replicate -P' command with machine creds."""
+
+ # Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was successful.'
+ nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
+ out = self.check_output("samba-tool drs replicate -P %s %s %s" % (self.dc1,
+ self.dc2,
+ nc_name))
+ self.assertTrue(b"Replicate from" in out)
+ self.assertTrue(b"was successful" in out)
+
+ def test_samba_tool_replicate_machine_creds(self):
+ """Tests 'samba-tool drs replicate' command with implicit machine creds."""
+
+ # Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was successful.'
+ nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
+ out = self.check_output("samba-tool drs replicate %s %s %s" % (self.dc1,
+ self.dc2,
+ nc_name))
+ self.assertTrue(b"Replicate from" in out)
+ self.assertTrue(b"was successful" in out)
+
+ def test_samba_tool_drs_clone_dc(self):
+ """Tests 'samba-tool drs clone-dc-database' command."""
+ server_rootdse = self._get_rootDSE(self.dc1)
+ server_nc_name = server_rootdse["defaultNamingContext"]
+ server_ds_name = server_rootdse["dsServiceName"]
+ server_ldap_service_name = str(server_rootdse["ldapServiceName"][0])
+ server_realm = server_ldap_service_name.split(":")[0]
+ creds = self.get_credentials()
+ out = self.check_output("samba-tool drs clone-dc-database %s --server=%s %s --targetdir=%s"
+ % (server_realm,
+ self.dc1,
+ self.cmdline_creds,
+ self.tempdir))
+ ldb_rootdse = self._get_rootDSE("ldb://" + os.path.join(self.tempdir, "private", "sam.ldb"), ldap_only=False)
+ nc_name = ldb_rootdse["defaultNamingContext"]
+ ds_name = ldb_rootdse["dsServiceName"]
+ ldap_service_name = str(server_rootdse["ldapServiceName"][0])
+ self.assertEqual(nc_name, server_nc_name)
+ # The clone should pretend to be the source server
+ self.assertEqual(ds_name, server_ds_name)
+ self.assertEqual(ldap_service_name, server_ldap_service_name)
+
+ samdb = samba.tests.connect_samdb("ldb://" + os.path.join(self.tempdir, "private", "sam.ldb"),
+ ldap_only=False, lp=self.get_loadparm())
+
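+        # Without --include-secrets, the clone must not contain the krbtgt secret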
+ def get_krbtgt_pw():
+ krbtgt_pw = samdb.searchone("unicodePwd", "cn=krbtgt,CN=users,%s" % nc_name)
+ self.assertRaises(KeyError, get_krbtgt_pw)
+
+ server_dn = samdb.searchone("serverReferenceBL", "cn=%s,ou=domain controllers,%s" % (self.dc2, server_nc_name)).decode('utf8')
+ ntds_guid = samdb.searchone("objectGUID", "cn=ntds settings,%s" % server_dn).decode('utf8')
+
+ res = samdb.search(base=str(server_nc_name),
+ expression="(&(objectclass=user)(cn=dns-%s))" % (self.dc2),
+ attrs=[], scope=ldb.SCOPE_SUBTREE)
+ if len(res) == 1:
+ dns_obj = res[0]
+ else:
+ dns_obj = None
+
+ # While we have this cloned, try demoting the other server on the clone, by GUID
+ out = self.check_output("samba-tool domain demote --remove-other-dead-server=%s -H %s/private/sam.ldb"
+ % (ntds_guid,
+ self.tempdir))
+
+ # Check some of the objects that should have been removed
+ def check_machine_obj():
+ samdb.searchone("CN", "cn=%s,ou=domain controllers,%s" % (self.dc2, server_nc_name))
+ self.assertRaises(ldb.LdbError, check_machine_obj)
+
+ def check_server_obj():
+ samdb.searchone("CN", server_dn)
+ self.assertRaises(ldb.LdbError, check_server_obj)
+
+ def check_ntds_guid():
+ samdb.searchone("CN", "<GUID=%s>" % ntds_guid)
+ self.assertRaises(ldb.LdbError, check_ntds_guid)
+
+ if dns_obj is not None:
+ # Check some of the objects that should have been removed
+ def check_dns_account_obj():
+ samdb.search(base=dns_obj.dn, scope=ldb.SCOPE_BASE,
+ attrs=[])
+ self.assertRaises(ldb.LdbError, check_dns_account_obj)
+
+ def test_samba_tool_drs_clone_dc_secrets(self):
+ """Tests 'samba-tool drs clone-dc-database --include-secrets' command ."""
+ server_rootdse = self._get_rootDSE(self.dc1)
+ server_nc_name = server_rootdse["defaultNamingContext"]
+ server_ds_name = server_rootdse["dsServiceName"]
+ server_ldap_service_name = str(server_rootdse["ldapServiceName"][0])
+ server_realm = server_ldap_service_name.split(":")[0]
+ creds = self.get_credentials()
+ out = self.check_output("samba-tool drs clone-dc-database %s --server=%s %s --targetdir=%s --include-secrets"
+ % (server_realm,
+ self.dc1,
+ self.cmdline_creds,
+ self.tempdir))
+ ldb_rootdse = self._get_rootDSE("ldb://" + os.path.join(self.tempdir, "private", "sam.ldb"), ldap_only=False)
+ nc_name = ldb_rootdse["defaultNamingContext"]
+ config_nc_name = ldb_rootdse["configurationNamingContext"]
+ ds_name = ldb_rootdse["dsServiceName"]
+ ldap_service_name = str(server_rootdse["ldapServiceName"][0])
+
+ samdb = samba.tests.connect_samdb("ldb://" + os.path.join(self.tempdir, "private", "sam.ldb"),
+ ldap_only=False, lp=self.get_loadparm())
+ krbtgt_pw = samdb.searchone("unicodePwd", "cn=krbtgt,CN=users,%s" % nc_name)
+ self.assertIsNotNone(krbtgt_pw)
+
+ self.assertEqual(nc_name, server_nc_name)
+ # The clone should pretend to be the source server
+ self.assertEqual(ds_name, server_ds_name)
+ self.assertEqual(ldap_service_name, server_ldap_service_name)
+
+ server_dn = samdb.searchone("serverReferenceBL", "cn=%s,ou=domain controllers,%s" % (self.dc2, server_nc_name)).decode('utf8')
+ ntds_guid = samdb.searchone("objectGUID", "cn=ntds settings,%s" % server_dn)
+
+ res = samdb.search(base=str(server_nc_name),
+ expression="(&(objectclass=user)(cn=dns-%s))" % (self.dc2),
+ attrs=[], scope=ldb.SCOPE_SUBTREE)
+ if len(res) == 1:
+ dns_obj = res[0]
+ else:
+ dns_obj = None
+
+ def demote_self():
+            # Try demoting the server we cloned from (ourselves, from the clone's point of view); this should fail
+ out = self.check_output("samba-tool domain demote --remove-other-dead-server=%s -H %s/private/sam.ldb"
+ % (self.dc1,
+ self.tempdir))
+ self.assertRaises(samba.tests.BlackboxProcessError, demote_self)
+
+ # While we have this cloned, try demoting the other server on the clone
+ out = self.check_output("samba-tool domain demote --remove-other-dead-server=%s -H ldb://%s/private/sam.ldb"
+ % (self.dc2,
+ self.tempdir))
+
+ # Check some of the objects that should have been removed
+ def check_machine_obj():
+ samdb.searchone("CN", "cn=%s,ou=domain controllers,%s" % (self.dc2, server_nc_name))
+ self.assertRaises(ldb.LdbError, check_machine_obj)
+
+ def check_server_obj():
+ samdb.searchone("CN", server_dn)
+ self.assertRaises(ldb.LdbError, check_server_obj)
+
+ def check_ntds_guid():
+ samdb.searchone("CN", "<GUID=%s>" % ntds_guid)
+ self.assertRaises(ldb.LdbError, check_ntds_guid)
+
+ if dns_obj is not None:
+ # Check some of the objects that should have been removed
+ def check_dns_account_obj():
+ samdb.search(base=dns_obj.dn, scope=ldb.SCOPE_BASE,
+ attrs=[])
+ self.assertRaises(ldb.LdbError, check_dns_account_obj)
+
+ def test_samba_tool_drs_clone_dc_secrets_without_targetdir(self):
+ """Tests 'samba-tool drs clone-dc-database' command without --targetdir."""
+ server_rootdse = self._get_rootDSE(self.dc1)
+ server_ldap_service_name = str(server_rootdse["ldapServiceName"][0])
+ server_realm = server_ldap_service_name.split(":")[0]
+ creds = self.get_credentials()
+
+ def attempt_clone():
+ out = self.check_output("samba-tool drs clone-dc-database %s --server=%s %s"
+ % (server_realm,
+ self.dc1,
+ self.cmdline_creds))
+ self.assertRaises(samba.tests.BlackboxProcessError, attempt_clone)
diff --git a/source4/torture/drs/python/samba_tool_drs_critical.py b/source4/torture/drs/python/samba_tool_drs_critical.py
new file mode 100644
index 0000000..47f9e94
--- /dev/null
+++ b/source4/torture/drs/python/samba_tool_drs_critical.py
@@ -0,0 +1,98 @@
+# Blackbox tests for "samba-tool drs" command
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Blackbox tests for samba-tool drs."""
+
+import samba.tests
+import os
+import ldb
+import drs_base
+import random
+
+class SambaToolDrsTests(drs_base.DrsBaseTestCase):
+ """Blackbox test case for samba-tool drs."""
+
+ def setUp(self):
+ super(SambaToolDrsTests, self).setUp()
+
+ self.dc1 = samba.tests.env_get_var_value("DC1")
+ self.dc2 = samba.tests.env_get_var_value("DC2")
+
+ creds = self.get_credentials()
+ self.cmdline_creds = "-U%s/%s%%%s" % (creds.get_domain(),
+ creds.get_username(), creds.get_password())
+
+ def tearDown(self):
+ self._enable_inbound_repl(self.dnsname_dc1)
+ self._enable_inbound_repl(self.dnsname_dc2)
+
+ self.rm_files('names.tdb', allow_missing=True)
+ self.rm_dirs('etc', 'msg.lock', 'private', 'state', 'bind-dns',
+ allow_missing=True)
+
+ super(SambaToolDrsTests, self).tearDown()
+
+    # This test targets the Samba 4.5 emulation servers, which fail to
+    # correctly implement DRSUAPI_DRS_GET_ANC when DRSUAPI_DRS_CRITICAL_ONLY
+    # is set (but it also runs against a normal server).
+ def test_samba_tool_drs_clone_dc_critical_object_chain(self):
+ """Tests 'samba-tool drs clone-dc-database' command with a Critical/non-critical/critical object chain."""
+
+ samdb = samba.tests.connect_samdb(self.dc1, lp=self.get_loadparm(),
+ credentials=self.get_credentials(),
+ ldap_only=True)
+ server_rootdse = samdb.search(base="",
+ scope=samba.tests.ldb.SCOPE_BASE)[0]
+ nc_name = server_rootdse["defaultNamingContext"][0]
+ server_ldap_service_name = str(server_rootdse["ldapServiceName"][0])
+ server_realm = server_ldap_service_name.split(":")[0]
+
+ not_critical_dn = f"OU=not-critical{random.randint(1, 10000000)},{nc_name}"
+ samdb.create_ou(not_critical_dn)
+ self.addCleanup(samdb.delete,
+ not_critical_dn)
+ domain_sid = samdb.get_domain_sid()
+ admin_sid = f"{domain_sid}-500"
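+        # RID 500 is the well-known RID of the built-in Administrator account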
+ samdb.rename(f"<SID={admin_sid}>",
+ f"cn=administrator,{not_critical_dn}")
+ self.addCleanup(samdb.rename,
+ f"<SID={admin_sid}>",
+ f"cn=administrator,cn=users,{nc_name}")
+
+ try:
+ out = self.check_output("samba-tool drs clone-dc-database %s --server=%s %s --targetdir=%s"
+ % (server_realm,
+ self.dc1,
+ self.cmdline_creds,
+ self.tempdir))
+ except samba.tests.BlackboxProcessError as e:
+ self.fail("Error calling samba-tool: %s" % e)
+
+ local_samdb = samba.tests.connect_samdb("ldb://" + os.path.join(self.tempdir, "private", "sam.ldb"),
+ ldap_only=False, lp=self.get_loadparm())
+
+ # Check administrator was replicated and is in the right place
+ res = local_samdb.search(base=str(nc_name),
+ expression="(&(objectclass=user)(cn=administrator))",
+ attrs=[], scope=ldb.SCOPE_SUBTREE)
+        self.assertEqual(len(res), 1)
+
+ admin_obj = res[0]
+
+        self.assertEqual(admin_obj.dn, ldb.Dn(samdb, f"cn=administrator,{not_critical_dn}"))
diff --git a/source4/torture/drs/python/samba_tool_drs_no_dns.py b/source4/torture/drs/python/samba_tool_drs_no_dns.py
new file mode 100644
index 0000000..fb1551a
--- /dev/null
+++ b/source4/torture/drs/python/samba_tool_drs_no_dns.py
@@ -0,0 +1,175 @@
+# Blackbox tests for "samba-tool drs" command
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+# Copyright (C) Catalyst.Net Ltd 2019
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""
+Blackbox tests for samba-tool drs with no DNS partitions
+
+Adapted from samba_tool_drs.py
+"""
+
+import samba.tests
+import os
+import ldb
+import drs_base
+
+from samba.tests import BlackboxProcessError
+from samba.common import get_string
+
+
+class SambaToolDrsNoDnsTests(drs_base.DrsBaseTestCase):
+ """Blackbox test case for samba-tool drs."""
+
+ def setUp(self):
+ super(SambaToolDrsNoDnsTests, self).setUp()
+
+ self.dc1 = samba.tests.env_get_var_value("DC1")
+
+ creds = self.get_credentials()
+ self.cmdline_creds = "-U%s/%s%%%s" % (creds.get_domain(),
+ creds.get_username(), creds.get_password())
+
+ def tearDown(self):
+ self._enable_inbound_repl(self.dnsname_dc1)
+ self.rm_files('names.tdb', allow_missing=True)
+ self.rm_dirs('etc', 'msg.lock', 'private', 'state', 'bind-dns',
+ allow_missing=True)
+
+ super(SambaToolDrsNoDnsTests, self).tearDown()
+
+ def _get_rootDSE(self, dc, ldap_only=True):
+ samdb = samba.tests.connect_samdb(dc, lp=self.get_loadparm(),
+ credentials=self.get_credentials(),
+ ldap_only=ldap_only)
+ return samdb.search(base="", scope=samba.tests.ldb.SCOPE_BASE)[0], samdb
+
+ def test_samba_tool_replicate_local_no_dns_tdb(self):
+ self.backend = 'tdb'
+ self._test_samba_tool_replicate_local_no_dns()
+
+ def test_samba_tool_replicate_local_no_dns_mdb(self):
+ self.backend = 'mdb'
+ self._test_samba_tool_replicate_local_no_dns()
+
+ def _test_samba_tool_replicate_local_no_dns(self):
+ """Check we can provision a database without DNS partitions
+ (and then add them afterwards)."""
+
+ server_rootdse, _ = self._get_rootDSE(self.dc1)
+ nc_name = server_rootdse["defaultNamingContext"]
+ server_ldap_service_name = str(server_rootdse["ldapServiceName"][0])
+ server_realm = server_ldap_service_name.split(":")[0]
+ creds = self.get_credentials()
+
+ # We have to give it a different netbiosname every time it runs,
+ # otherwise the name collision causes strange issues. The name also
+ # needs to differ between test environments.
+ netbiosname = "dns" + self.backend + self.dc1
+ if len(netbiosname) > 15:
+ netbiosname = netbiosname[:15]
+
+ out = self.check_output("samba-tool domain join %s dc --server=%s %s --targetdir=%s --option=netbiosname=%s %s --backend-store=%s"
+ % (server_realm, self.dc1, self.cmdline_creds,
+ self.tempdir, netbiosname,
+ "--dns-backend=NONE",
+ self.backend))
+
+ new_dc_config_file = os.path.join(self.tempdir, "etc", "smb.conf")
+ new_dc_sam = os.path.join(self.tempdir, "private", "sam.ldb")
+
+ forestdns_dn = ldb.binary_encode('DC=ForestDNSZones,' + str(nc_name))
+ domaindns_dn = ldb.binary_encode('DC=DomainDNSZones,' + str(nc_name))
+
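+ # Pull the two DNS partitions into the new local database; with
+ # --local the destination DC name is only a placeholder here, which is
+ # why "invalid" is passed.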
+ self.check_output("samba-tool drs replicate --local %s %s %s %s --configfile=%s --full-sync"
+ % ("invalid", self.dc1, forestdns_dn,
+ self.cmdline_creds, new_dc_config_file))
+
+ self.check_output("samba-tool drs replicate --local %s %s %s %s --configfile=%s --full-sync"
+ % ("invalid", self.dc1, domaindns_dn,
+ self.cmdline_creds, new_dc_config_file))
+
+ server_rootdse, samdb = self._get_rootDSE("ldb://" + new_dc_sam, ldap_only=False)
+ server_ds_name = ldb.binary_encode(server_rootdse["dsServiceName"][0].decode('utf-8'))
+
+ # Show that Has-Master-NCs is fixed by samba_upgradedns
+ res = samdb.search(base=server_ds_name,
+ expression="(msds-hasmasterncs=%s)" % forestdns_dn)
+ self.assertEqual(len(res), 0)
+ res = samdb.search(base=server_ds_name,
+ expression="(msds-hasmasterncs=%s)" % domaindns_dn)
+ self.assertEqual(len(res), 0)
+
+ self.check_output("samba_upgradedns --configfile=%s" % (new_dc_config_file))
+
+ res = samdb.search(base=server_ds_name,
+ expression="(msds-hasmasterncs=%s)" % forestdns_dn)
+ self.assertEqual(len(res), 1)
+ res = samdb.search(base=server_ds_name,
+ expression="(msds-hasmasterncs=%s)" % domaindns_dn)
+ self.assertEqual(len(res), 1)
+
+ # Show that replica locations is fixed by dbcheck
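+ # (search_options value 2 searches across all partitions held by the
+ # server; msDS-NC-Replica-Locations lives on the crossRef objects in
+ # the Configuration NC)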
+ res = samdb.search(controls=["search_options:1:2"],
+ expression="(&(msds-nc-replica-locations=%s)(ncname=%s))"
+ % (server_ds_name, forestdns_dn))
+ self.assertEqual(len(res), 0)
+ res = samdb.search(controls=["search_options:1:2"],
+ expression="(&(msds-nc-replica-locations=%s)(ncname=%s))"
+ % (server_ds_name, domaindns_dn))
+ self.assertEqual(len(res), 0)
+
+ try:
+ # This fixes any forward-link/backward-link issues left by the tools above
+ self.check_output("samba-tool dbcheck --configfile=%s --cross-ncs --fix --yes" % (new_dc_config_file))
+ except BlackboxProcessError as e:
+ self.assertTrue("Checked " in get_string(e.stdout))
+
+ self.check_output("samba-tool dbcheck --configfile=%s --cross-ncs" % (new_dc_config_file))
+
+ # Compare the two directories
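+ # (attributes that may legitimately differ between the two copies are
+ # filtered out of the comparison)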
+ self.check_output("samba-tool ldapcmp ldap://%s ldb://%s %s --filter=%s" %
+ (self.dc1, new_dc_sam, self.cmdline_creds,
+ "msDs-masteredBy,msDS-NC-Replica-Locations,msDS-hasMasterNCs"))
+
+ # Check all ForestDNS connections and backlinks
+ res = samdb.search(base=server_ds_name,
+ expression="(msds-hasmasterncs=%s)" % forestdns_dn)
+ self.assertEqual(len(res), 1)
+ res = samdb.search(base=forestdns_dn,
+ expression="(msds-masteredby=%s)" % server_ds_name)
+ self.assertEqual(len(res), 1)
+ res = samdb.search(controls=["search_options:1:2"],
+ expression="(&(msds-nc-replica-locations=%s)(ncname=%s))"
+ % (server_ds_name, forestdns_dn))
+ self.assertEqual(len(res), 1)
+
+ # Check all DomainDNS connections and backlinks
+ res = samdb.search(base=server_ds_name,
+ expression="(msds-hasmasterncs=%s)" % domaindns_dn)
+ self.assertEqual(len(res), 1)
+ res = samdb.search(base=domaindns_dn,
+ expression="(msds-masteredby=%s)" % server_ds_name)
+ self.assertEqual(len(res), 1)
+ res = samdb.search(controls=["search_options:1:2"],
+ expression="(&(msds-nc-replica-locations=%s)(ncname=%s))"
+ % (server_ds_name, domaindns_dn))
+ self.assertEqual(len(res), 1)
+
+ # Demote the DC we created in the test
+ self.check_output("samba-tool domain demote --remove-other-dead-server=%s -H ldap://%s %s --configfile=%s"
+ % (netbiosname, self.dc1, self.cmdline_creds, new_dc_config_file))
diff --git a/source4/torture/drs/python/samba_tool_drs_showrepl.py b/source4/torture/drs/python/samba_tool_drs_showrepl.py
new file mode 100644
index 0000000..707b1fb
--- /dev/null
+++ b/source4/torture/drs/python/samba_tool_drs_showrepl.py
@@ -0,0 +1,333 @@
+# Blackbox tests for "samba-tool drs" command
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Blackbox tests for samba-tool drs showrepl."""
+import samba.tests
+import drs_base
+from samba.dcerpc import drsuapi
+from samba import drs_utils
+import re
+import json
+import ldb
+import random
+from samba.common import get_string
+
+GUID_RE = r'[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}'
+HEX8_RE = r'0x[\da-f]{8}'
+DN_RE = r'(?:(?:CN|DC)=[\\:\w -]+,)+DC=com'
+
+
+class SambaToolDrsShowReplTests(drs_base.DrsBaseTestCase):
+ """Blackbox test case for samba-tool drs."""
+
+ def setUp(self):
+ super(SambaToolDrsShowReplTests, self).setUp()
+
+ self.dc1 = samba.tests.env_get_var_value("DC1")
+ self.dc2 = samba.tests.env_get_var_value("DC2")
+
+ creds = self.get_credentials()
+ self.cmdline_creds = "-U%s/%s%%%s" % (creds.get_domain(),
+ creds.get_username(),
+ creds.get_password())
+
+ def test_samba_tool_showrepl(self):
+ """Tests 'samba-tool drs showrepl' command.
+ """
+ # Output should be like:
+ # <site-name>/<domain-name>
+ # DSA Options: <hex-options>
+ # DSA object GUID: <DSA-object-GUID>
+ # DSA invocationId: <DSA-invocationId>
+ # <Inbound-connections-list>
+ # <Outbound-connections-list>
+ # <KCC-objects>
+ # ...
+ # TODO: Perhaps we should check at least for
+ # DSA's objectGUID and invocationId
+ out = self.check_output("samba-tool drs showrepl "
+ "%s %s" % (self.dc1, self.cmdline_creds))
+
+ out = get_string(out)
+ # We want to assert that we are getting the same results, but
+ # dates and GUIDs change randomly.
+ #
+ # There are sections with headers like ==== THIS ====
+ (header,
+ _inbound, inbound,
+ _outbound, outbound,
+ _conn, conn) = out.split("====")
+
+ self.assertEqual(_inbound, ' INBOUND NEIGHBORS ')
+ self.assertEqual(_outbound, ' OUTBOUND NEIGHBORS ')
+ self.assertEqual(_conn, ' KCC CONNECTION OBJECTS ')
+
+ self.assertRegexpMatches(header,
+ r'^Default-First-Site-Name\\LOCALDC\s+'
+ r"DSA Options: %s\s+"
+ r"DSA object GUID: %s\s+"
+ r"DSA invocationId: %s" %
+ (HEX8_RE, GUID_RE, GUID_RE))
+
+ # We don't assert the DomainDnsZones and ForestDnsZones are
+ # there because we don't know that they have been set up yet.
+
+ for p in ['CN=Configuration,DC=samba,DC=example,DC=com',
+ 'DC=samba,DC=example,DC=com',
+ 'CN=Schema,CN=Configuration,DC=samba,DC=example,DC=com']:
+ self.assertRegexpMatches(
+ inbound,
+ r'%s\n'
+ r'\tDefault-First-Site-Name\\[A-Z]+ via RPC\n'
+ r'\t\tDSA object GUID: %s\n'
+ r'\t\tLast attempt @ [^\n]+\n'
+ r'\t\t\d+ consecutive failure\(s\).\n'
+ r'\t\tLast success @ [^\n]+\n'
+ r'\n' % (p, GUID_RE),
+ msg="%s inbound missing" % p)
+
+ self.assertRegexpMatches(
+ outbound,
+ r'%s\n'
+ r'\tDefault-First-Site-Name\\[A-Z]+ via RPC\n'
+ r'\t\tDSA object GUID: %s\n'
+ r'\t\tLast attempt @ [^\n]+\n'
+ r'\t\t\d+ consecutive failure\(s\).\n'
+ r'\t\tLast success @ [^\n]+\n'
+ r'\n' % (p, GUID_RE),
+ msg="%s outbound missing" % p)
+
+ self.assertRegexpMatches(conn,
+ r'Connection --\n'
+ r'\tConnection name: %s\n'
+ r'\tEnabled : TRUE\n'
+ r'\tServer DNS name : \w+\.samba\.example\.com\n'
+ r'\tServer DN name : %s'
+ r'\n' % (GUID_RE, DN_RE))
+
+ def test_samba_tool_showrepl_json(self):
+ """Tests 'samba-tool drs showrepl --json' command.
+ """
+ out = self.check_output("samba-tool drs showrepl %s %s --json" %
+ (self.dc1, self.cmdline_creds))
+ d = json.loads(get_string(out))
+ self.assertEqual(set(d), set(['repsFrom',
+ 'repsTo',
+ "NTDSConnections",
+ "dsa"]))
+
+ # dsa
+ for k in ["objectGUID", "invocationId"]:
+ self.assertRegexpMatches(d['dsa'][k], '^%s$' % GUID_RE)
+ self.assertTrue(isinstance(d['dsa']["options"], int))
+
+ # repsfrom and repsto
+ for reps in (d['repsFrom'], d['repsTo']):
+ for r in reps:
+ for k in ('NC dn', "NTDS DN"):
+ self.assertRegexpMatches(r[k], '^%s$' % DN_RE)
+ for k in ("last attempt time",
+ "last attempt message",
+ "last success"):
+ self.assertTrue(isinstance(r[k], str))
+ self.assertRegexpMatches(r["DSA objectGUID"], '^%s$' % GUID_RE)
+ self.assertTrue(isinstance(r["consecutive failures"], int))
+
+ # ntdsconnection
+ for n in d["NTDSConnections"]:
+ self.assertRegexpMatches(n["dns name"],
+ r'^[\w]+\.samba\.example\.com$')
+ self.assertRegexpMatches(n["name"], "^%s$" % GUID_RE)
+ self.assertTrue(isinstance(n['enabled'], bool))
+ self.assertTrue(isinstance(n['options'], int))
+ self.assertTrue(isinstance(n['replicates NC'], list))
+ self.assertRegexpMatches(n["remote DN"], "^%s$" % DN_RE)
+
+ def _force_all_reps(self, samdb, dc, direction):
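+ """Force replication with every replication partner of dc in the
+ given direction, re-enabling replication on both ends first."""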
+ if direction == 'inbound':
+ info_type = drsuapi.DRSUAPI_DS_REPLICA_INFO_NEIGHBORS
+ elif direction == 'outbound':
+ info_type = drsuapi.DRSUAPI_DS_REPLICA_INFO_REPSTO
+ else:
+ raise ValueError("expected 'inbound' or 'outbound'")
+
+ self._enable_all_repl(dc)
+ lp = self.get_loadparm()
+ creds = self.get_credentials()
+ drsuapi_conn, drsuapi_handle, _ = drs_utils.drsuapi_connect(dc, lp, creds)
+ req1 = drsuapi.DsReplicaGetInfoRequest1()
+ req1.info_type = info_type
+ _, info = drsuapi_conn.DsReplicaGetInfo(drsuapi_handle, 1, req1)
+ for x in info.array:
+ # You might think x.source_dsa_address was the thing to use, but no:
+ # we need the DSA object DN so we can filter out RODCs and deleted DCs.
+
+ res = []
+ try:
+ res = samdb.search(base=x.source_dsa_obj_dn,
+ scope=ldb.SCOPE_BASE,
+ attrs=['msDS-isRODC', 'isDeleted'],
+ controls=['show_deleted:0'])
+ except ldb.LdbError as e:
+ if e.args[0] != ldb.ERR_NO_SUCH_OBJECT:
+ raise
+
+ if (len(res) == 0 or
+ len(res[0].get('msDS-isRODC', '')) > 0 or
+ res[0]['isDeleted'] == 'TRUE'):
+ continue
+
+ dsa_dn = str(ldb.Dn(samdb, x.source_dsa_obj_dn).parent())
+ try:
+ res = samdb.search(base=dsa_dn,
+ scope=ldb.SCOPE_BASE,
+ attrs=['dNSHostName'])
+ except ldb.LdbError as e:
+ if e.args[0] != ldb.ERR_NO_SUCH_OBJECT:
+ raise
+ continue
+
+ if len(res) == 0:
+ print("server %s has no dNSHostName" % dsa_dn)
+ continue
+
+ remote = res[0].get('dNSHostName', [''])[0]
+ if remote:
+ self._enable_all_repl(remote)
+
+ if direction == 'inbound':
+ src, dest = remote, dc
+ else:
+ src, dest = dc, remote
+ self._net_drs_replicate(dest, src, forced=True)
+
+ def test_samba_tool_showrepl_pull_summary_all_good(self):
+ """Tests 'samba-tool drs showrepl --pull-summary' command."""
+ # To be sure that all is good we need to force replication
+ # with everyone (because others might have it turned off), and
+ # turn replication on for them in case they suddenly decide to
+ # try again.
+ #
+ # We don't restore them to the non-auto-replication state.
+ samdb1 = self.getSamDB("-H", "ldap://%s" % self.dc1, "-U",
+ self.cmdline_creds)
+ self._enable_all_repl(self.dc1)
+ self._force_all_reps(samdb1, self.dc1, 'inbound')
+ self._force_all_reps(samdb1, self.dc1, 'outbound')
+ try:
+ out = self.check_output(
+ "samba-tool drs showrepl --pull-summary %s %s" %
+ (self.dc1, self.cmdline_creds))
+ out = get_string(out)
+ self.assertStringsEqual(out, "[ALL GOOD]\n")
+
+ out = self.check_output("samba-tool drs showrepl --pull-summary "
+ "--color=yes %s %s" %
+ (self.dc1, self.cmdline_creds))
+ out = get_string(out)
+ self.assertStringsEqual(out, "\033[1;32m[ALL GOOD]\033[0m\n")
+
+ # --verbose output is still quiet when all is good.
+ out = self.check_output(
+ "samba-tool drs showrepl --pull-summary -v %s %s" %
+ (self.dc1, self.cmdline_creds))
+ out = get_string(out)
+ self.assertStringsEqual(out, "[ALL GOOD]\n")
+ out = self.check_output("samba-tool drs showrepl --pull-summary -v "
+ "--color=yes %s %s" %
+ (self.dc1, self.cmdline_creds))
+ out = get_string(out)
+
+ except samba.tests.BlackboxProcessError as e:
+ self.fail(str(e))
+
+ self.assertStringsEqual(out, "\033[1;32m[ALL GOOD]\033[0m\n")
+
+ def test_samba_tool_showrepl_summary_forced_failure(self):
+ """Tests 'samba-tool drs showrepl --summary' command when we break the
+ network on purpose.
+ """
+ self.addCleanup(self._enable_all_repl, self.dc1)
+ self._disable_all_repl(self.dc1)
+
+ samdb1 = self.getSamDB("-H", "ldap://%s" % self.dc1, "-U",
+ self.cmdline_creds)
+ samdb2 = self.getSamDB("-H", "ldap://%s" % self.dc2, "-U",
+ self.cmdline_creds)
+ domain_dn = samdb1.domain_dn()
+
+ # Add some things to NOT replicate
+ ou1 = "OU=dc1.%x,%s" % (random.randrange(1 << 64), domain_dn)
+ ou2 = "OU=dc2.%x,%s" % (random.randrange(1 << 64), domain_dn)
+ samdb1.add({
+ "dn": ou1,
+ "objectclass": "organizationalUnit"
+ })
+ self.addCleanup(samdb1.delete, ou1, ['tree_delete:1'])
+ samdb2.add({
+ "dn": ou2,
+ "objectclass": "organizationalUnit"
+ })
+ self.addCleanup(samdb2.delete, ou2, ['tree_delete:1'])
+
+ dn1 = 'cn=u1.%%d,%s' % (ou1)
+ dn2 = 'cn=u2.%%d,%s' % (ou2)
+
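+ # With replication disabled, keep adding objects on both DCs and
+ # re-checking the summary; once showrepl reports failing connections
+ # it exits non-zero, raising BlackboxProcessError, which is verified
+ # in the except block below.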
+ try:
+ for i in range(100):
+ samdb1.add({
+ "dn": dn1 % i,
+ "objectclass": "user"
+ })
+ samdb2.add({
+ "dn": dn2 % i,
+ "objectclass": "user"
+ })
+ out = self.check_output("samba-tool drs showrepl --summary -v "
+ "%s %s" %
+ (self.dc1, self.cmdline_creds))
+ out = get_string(out)
+ self.assertStringsEqual('[ALL GOOD]', out, strip=True)
+ out = self.check_output("samba-tool drs showrepl --summary -v "
+ "--color=yes %s %s" %
+ (self.dc2, self.cmdline_creds))
+ out = get_string(out)
+ self.assertIn('[ALL GOOD]', out)
+
+ except samba.tests.BlackboxProcessError as e:
+ e_stdout = get_string(e.stdout)
+ e_stderr = get_string(e.stderr)
+ print("Good, failed as expected after %d rounds: %r" % (i, e.cmd))
+ self.assertIn('There are failing connections', e_stdout,
+ msg=('stdout: %r\nstderr: %r\nretcode: %s'
+ '\nmessage: %r\ncmd: %r') % (e_stdout,
+ e_stderr,
+ e.returncode,
+ e.msg,
+ e.cmd))
+ self.assertRegexpMatches(
+ e_stdout,
+ r'result 845[67] '
+ r'\(WERR_DS_DRA_(SINK|SOURCE)_DISABLED\)',
+ msg=("The process should have failed "
+ "because replication was forced off, "
+ "but it failed for some other reason."))
+ self.assertIn('consecutive failure(s).', e_stdout)
+ else:
+ self.fail("No DRS failure noticed after 100 rounds of trying")