Diffstat (limited to 'source4/dsdb/repl')
-rw-r--r--  source4/dsdb/repl/drepl_extended.c     |  211
-rw-r--r--  source4/dsdb/repl/drepl_fsmo.c         |  147
-rw-r--r--  source4/dsdb/repl/drepl_notify.c       |  485
-rw-r--r--  source4/dsdb/repl/drepl_out_helpers.c  | 1356
-rw-r--r--  source4/dsdb/repl/drepl_out_helpers.h  |   26
-rw-r--r--  source4/dsdb/repl/drepl_out_pull.c     |  260
-rw-r--r--  source4/dsdb/repl/drepl_partitions.c   |  651
-rw-r--r--  source4/dsdb/repl/drepl_periodic.c     |  157
-rw-r--r--  source4/dsdb/repl/drepl_replica.c      |   62
-rw-r--r--  source4/dsdb/repl/drepl_ridalloc.c     |  265
-rw-r--r--  source4/dsdb/repl/drepl_secret.c       |  146
-rw-r--r--  source4/dsdb/repl/drepl_service.c      |  545
-rw-r--r--  source4/dsdb/repl/drepl_service.h      |  251
-rw-r--r--  source4/dsdb/repl/replicated_objects.c | 1283
14 files changed, 5845 insertions(+), 0 deletions(-)
diff --git a/source4/dsdb/repl/drepl_extended.c b/source4/dsdb/repl/drepl_extended.c
new file mode 100644
index 0000000..8b5bb6f
--- /dev/null
+++ b/source4/dsdb/repl/drepl_extended.c
@@ -0,0 +1,211 @@
+/*
+ Unix SMB/CIFS Implementation.
+
+ DSDB replication service - extended operation code
+
+ Copyright (C) Andrew Tridgell 2010
+ Copyright (C) Andrew Bartlett 2010
+ Copyright (C) Nadezhda Ivanova 2010
+
+ based on drepl_notify.c
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#include "includes.h"
+#include "ldb_module.h"
+#include "dsdb/samdb/samdb.h"
+#include "samba/service.h"
+#include "dsdb/repl/drepl_service.h"
+#include "param/param.h"
+
+#undef DBGC_CLASS
+#define DBGC_CLASS DBGC_DRS_REPL
+
+
+/*
+ create the role owner source dsa structure
+
+ nc_dn: the DN of the subtree being replicated
+ source_dsa_dn: the DN of the server that we are replicating from
+ */
+static WERROR drepl_create_extended_source_dsa(struct dreplsrv_service *service,
+ TALLOC_CTX *mem_ctx,
+ struct ldb_dn *nc_dn,
+ struct ldb_dn *source_dsa_dn,
+ uint64_t min_usn,
+ struct dreplsrv_partition_source_dsa **_sdsa)
+{
+ struct dreplsrv_partition_source_dsa *sdsa;
+ struct ldb_context *ldb = service->samdb;
+ int ret;
+ WERROR werr;
+ struct ldb_dn *nc_root;
+ struct dreplsrv_partition *p;
+
+ sdsa = talloc_zero(service, struct dreplsrv_partition_source_dsa);
+ W_ERROR_HAVE_NO_MEMORY(sdsa);
+
+ sdsa->partition = talloc_zero(sdsa, struct dreplsrv_partition);
+ if (!sdsa->partition) {
+ talloc_free(sdsa);
+ return WERR_NOT_ENOUGH_MEMORY;
+ }
+
+ sdsa->partition->dn = ldb_dn_copy(sdsa->partition, nc_dn);
+ if (!sdsa->partition->dn) {
+ talloc_free(sdsa);
+ return WERR_NOT_ENOUGH_MEMORY;
+ }
+ sdsa->partition->nc.dn = ldb_dn_alloc_linearized(sdsa->partition, nc_dn);
+ if (!sdsa->partition->nc.dn) {
+ talloc_free(sdsa);
+ return WERR_NOT_ENOUGH_MEMORY;
+ }
+ ret = dsdb_find_guid_by_dn(ldb, nc_dn, &sdsa->partition->nc.guid);
+ if (ret != LDB_SUCCESS) {
+ DEBUG(0,(__location__ ": Failed to find GUID for %s\n",
+ ldb_dn_get_linearized(nc_dn)));
+ talloc_free(sdsa);
+ return WERR_DS_DRA_INTERNAL_ERROR;
+ }
+
+ sdsa->repsFrom1 = &sdsa->_repsFromBlob.ctr.ctr1;
+ ret = dsdb_find_guid_by_dn(ldb, source_dsa_dn, &sdsa->repsFrom1->source_dsa_obj_guid);
+ if (ret != LDB_SUCCESS) {
+ DEBUG(0,(__location__ ": Failed to find objectGUID for %s\n",
+ ldb_dn_get_linearized(source_dsa_dn)));
+ talloc_free(sdsa);
+ return WERR_DS_DRA_INTERNAL_ERROR;
+ }
+
+ sdsa->repsFrom1->other_info = talloc_zero(sdsa, struct repsFromTo1OtherInfo);
+ if (!sdsa->repsFrom1->other_info) {
+ talloc_free(sdsa);
+ return WERR_NOT_ENOUGH_MEMORY;
+ }
+
+ sdsa->repsFrom1->other_info->dns_name = samdb_ntds_msdcs_dns_name(ldb,
+ sdsa->repsFrom1->other_info,
+ &sdsa->repsFrom1->source_dsa_obj_guid);
+ if (!sdsa->repsFrom1->other_info->dns_name) {
+ talloc_free(sdsa);
+ return WERR_NOT_ENOUGH_MEMORY;
+ }
+
+ werr = dreplsrv_out_connection_attach(service, sdsa->repsFrom1, &sdsa->conn);
+ if (!W_ERROR_IS_OK(werr)) {
+ DEBUG(0,(__location__ ": Failed to attach connection to %s\n",
+ ldb_dn_get_linearized(nc_dn)));
+ talloc_free(sdsa);
+ return werr;
+ }
+
+ ret = dsdb_find_nc_root(service->samdb, sdsa, nc_dn, &nc_root);
+ if (ret != LDB_SUCCESS) {
+ DEBUG(0,(__location__ ": Failed to find nc_root for %s\n",
+ ldb_dn_get_linearized(nc_dn)));
+ talloc_free(sdsa);
+ return WERR_DS_DRA_INTERNAL_ERROR;
+ }
+
+ /* use the partition uptodateness vector */
+ ret = dsdb_load_udv_v2(service->samdb, nc_root, sdsa->partition,
+ &sdsa->partition->uptodatevector.cursors,
+ &sdsa->partition->uptodatevector.count);
+ if (ret != LDB_SUCCESS) {
+ DEBUG(0,(__location__ ": Failed to load UDV for %s\n",
+ ldb_dn_get_linearized(nc_root)));
+ talloc_free(sdsa);
+ return WERR_DS_DRA_INTERNAL_ERROR;
+ }
+
+ /* find the highwatermark from the partitions list */
+ for (p=service->partitions; p; p=p->next) {
+ if (ldb_dn_compare(p->dn, nc_root) == 0) {
+ struct dreplsrv_partition_source_dsa *s;
+ werr = dreplsrv_partition_source_dsa_by_guid(p,
+ &sdsa->repsFrom1->source_dsa_obj_guid,
+ &s);
+ if (W_ERROR_IS_OK(werr)) {
+ sdsa->repsFrom1->highwatermark = s->repsFrom1->highwatermark;
+ sdsa->repsFrom1->replica_flags = s->repsFrom1->replica_flags;
+ }
+ }
+ }
+
+ if (!service->am_rodc) {
+ sdsa->repsFrom1->replica_flags |= DRSUAPI_DRS_WRIT_REP;
+ }
+
+ *_sdsa = sdsa;
+ return WERR_OK;
+}
+
+struct extended_op_data {
+ dreplsrv_extended_callback_t callback;
+ void *callback_data;
+ struct dreplsrv_partition_source_dsa *sdsa;
+};
+
+/*
+ called when an extended op finishes
+ */
+static void extended_op_callback(struct dreplsrv_service *service,
+ WERROR err,
+ enum drsuapi_DsExtendedError exop_error,
+ void *cb_data)
+{
+ struct extended_op_data *data = talloc_get_type_abort(cb_data, struct extended_op_data);
+ talloc_unlink(data, data->sdsa);
+ data->callback(service, err, exop_error, data->callback_data);
+ talloc_free(data);
+}
+
+/*
+ schedule a getncchanges request to the role owner for an extended operation
+ */
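+/*
+  drepl_take_FSMO_role() in drepl_fsmo.c below is one caller of this
+  helper: it passes the FSMO role object DN as nc_dn, the current role
+  owner's ntdsDsa DN as source_dsa_dn, and uses callback/callback_data
+  to send its deferred IRPC reply once the extended operation completes.
+ */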
+WERROR drepl_request_extended_op(struct dreplsrv_service *service,
+ struct ldb_dn *nc_dn,
+ struct ldb_dn *source_dsa_dn,
+ enum drsuapi_DsExtendedOperation extended_op,
+ uint64_t fsmo_info,
+ uint64_t min_usn,
+ dreplsrv_extended_callback_t callback,
+ void *callback_data)
+{
+ WERROR werr;
+ struct extended_op_data *data;
+
+ data = talloc(service, struct extended_op_data);
+ W_ERROR_HAVE_NO_MEMORY(data);
+
+ werr = drepl_create_extended_source_dsa(service, data, nc_dn, source_dsa_dn, min_usn, &data->sdsa);
+ W_ERROR_NOT_OK_RETURN(werr);
+
+ data->callback = callback;
+ data->callback_data = callback_data;
+
+ werr = dreplsrv_schedule_partition_pull_source(service, data->sdsa,
+ 0, extended_op, fsmo_info,
+ extended_op_callback, data);
+ if (!W_ERROR_IS_OK(werr)) {
+ talloc_free(data);
+ }
+
+ dreplsrv_run_pending_ops(service);
+
+ return werr;
+}
diff --git a/source4/dsdb/repl/drepl_fsmo.c b/source4/dsdb/repl/drepl_fsmo.c
new file mode 100644
index 0000000..3c3cbad
--- /dev/null
+++ b/source4/dsdb/repl/drepl_fsmo.c
@@ -0,0 +1,147 @@
+/*
+ Unix SMB/CIFS Implementation.
+
+ DSDB replication service - FSMO role change
+
+ Copyright (C) Nadezhda Ivanova 2010
+ Copyright (C) Andrew Tridgell 2010
+ Copyright (C) Andrew Bartlett 2010
+ Copyright (C) Anatoliy Atanasov 2010
+
+ based on drepl_ridalloc.c
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#include "includes.h"
+#include "dsdb/samdb/samdb.h"
+#include "samba/service.h"
+#include "dsdb/repl/drepl_service.h"
+#include "param/param.h"
+
+#undef DBGC_CLASS
+#define DBGC_CLASS DBGC_DRS_REPL
+
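+/*
+  State kept while a role transfer extended operation is in flight.
+  The IRPC reply is deferred (msg->defer_reply = true) and sent from
+  drepl_role_callback() once the request completes.
+ */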
+struct fsmo_role_state {
+ struct irpc_message *msg;
+ struct drepl_takeFSMORole *r;
+};
+
+static void drepl_role_callback(struct dreplsrv_service *service,
+ WERROR werr,
+ enum drsuapi_DsExtendedError ext_err,
+ void *cb_data)
+{
+ struct fsmo_role_state *fsmo = talloc_get_type_abort(cb_data, struct fsmo_role_state);
+ if (!W_ERROR_IS_OK(werr)) {
+ DEBUG(2,(__location__ ": Failed role transfer - %s - extended_ret[0x%X]\n",
+ win_errstr(werr), ext_err));
+ } else {
+ DEBUG(2,(__location__ ": Successful role transfer\n"));
+ }
+ fsmo->r->out.result = werr;
+ irpc_send_reply(fsmo->msg, NT_STATUS_OK);
+}
+
+/*
+  see which role we are asked to assume, initialize data and send the request
+ */
+NTSTATUS drepl_take_FSMO_role(struct irpc_message *msg,
+ struct drepl_takeFSMORole *r)
+{
+ struct dreplsrv_service *service = talloc_get_type(msg->private_data,
+ struct dreplsrv_service);
+ struct ldb_dn *role_owner_dn, *fsmo_role_dn;
+ TALLOC_CTX *tmp_ctx = talloc_new(service);
+ uint64_t fsmo_info = 0;
+ enum drsuapi_DsExtendedOperation extended_op = DRSUAPI_EXOP_NONE;
+ WERROR werr;
+ enum drepl_role_master role = r->in.role;
+ struct fsmo_role_state *fsmo;
+ bool is_us;
+ int ret;
+
+ werr = dsdb_get_fsmo_role_info(tmp_ctx, service->samdb, role,
+ &fsmo_role_dn, &role_owner_dn);
+ if (!W_ERROR_IS_OK(werr)) {
+ talloc_free(tmp_ctx);
+ r->out.result = werr;
+ return NT_STATUS_OK;
+ }
+
+ switch (role) {
+ case DREPL_NAMING_MASTER:
+ case DREPL_INFRASTRUCTURE_MASTER:
+ case DREPL_SCHEMA_MASTER:
+ extended_op = DRSUAPI_EXOP_FSMO_REQ_ROLE;
+ break;
+ case DREPL_RID_MASTER:
+ extended_op = DRSUAPI_EXOP_FSMO_RID_REQ_ROLE;
+ break;
+ case DREPL_PDC_MASTER:
+ extended_op = DRSUAPI_EXOP_FSMO_REQ_PDC;
+ break;
+ default:
+ DEBUG(0,("Unknown role %u in role transfer\n",
+ (unsigned)role));
+ /* IRPC messages are trusted, so this really should not happen */
+ smb_panic("Unknown role despite dsdb_get_fsmo_role_info success");
+ }
+
+ ret = samdb_dn_is_our_ntdsa(service->samdb, role_owner_dn, &is_us);
+ if (ret != LDB_SUCCESS) {
+ DEBUG(0,("FSMO role check failed (failed to confirm if our ntdsDsa) for DN %s and owner %s \n",
+ ldb_dn_get_linearized(fsmo_role_dn),
+ ldb_dn_get_linearized(role_owner_dn)));
+ talloc_free(tmp_ctx);
+ r->out.result = WERR_DS_DRA_INTERNAL_ERROR;
+ return NT_STATUS_OK;
+ }
+
+ if (is_us) {
+ DEBUG(5,("FSMO role check failed, we already own DN %s with %s\n",
+ ldb_dn_get_linearized(fsmo_role_dn),
+ ldb_dn_get_linearized(role_owner_dn)));
+ r->out.result = WERR_OK;
+ talloc_free(tmp_ctx);
+ return NT_STATUS_OK;
+ }
+
+ fsmo = talloc(msg, struct fsmo_role_state);
+ NT_STATUS_HAVE_NO_MEMORY(fsmo);
+
+ fsmo->msg = msg;
+ fsmo->r = r;
+
+ werr = drepl_request_extended_op(service,
+ fsmo_role_dn,
+ role_owner_dn,
+ extended_op,
+ fsmo_info,
+ 0,
+ drepl_role_callback,
+ fsmo);
+ if (!W_ERROR_IS_OK(werr)) {
+ r->out.result = werr;
+ talloc_free(tmp_ctx);
+ return NT_STATUS_OK;
+ }
+
+ /* mark this message to be answered later */
+ msg->defer_reply = true;
+ dreplsrv_run_pending_ops(service);
+ talloc_free(tmp_ctx);
+ return NT_STATUS_OK;
+}
diff --git a/source4/dsdb/repl/drepl_notify.c b/source4/dsdb/repl/drepl_notify.c
new file mode 100644
index 0000000..20be3b5
--- /dev/null
+++ b/source4/dsdb/repl/drepl_notify.c
@@ -0,0 +1,485 @@
+/*
+ Unix SMB/CIFS Implementation.
+
+ DSDB replication service periodic notification handling
+
+ Copyright (C) Andrew Tridgell 2009
+ based on drepl_periodic
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#include "includes.h"
+#include "lib/events/events.h"
+#include "dsdb/samdb/samdb.h"
+#include "auth/auth.h"
+#include "samba/service.h"
+#include "dsdb/repl/drepl_service.h"
+#include <ldb_errors.h>
+#include "../lib/util/dlinklist.h"
+#include "librpc/gen_ndr/ndr_misc.h"
+#include "librpc/gen_ndr/ndr_drsuapi.h"
+#include "librpc/gen_ndr/ndr_drsblobs.h"
+#include "libcli/composite/composite.h"
+#include "../lib/util/tevent_ntstatus.h"
+
+#undef DBGC_CLASS
+#define DBGC_CLASS DBGC_DRS_REPL
+
+
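+/*
+  State for one asynchronous DsReplicaSync notification: connect to the
+  partner's DRSUAPI pipe, then send DsReplicaSync with the ASYNC_OP,
+  UPDATE_NOTIFICATION and WRIT_REP options (plus SYNC_URGENT when the
+  pending change was urgent).
+ */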
+struct dreplsrv_op_notify_state {
+ struct tevent_context *ev;
+ struct dreplsrv_notify_operation *op;
+ void *ndr_struct_ptr;
+};
+
+static void dreplsrv_op_notify_connect_done(struct tevent_req *subreq);
+
+/*
+ start the ReplicaSync async call
+ */
+static struct tevent_req *dreplsrv_op_notify_send(TALLOC_CTX *mem_ctx,
+ struct tevent_context *ev,
+ struct dreplsrv_notify_operation *op)
+{
+ struct tevent_req *req;
+ struct dreplsrv_op_notify_state *state;
+ struct tevent_req *subreq;
+
+ req = tevent_req_create(mem_ctx, &state,
+ struct dreplsrv_op_notify_state);
+ if (req == NULL) {
+ return NULL;
+ }
+ state->ev = ev;
+ state->op = op;
+
+ subreq = dreplsrv_out_drsuapi_send(state,
+ ev,
+ op->source_dsa->conn);
+ if (tevent_req_nomem(subreq, req)) {
+ return tevent_req_post(req, ev);
+ }
+ tevent_req_set_callback(subreq, dreplsrv_op_notify_connect_done, req);
+
+ return req;
+}
+
+static void dreplsrv_op_notify_replica_sync_trigger(struct tevent_req *req);
+
+static void dreplsrv_op_notify_connect_done(struct tevent_req *subreq)
+{
+ struct tevent_req *req = tevent_req_callback_data(subreq,
+ struct tevent_req);
+ NTSTATUS status;
+
+ status = dreplsrv_out_drsuapi_recv(subreq);
+ TALLOC_FREE(subreq);
+ if (tevent_req_nterror(req, status)) {
+ return;
+ }
+
+ dreplsrv_op_notify_replica_sync_trigger(req);
+}
+
+static void dreplsrv_op_notify_replica_sync_done(struct tevent_req *subreq);
+
+static void dreplsrv_op_notify_replica_sync_trigger(struct tevent_req *req)
+{
+ struct dreplsrv_op_notify_state *state =
+ tevent_req_data(req,
+ struct dreplsrv_op_notify_state);
+ struct dreplsrv_partition *partition = state->op->source_dsa->partition;
+ struct dreplsrv_drsuapi_connection *drsuapi = state->op->source_dsa->conn->drsuapi;
+ struct drsuapi_DsReplicaSync *r;
+ struct tevent_req *subreq;
+
+ r = talloc_zero(state, struct drsuapi_DsReplicaSync);
+ if (tevent_req_nomem(r, req)) {
+ return;
+ }
+ r->in.req = talloc_zero(r, union drsuapi_DsReplicaSyncRequest);
+ if (tevent_req_nomem(r->in.req, req)) {
+ return;
+ }
+ r->in.bind_handle = &drsuapi->bind_handle;
+ r->in.level = 1;
+ r->in.req->req1.naming_context = &partition->nc;
+ r->in.req->req1.source_dsa_guid = state->op->service->ntds_guid;
+ r->in.req->req1.options =
+ DRSUAPI_DRS_ASYNC_OP |
+ DRSUAPI_DRS_UPDATE_NOTIFICATION |
+ DRSUAPI_DRS_WRIT_REP;
+
+ if (state->op->is_urgent) {
+ r->in.req->req1.options |= DRSUAPI_DRS_SYNC_URGENT;
+ }
+
+ state->ndr_struct_ptr = r;
+
+ if (DEBUGLVL(10)) {
+ NDR_PRINT_IN_DEBUG(drsuapi_DsReplicaSync, r);
+ }
+
+ subreq = dcerpc_drsuapi_DsReplicaSync_r_send(state,
+ state->ev,
+ drsuapi->drsuapi_handle,
+ r);
+ if (tevent_req_nomem(subreq, req)) {
+ return;
+ }
+ tevent_req_set_callback(subreq, dreplsrv_op_notify_replica_sync_done, req);
+}
+
+static void dreplsrv_op_notify_replica_sync_done(struct tevent_req *subreq)
+{
+ struct tevent_req *req =
+ tevent_req_callback_data(subreq,
+ struct tevent_req);
+ struct dreplsrv_op_notify_state *state =
+ tevent_req_data(req,
+ struct dreplsrv_op_notify_state);
+ struct drsuapi_DsReplicaSync *r = talloc_get_type(state->ndr_struct_ptr,
+ struct drsuapi_DsReplicaSync);
+ NTSTATUS status;
+
+ state->ndr_struct_ptr = NULL;
+
+ status = dcerpc_drsuapi_DsReplicaSync_r_recv(subreq, r);
+ TALLOC_FREE(subreq);
+ if (tevent_req_nterror(req, status)) {
+ return;
+ }
+
+ if (!W_ERROR_IS_OK(r->out.result)) {
+ status = werror_to_ntstatus(r->out.result);
+ tevent_req_nterror(req, status);
+ return;
+ }
+
+ tevent_req_done(req);
+}
+
+static NTSTATUS dreplsrv_op_notify_recv(struct tevent_req *req)
+{
+ return tevent_req_simple_recv_ntstatus(req);
+}
+
+/*
+ called when a notify operation has completed
+ */
+static void dreplsrv_notify_op_callback(struct tevent_req *subreq)
+{
+ struct dreplsrv_notify_operation *op =
+ tevent_req_callback_data(subreq,
+ struct dreplsrv_notify_operation);
+ NTSTATUS status;
+ struct dreplsrv_service *s = op->service;
+ WERROR werr;
+
+ status = dreplsrv_op_notify_recv(subreq);
+ werr = ntstatus_to_werror(status);
+ TALLOC_FREE(subreq);
+ if (!NT_STATUS_IS_OK(status)) {
+ DBG_INFO("dreplsrv_notify: Failed to send DsReplicaSync to %s for %s - %s : %s\n",
+ op->source_dsa->repsFrom1->other_info->dns_name,
+ ldb_dn_get_linearized(op->source_dsa->partition->dn),
+ nt_errstr(status), win_errstr(werr));
+ } else {
+ DBG_INFO("dreplsrv_notify: DsReplicaSync successfully sent to %s\n",
+ op->source_dsa->repsFrom1->other_info->dns_name);
+ op->source_dsa->notify_uSN = op->uSN;
+ }
+
+ drepl_reps_update(s, "repsTo", op->source_dsa->partition->dn,
+ &op->source_dsa->repsFrom1->source_dsa_obj_guid,
+ werr);
+
+ talloc_free(op);
+ s->ops.n_current = NULL;
+ dreplsrv_run_pending_ops(s);
+}
+
+/*
+ run any pending replica sync calls
+ */
+void dreplsrv_notify_run_ops(struct dreplsrv_service *s)
+{
+ struct dreplsrv_notify_operation *op;
+ struct tevent_req *subreq;
+
+ if (s->ops.n_current || s->ops.current) {
+ /* if there's still one running, we're done */
+ return;
+ }
+
+ if (!s->ops.notifies) {
+ /* if there are no pending operations, we're done */
+ return;
+ }
+
+ op = s->ops.notifies;
+ s->ops.n_current = op;
+ DLIST_REMOVE(s->ops.notifies, op);
+
+ subreq = dreplsrv_op_notify_send(op, s->task->event_ctx, op);
+ if (!subreq) {
+ DBG_ERR("dreplsrv_notify_run_ops: dreplsrv_op_notify_send[%s][%s] - no memory\n",
+ op->source_dsa->repsFrom1->other_info->dns_name,
+ ldb_dn_get_linearized(op->source_dsa->partition->dn));
+ return;
+ }
+ tevent_req_set_callback(subreq, dreplsrv_notify_op_callback, op);
+ DBG_INFO("started DsReplicaSync for %s to %s\n",
+ ldb_dn_get_linearized(op->source_dsa->partition->dn),
+ op->source_dsa->repsFrom1->other_info->dns_name);
+}
+
+
+/*
+ find a source_dsa for a given guid
+ */
+static struct dreplsrv_partition_source_dsa *dreplsrv_find_notify_dsa(struct dreplsrv_partition *p,
+ struct GUID *guid)
+{
+ struct dreplsrv_partition_source_dsa *s;
+
+ /* first check the sources list */
+ for (s=p->sources; s; s=s->next) {
+ if (GUID_equal(&s->repsFrom1->source_dsa_obj_guid, guid)) {
+ return s;
+ }
+ }
+
+ /* then the notifies list */
+ for (s=p->notifies; s; s=s->next) {
+ if (GUID_equal(&s->repsFrom1->source_dsa_obj_guid, guid)) {
+ return s;
+ }
+ }
+ return NULL;
+}
+
+
+/*
+ schedule a replicaSync message
+ */
+static WERROR dreplsrv_schedule_notify_sync(struct dreplsrv_service *service,
+ struct dreplsrv_partition *p,
+ struct repsFromToBlob *reps,
+ TALLOC_CTX *mem_ctx,
+ uint64_t uSN,
+ bool is_urgent,
+ uint32_t replica_flags)
+{
+ struct dreplsrv_notify_operation *op;
+ struct dreplsrv_partition_source_dsa *s;
+
+ s = dreplsrv_find_notify_dsa(p, &reps->ctr.ctr1.source_dsa_obj_guid);
+ if (s == NULL) {
+ DBG_ERR("Unable to find source_dsa for %s\n",
+ GUID_string(mem_ctx, &reps->ctr.ctr1.source_dsa_obj_guid));
+ return WERR_DS_UNAVAILABLE;
+ }
+
+ /* first try to find an existing notify operation */
+ for (op = service->ops.notifies; op; op = op->next) {
+ if (op->source_dsa != s) {
+ continue;
+ }
+
+ if (op->is_urgent != is_urgent) {
+ continue;
+ }
+
+ if (op->replica_flags != replica_flags) {
+ continue;
+ }
+
+ if (op->uSN < uSN) {
+ op->uSN = uSN;
+ }
+
+ /* reuse the notify operation, as it's not yet started */
+ return WERR_OK;
+ }
+
+ op = talloc_zero(mem_ctx, struct dreplsrv_notify_operation);
+ W_ERROR_HAVE_NO_MEMORY(op);
+
+ op->service = service;
+ op->source_dsa = s;
+ op->uSN = uSN;
+ op->is_urgent = is_urgent;
+ op->replica_flags = replica_flags;
+ op->schedule_time = time(NULL);
+
+ DLIST_ADD_END(service->ops.notifies, op);
+ talloc_steal(service, op);
+ return WERR_OK;
+}
+
+/*
+  see if a partition has a higher uSN than what is recorded in the repsTo and
+  if so then send a DsReplicaSync
+ */
+static WERROR dreplsrv_notify_check(struct dreplsrv_service *s,
+ struct dreplsrv_partition *p,
+ TALLOC_CTX *mem_ctx)
+{
+ uint32_t count=0;
+ struct repsFromToBlob *reps;
+ WERROR werr;
+ uint64_t uSNHighest;
+ uint64_t uSNUrgent;
+ uint32_t i;
+ int ret;
+
+ werr = dsdb_loadreps(s->samdb, mem_ctx, p->dn, "repsTo", &reps, &count);
+ if (!W_ERROR_IS_OK(werr)) {
+ DBG_ERR("Failed to load repsTo for %s\n",
+ ldb_dn_get_linearized(p->dn));
+ return werr;
+ }
+
+ /* loads the partition uSNHighest and uSNUrgent */
+ ret = dsdb_load_partition_usn(s->samdb, p->dn, &uSNHighest, &uSNUrgent);
+ if (ret != LDB_SUCCESS || uSNHighest == 0) {
+ /* nothing to do */
+ return WERR_OK;
+ }
+
+ /* see if any of our partners need some of our objects */
+ for (i=0; i<count; i++) {
+ struct dreplsrv_partition_source_dsa *sdsa;
+ uint32_t replica_flags;
+ sdsa = dreplsrv_find_notify_dsa(p, &reps[i].ctr.ctr1.source_dsa_obj_guid);
+ replica_flags = reps[i].ctr.ctr1.replica_flags;
+ if (sdsa == NULL) continue;
+ if (sdsa->notify_uSN < uSNHighest) {
+ /* we need to tell this partner to replicate
+ with us */
+ bool is_urgent = sdsa->notify_uSN < uSNUrgent;
+
+ /* check if urgent replication is needed */
+ werr = dreplsrv_schedule_notify_sync(s, p, &reps[i], mem_ctx,
+ uSNHighest, is_urgent, replica_flags);
+ if (!W_ERROR_IS_OK(werr)) {
+ DBG_ERR("Failed to setup notify to %s for %s\n",
+ reps[i].ctr.ctr1.other_info->dns_name,
+ ldb_dn_get_linearized(p->dn));
+ return werr;
+ }
+ DBG_DEBUG("queued DsReplicaSync for %s to %s "
+ "(urgent=%s) uSN=%llu:%llu\n",
+ ldb_dn_get_linearized(p->dn),
+ reps[i].ctr.ctr1.other_info->dns_name,
+ is_urgent?"true":"false",
+ (unsigned long long)sdsa->notify_uSN,
+ (unsigned long long)uSNHighest);
+ }
+ }
+
+ return WERR_OK;
+}
+
+/*
+ see if any of the partitions have changed, and if so then send a
+ DsReplicaSync to all the replica partners in the repsTo object
+ */
+static WERROR dreplsrv_notify_check_all(struct dreplsrv_service *s, TALLOC_CTX *mem_ctx)
+{
+ WERROR status;
+ struct dreplsrv_partition *p;
+
+ for (p = s->partitions; p; p = p->next) {
+ status = dreplsrv_notify_check(s, p, mem_ctx);
+ W_ERROR_NOT_OK_RETURN(status);
+ }
+
+ return WERR_OK;
+}
+
+static void dreplsrv_notify_run(struct dreplsrv_service *service);
+
+static void dreplsrv_notify_handler_te(struct tevent_context *ev, struct tevent_timer *te,
+ struct timeval t, void *ptr)
+{
+ struct dreplsrv_service *service = talloc_get_type(ptr, struct dreplsrv_service);
+ WERROR status;
+
+ service->notify.te = NULL;
+
+ dreplsrv_notify_run(service);
+
+ status = dreplsrv_notify_schedule(service, service->notify.interval);
+ if (!W_ERROR_IS_OK(status)) {
+ task_server_terminate(service->task, win_errstr(status), false);
+ return;
+ }
+}
+
+WERROR dreplsrv_notify_schedule(struct dreplsrv_service *service, uint32_t next_interval)
+{
+ TALLOC_CTX *tmp_mem;
+ struct tevent_timer *new_te;
+ struct timeval next_time;
+
+ /* prevent looping */
+ if (next_interval == 0) next_interval = 1;
+
+ next_time = timeval_current_ofs(next_interval, 50);
+
+ if (service->notify.te) {
+ /*
+ * if the timestamp of the new event is later than
+ * the currently scheduled next event, we don't need to reschedule
+ */
+ if (timeval_compare(&next_time, &service->notify.next_event) > 0) {
+ return WERR_OK;
+ }
+ }
+
+ /* reset the next scheduled timestamp */
+ service->notify.next_event = next_time;
+
+ new_te = tevent_add_timer(service->task->event_ctx, service,
+ service->notify.next_event,
+ dreplsrv_notify_handler_te, service);
+ W_ERROR_HAVE_NO_MEMORY(new_te);
+
+ tmp_mem = talloc_new(service);
+ DBG_DEBUG("dreplsrv_notify_schedule(%u) %sscheduled for: %s\n",
+ next_interval,
+ (service->notify.te?"re":""),
+ nt_time_string(tmp_mem, timeval_to_nttime(&next_time)));
+ talloc_free(tmp_mem);
+
+ talloc_free(service->notify.te);
+ service->notify.te = new_te;
+
+ return WERR_OK;
+}
+
+static void dreplsrv_notify_run(struct dreplsrv_service *service)
+{
+ TALLOC_CTX *mem_ctx;
+
+ mem_ctx = talloc_new(service);
+ dreplsrv_notify_check_all(service, mem_ctx);
+ talloc_free(mem_ctx);
+
+ dreplsrv_run_pending_ops(service);
+}
diff --git a/source4/dsdb/repl/drepl_out_helpers.c b/source4/dsdb/repl/drepl_out_helpers.c
new file mode 100644
index 0000000..401d2bf
--- /dev/null
+++ b/source4/dsdb/repl/drepl_out_helpers.c
@@ -0,0 +1,1356 @@
+/*
+ Unix SMB/CIFS Implementation.
+ DSDB replication service helper function for outgoing traffic
+
+ Copyright (C) Stefan Metzmacher 2007
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#include "includes.h"
+#include "dsdb/samdb/samdb.h"
+#include "auth/auth.h"
+#include "samba/service.h"
+#include "lib/events/events.h"
+#include "dsdb/repl/drepl_service.h"
+#include <ldb_errors.h>
+#include "../lib/util/dlinklist.h"
+#include "librpc/gen_ndr/ndr_misc.h"
+#include "librpc/gen_ndr/ndr_drsuapi.h"
+#include "librpc/gen_ndr/ndr_drsblobs.h"
+#include "libcli/composite/composite.h"
+#include "auth/gensec/gensec.h"
+#include "param/param.h"
+#include "../lib/util/tevent_ntstatus.h"
+#include "libcli/security/security.h"
+
+#undef DBGC_CLASS
+#define DBGC_CLASS DBGC_DRS_REPL
+
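+/*
+  State for establishing (or reusing) the outgoing DRSUAPI connection:
+  an existing connection is reused if its binding handle is still
+  connected, otherwise a new DCE/RPC pipe is opened and DsBind is sent,
+  with the partner's DsBindInfo normalised into remote_info28.
+ */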
+struct dreplsrv_out_drsuapi_state {
+ struct tevent_context *ev;
+
+ struct dreplsrv_out_connection *conn;
+
+ struct dreplsrv_drsuapi_connection *drsuapi;
+
+ struct drsuapi_DsBindInfoCtr bind_info_ctr;
+ struct drsuapi_DsBind bind_r;
+};
+
+static void dreplsrv_out_drsuapi_connect_done(struct composite_context *creq);
+
+struct tevent_req *dreplsrv_out_drsuapi_send(TALLOC_CTX *mem_ctx,
+ struct tevent_context *ev,
+ struct dreplsrv_out_connection *conn)
+{
+ struct tevent_req *req;
+ struct dreplsrv_out_drsuapi_state *state;
+ struct composite_context *creq;
+
+ req = tevent_req_create(mem_ctx, &state,
+ struct dreplsrv_out_drsuapi_state);
+ if (req == NULL) {
+ return NULL;
+ }
+
+ state->ev = ev;
+ state->conn = conn;
+ state->drsuapi = conn->drsuapi;
+
+ if (state->drsuapi != NULL) {
+ struct dcerpc_binding_handle *b =
+ state->drsuapi->pipe->binding_handle;
+ bool is_connected = dcerpc_binding_handle_is_connected(b);
+
+ if (is_connected) {
+ tevent_req_done(req);
+ return tevent_req_post(req, ev);
+ }
+
+ TALLOC_FREE(conn->drsuapi);
+ }
+
+ state->drsuapi = talloc_zero(state, struct dreplsrv_drsuapi_connection);
+ if (tevent_req_nomem(state->drsuapi, req)) {
+ return tevent_req_post(req, ev);
+ }
+
+ creq = dcerpc_pipe_connect_b_send(state, conn->binding, &ndr_table_drsuapi,
+ conn->service->system_session_info->credentials,
+ ev, conn->service->task->lp_ctx);
+ if (tevent_req_nomem(creq, req)) {
+ return tevent_req_post(req, ev);
+ }
+ composite_continue(NULL, creq, dreplsrv_out_drsuapi_connect_done, req);
+
+ return req;
+}
+
+static void dreplsrv_out_drsuapi_bind_done(struct tevent_req *subreq);
+
+static void dreplsrv_out_drsuapi_connect_done(struct composite_context *creq)
+{
+ struct tevent_req *req = talloc_get_type(creq->async.private_data,
+ struct tevent_req);
+ struct dreplsrv_out_drsuapi_state *state = tevent_req_data(req,
+ struct dreplsrv_out_drsuapi_state);
+ NTSTATUS status;
+ struct tevent_req *subreq;
+
+ status = dcerpc_pipe_connect_b_recv(creq,
+ state->drsuapi,
+ &state->drsuapi->pipe);
+ if (tevent_req_nterror(req, status)) {
+ return;
+ }
+
+ state->drsuapi->drsuapi_handle = state->drsuapi->pipe->binding_handle;
+
+ status = gensec_session_key(state->drsuapi->pipe->conn->security_state.generic_state,
+ state->drsuapi,
+ &state->drsuapi->gensec_skey);
+ if (tevent_req_nterror(req, status)) {
+ return;
+ }
+
+ state->bind_info_ctr.length = 28;
+ state->bind_info_ctr.info.info28 = state->conn->service->bind_info28;
+
+ state->bind_r.in.bind_guid = &state->conn->service->ntds_guid;
+ state->bind_r.in.bind_info = &state->bind_info_ctr;
+ state->bind_r.out.bind_handle = &state->drsuapi->bind_handle;
+
+ subreq = dcerpc_drsuapi_DsBind_r_send(state,
+ state->ev,
+ state->drsuapi->drsuapi_handle,
+ &state->bind_r);
+ if (tevent_req_nomem(subreq, req)) {
+ return;
+ }
+ tevent_req_set_callback(subreq, dreplsrv_out_drsuapi_bind_done, req);
+}
+
+static void dreplsrv_out_drsuapi_bind_done(struct tevent_req *subreq)
+{
+ struct tevent_req *req = tevent_req_callback_data(subreq,
+ struct tevent_req);
+ struct dreplsrv_out_drsuapi_state *state = tevent_req_data(req,
+ struct dreplsrv_out_drsuapi_state);
+ NTSTATUS status;
+
+ status = dcerpc_drsuapi_DsBind_r_recv(subreq, state);
+ TALLOC_FREE(subreq);
+ if (tevent_req_nterror(req, status)) {
+ return;
+ }
+
+ if (!W_ERROR_IS_OK(state->bind_r.out.result)) {
+ status = werror_to_ntstatus(state->bind_r.out.result);
+ tevent_req_nterror(req, status);
+ return;
+ }
+
+ ZERO_STRUCT(state->drsuapi->remote_info28);
+ if (state->bind_r.out.bind_info) {
+ struct drsuapi_DsBindInfo28 *info28;
+ info28 = &state->drsuapi->remote_info28;
+
+ switch (state->bind_r.out.bind_info->length) {
+ case 24: {
+ struct drsuapi_DsBindInfo24 *info24;
+ info24 = &state->bind_r.out.bind_info->info.info24;
+
+ info28->supported_extensions = info24->supported_extensions;
+ info28->site_guid = info24->site_guid;
+ info28->pid = info24->pid;
+ info28->repl_epoch = 0;
+ break;
+ }
+ case 28: {
+ *info28 = state->bind_r.out.bind_info->info.info28;
+ break;
+ }
+ case 32: {
+ struct drsuapi_DsBindInfo32 *info32;
+ info32 = &state->bind_r.out.bind_info->info.info32;
+
+ info28->supported_extensions = info32->supported_extensions;
+ info28->site_guid = info32->site_guid;
+ info28->pid = info32->pid;
+ info28->repl_epoch = info32->repl_epoch;
+ break;
+ }
+ case 48: {
+ struct drsuapi_DsBindInfo48 *info48;
+ info48 = &state->bind_r.out.bind_info->info.info48;
+
+ info28->supported_extensions = info48->supported_extensions;
+ info28->site_guid = info48->site_guid;
+ info28->pid = info48->pid;
+ info28->repl_epoch = info48->repl_epoch;
+ break;
+ }
+ case 52: {
+ struct drsuapi_DsBindInfo52 *info52;
+ info52 = &state->bind_r.out.bind_info->info.info52;
+
+ info28->supported_extensions = info52->supported_extensions;
+ info28->site_guid = info52->site_guid;
+ info28->pid = info52->pid;
+ info28->repl_epoch = info52->repl_epoch;
+ break;
+ }
+ default:
+ DEBUG(1, ("Warning: invalid info length in bind info: %d\n",
+ state->bind_r.out.bind_info->length));
+ break;
+ }
+ }
+
+ tevent_req_done(req);
+}
+
+NTSTATUS dreplsrv_out_drsuapi_recv(struct tevent_req *req)
+{
+ struct dreplsrv_out_drsuapi_state *state = tevent_req_data(req,
+ struct dreplsrv_out_drsuapi_state);
+ NTSTATUS status;
+
+ if (tevent_req_is_nterror(req, &status)) {
+ tevent_req_received(req);
+ return status;
+ }
+
+ state->conn->drsuapi = talloc_move(state->conn, &state->drsuapi);
+
+ tevent_req_received(req);
+ return NT_STATUS_OK;
+}
+
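+/*
+  Accumulates the objects and linked attributes received across the
+  chunks of a schema partition replication cycle, so the new schema is
+  only converted and applied once the full cycle has been received
+  (see dreplsrv_op_pull_source_apply_changes_trigger()).
+ */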
+struct dreplsrv_op_pull_source_schema_cycle {
+ struct repsFromTo1 repsFrom1;
+ size_t object_count;
+ struct drsuapi_DsReplicaObjectListItemEx *first_object;
+ struct drsuapi_DsReplicaObjectListItemEx *last_object;
+ uint32_t linked_attributes_count;
+ struct drsuapi_DsReplicaLinkedAttribute *linked_attributes;
+};
+
+struct dreplsrv_op_pull_source_state {
+ struct tevent_context *ev;
+ struct dreplsrv_out_operation *op;
+ void *ndr_struct_ptr;
+ /*
+ * Used when we have to re-try with a different NC, eg for
+ * EXOP retry or to get a current schema first
+ */
+ struct dreplsrv_partition_source_dsa *source_dsa_retry;
+ enum drsuapi_DsExtendedOperation extended_op_retry;
+ bool retry_started;
+ struct dreplsrv_op_pull_source_schema_cycle *schema_cycle;
+};
+
+static void dreplsrv_op_pull_source_connect_done(struct tevent_req *subreq);
+
+struct tevent_req *dreplsrv_op_pull_source_send(TALLOC_CTX *mem_ctx,
+ struct tevent_context *ev,
+ struct dreplsrv_out_operation *op)
+{
+ struct tevent_req *req;
+ struct dreplsrv_op_pull_source_state *state;
+ struct tevent_req *subreq;
+
+ req = tevent_req_create(mem_ctx, &state,
+ struct dreplsrv_op_pull_source_state);
+ if (req == NULL) {
+ return NULL;
+ }
+ state->ev = ev;
+ state->op = op;
+
+ subreq = dreplsrv_out_drsuapi_send(state, ev, op->source_dsa->conn);
+ if (tevent_req_nomem(subreq, req)) {
+ return tevent_req_post(req, ev);
+ }
+ tevent_req_set_callback(subreq, dreplsrv_op_pull_source_connect_done, req);
+
+ return req;
+}
+
+static bool dreplsrv_op_pull_source_detect_schema_cycle(struct tevent_req *req)
+{
+ struct dreplsrv_op_pull_source_state *state =
+ tevent_req_data(req,
+ struct dreplsrv_op_pull_source_state);
+ bool is_schema = false;
+
+ if (state->op->extended_op == DRSUAPI_EXOP_NONE) {
+ struct dreplsrv_out_operation *op = state->op;
+ struct dreplsrv_service *service = op->service;
+ struct ldb_dn *schema_dn = ldb_get_schema_basedn(service->samdb);
+ struct dreplsrv_partition *partition = op->source_dsa->partition;
+
+ is_schema = ldb_dn_compare(partition->dn, schema_dn) == 0;
+ }
+
+ if (is_schema) {
+ struct dreplsrv_op_pull_source_schema_cycle *sc;
+
+ sc = talloc_zero(state,
+ struct dreplsrv_op_pull_source_schema_cycle);
+ if (tevent_req_nomem(sc, req)) {
+ return false;
+ }
+ sc->repsFrom1 = *state->op->source_dsa->repsFrom1;
+
+ state->schema_cycle = sc;
+ }
+
+ return true;
+}
+
+static void dreplsrv_op_pull_source_get_changes_trigger(struct tevent_req *req);
+
+static void dreplsrv_op_pull_source_connect_done(struct tevent_req *subreq)
+{
+ struct tevent_req *req = tevent_req_callback_data(subreq,
+ struct tevent_req);
+ NTSTATUS status;
+ bool ok;
+
+ status = dreplsrv_out_drsuapi_recv(subreq);
+ TALLOC_FREE(subreq);
+ if (tevent_req_nterror(req, status)) {
+ return;
+ }
+
+ ok = dreplsrv_op_pull_source_detect_schema_cycle(req);
+ if (!ok) {
+ return;
+ }
+
+ dreplsrv_op_pull_source_get_changes_trigger(req);
+}
+
+static void dreplsrv_op_pull_source_get_changes_done(struct tevent_req *subreq);
+
+/*
+ get a RODC partial attribute set for a replication call
+ */
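+/*
+  The set contains every replicated, non-constructed attribute that is
+  not marked with SEARCH_FLAG_RODC_ATTRIBUTE, i.e. secrets and other
+  RODC-filtered attributes are excluded from the request.
+ */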
+static NTSTATUS dreplsrv_get_rodc_partial_attribute_set(struct dreplsrv_service *service,
+ TALLOC_CTX *mem_ctx,
+ struct drsuapi_DsPartialAttributeSet **_pas,
+ struct drsuapi_DsReplicaOIDMapping_Ctr **pfm,
+ bool for_schema)
+{
+ struct drsuapi_DsPartialAttributeSet *pas;
+ struct dsdb_schema *schema;
+ uint32_t i;
+
+ pas = talloc_zero(mem_ctx, struct drsuapi_DsPartialAttributeSet);
+ NT_STATUS_HAVE_NO_MEMORY(pas);
+
+ schema = dsdb_get_schema(service->samdb, NULL);
+
+ pas->version = 1;
+ pas->attids = talloc_array(pas, enum drsuapi_DsAttributeId, schema->num_attributes);
+ if (pas->attids == NULL) {
+ TALLOC_FREE(pas);
+ return NT_STATUS_NO_MEMORY;
+ }
+
+ for (i=0; i<schema->num_attributes; i++) {
+ struct dsdb_attribute *a;
+ a = schema->attributes_by_attributeID_id[i];
+ if (a->systemFlags & (DS_FLAG_ATTR_NOT_REPLICATED | DS_FLAG_ATTR_IS_CONSTRUCTED)) {
+ continue;
+ }
+ if (a->searchFlags & SEARCH_FLAG_RODC_ATTRIBUTE) {
+ continue;
+ }
+ pas->attids[pas->num_attids] = dsdb_attribute_get_attid(a, for_schema);
+ pas->num_attids++;
+ }
+
+ pas->attids = talloc_realloc(pas, pas->attids, enum drsuapi_DsAttributeId, pas->num_attids);
+ if (pas->attids == NULL) {
+ TALLOC_FREE(pas);
+ return NT_STATUS_NO_MEMORY;
+ }
+
+ *_pas = pas;
+
+ if (pfm != NULL) {
+ dsdb_get_oid_mappings_drsuapi(schema, true, mem_ctx, pfm);
+ }
+
+ return NT_STATUS_OK;
+}
+
+
+/*
+ get a GC partial attribute set for a replication call
+ */
+static NTSTATUS dreplsrv_get_gc_partial_attribute_set(struct dreplsrv_service *service,
+ TALLOC_CTX *mem_ctx,
+ struct drsuapi_DsPartialAttributeSet **_pas,
+ struct drsuapi_DsReplicaOIDMapping_Ctr **pfm)
+{
+ struct drsuapi_DsPartialAttributeSet *pas;
+ struct dsdb_schema *schema;
+ uint32_t i;
+
+ pas = talloc_zero(mem_ctx, struct drsuapi_DsPartialAttributeSet);
+ NT_STATUS_HAVE_NO_MEMORY(pas);
+
+ schema = dsdb_get_schema(service->samdb, NULL);
+
+ pas->version = 1;
+ pas->attids = talloc_array(pas, enum drsuapi_DsAttributeId, schema->num_attributes);
+ if (pas->attids == NULL) {
+ TALLOC_FREE(pas);
+ return NT_STATUS_NO_MEMORY;
+ }
+
+ for (i=0; i<schema->num_attributes; i++) {
+ struct dsdb_attribute *a;
+ a = schema->attributes_by_attributeID_id[i];
+ if (a->isMemberOfPartialAttributeSet) {
+ pas->attids[pas->num_attids] = dsdb_attribute_get_attid(a, false);
+ pas->num_attids++;
+ }
+ }
+
+ pas->attids = talloc_realloc(pas, pas->attids, enum drsuapi_DsAttributeId, pas->num_attids);
+ if (pas->attids == NULL) {
+ TALLOC_FREE(pas);
+ return NT_STATUS_NO_MEMORY;
+ }
+
+ *_pas = pas;
+
+ if (pfm != NULL) {
+ dsdb_get_oid_mappings_drsuapi(schema, true, mem_ctx, pfm);
+ }
+
+ return NT_STATUS_OK;
+}
+
+/*
+  convert an uptodateness vector (replUpToDateVectorCtr2) into the drsuapi_DsReplicaCursorCtrEx wire format
+ */
+static WERROR udv_convert(TALLOC_CTX *mem_ctx,
+ const struct replUpToDateVectorCtr2 *udv,
+ struct drsuapi_DsReplicaCursorCtrEx *udv_ex)
+{
+ uint32_t i;
+
+ udv_ex->version = 2;
+ udv_ex->reserved1 = 0;
+ udv_ex->reserved2 = 0;
+ udv_ex->count = udv->count;
+ udv_ex->cursors = talloc_array(mem_ctx, struct drsuapi_DsReplicaCursor, udv->count);
+ W_ERROR_HAVE_NO_MEMORY(udv_ex->cursors);
+
+ for (i=0; i<udv->count; i++) {
+ udv_ex->cursors[i].source_dsa_invocation_id = udv->cursors[i].source_dsa_invocation_id;
+ udv_ex->cursors[i].highest_usn = udv->cursors[i].highest_usn;
+ }
+
+ return WERR_OK;
+}
+
+
+static void dreplsrv_op_pull_source_get_changes_trigger(struct tevent_req *req)
+{
+ struct dreplsrv_op_pull_source_state *state = tevent_req_data(req,
+ struct dreplsrv_op_pull_source_state);
+ const struct repsFromTo1 *rf1 = state->op->source_dsa->repsFrom1;
+ struct dreplsrv_service *service = state->op->service;
+ struct dreplsrv_partition *partition = state->op->source_dsa->partition;
+ struct dreplsrv_drsuapi_connection *drsuapi = state->op->source_dsa->conn->drsuapi;
+ struct drsuapi_DsGetNCChanges *r;
+ struct drsuapi_DsReplicaCursorCtrEx *uptodateness_vector;
+ struct tevent_req *subreq;
+ struct drsuapi_DsPartialAttributeSet *pas = NULL;
+ NTSTATUS status;
+ uint32_t replica_flags;
+ struct drsuapi_DsReplicaHighWaterMark highwatermark;
+ struct drsuapi_DsReplicaOIDMapping_Ctr *mappings = NULL;
+ bool is_schema = false;
+
+ if (state->schema_cycle != NULL) {
+ is_schema = true;
+ rf1 = &state->schema_cycle->repsFrom1;
+ }
+
+ r = talloc(state, struct drsuapi_DsGetNCChanges);
+ if (tevent_req_nomem(r, req)) {
+ return;
+ }
+
+ r->out.level_out = talloc(r, uint32_t);
+ if (tevent_req_nomem(r->out.level_out, req)) {
+ return;
+ }
+ r->in.req = talloc(r, union drsuapi_DsGetNCChangesRequest);
+ if (tevent_req_nomem(r->in.req, req)) {
+ return;
+ }
+ r->out.ctr = talloc(r, union drsuapi_DsGetNCChangesCtr);
+ if (tevent_req_nomem(r->out.ctr, req)) {
+ return;
+ }
+
+ if (partition->uptodatevector.count != 0 &&
+ partition->uptodatevector_ex.count == 0) {
+ WERROR werr;
+ werr = udv_convert(partition, &partition->uptodatevector, &partition->uptodatevector_ex);
+ if (!W_ERROR_IS_OK(werr)) {
+ DEBUG(0,(__location__ ": Failed to convert UDV for %s : %s\n",
+ ldb_dn_get_linearized(partition->dn), win_errstr(werr)));
+ tevent_req_nterror(req, werror_to_ntstatus(werr));
+ return;
+ }
+ }
+
+ if (partition->uptodatevector_ex.count == 0) {
+ uptodateness_vector = NULL;
+ } else {
+ uptodateness_vector = &partition->uptodatevector_ex;
+ }
+
+ replica_flags = rf1->replica_flags;
+ highwatermark = rf1->highwatermark;
+
+ if (state->op->options & DRSUAPI_DRS_GET_ANC) {
+ replica_flags |= DRSUAPI_DRS_GET_ANC;
+ }
+
+ if (state->op->options & DRSUAPI_DRS_SYNC_FORCED) {
+ replica_flags |= DRSUAPI_DRS_SYNC_FORCED;
+ }
+
+ if (partition->partial_replica) {
+ status = dreplsrv_get_gc_partial_attribute_set(service, r,
+ &pas,
+ &mappings);
+ if (!NT_STATUS_IS_OK(status)) {
+ DEBUG(0,(__location__ ": Failed to construct GC partial attribute set : %s\n", nt_errstr(status)));
+ tevent_req_nterror(req, status);
+ return;
+ }
+ replica_flags &= ~DRSUAPI_DRS_WRIT_REP;
+ } else if (partition->rodc_replica || state->op->extended_op == DRSUAPI_EXOP_REPL_SECRET) {
+ status = dreplsrv_get_rodc_partial_attribute_set(service, r,
+ &pas,
+ &mappings,
+ is_schema);
+ if (!NT_STATUS_IS_OK(status)) {
+ DEBUG(0,(__location__ ": Failed to construct RODC partial attribute set : %s\n", nt_errstr(status)));
+ tevent_req_nterror(req, status);
+ return;
+ }
+ replica_flags &= ~DRSUAPI_DRS_WRIT_REP;
+ if (state->op->extended_op == DRSUAPI_EXOP_REPL_SECRET) {
+ replica_flags &= ~DRSUAPI_DRS_SPECIAL_SECRET_PROCESSING;
+ } else {
+ replica_flags |= DRSUAPI_DRS_SPECIAL_SECRET_PROCESSING;
+ }
+
+ /*
+ * As per MS-DRSR:
+ *
+ * 4.1.10.4
+ * Client Behavior When Sending the IDL_DRSGetNCChanges Request
+ *
+ * 4.1.10.4.1
+ * ReplicateNCRequestMsg
+ */
+ replica_flags |= DRSUAPI_DRS_GET_ALL_GROUP_MEMBERSHIP;
+ } else {
+ replica_flags |= DRSUAPI_DRS_GET_ALL_GROUP_MEMBERSHIP;
+ }
+
+ if (state->op->extended_op != DRSUAPI_EXOP_NONE) {
+ /*
+ * If it's an exop never set the ADD_REF even if it's in
+ * repsFrom flags.
+ */
+ replica_flags &= ~DRSUAPI_DRS_ADD_REF;
+ }
+
+ /* is this a full resync of all objects? */
+ if (state->op->options & DRSUAPI_DRS_FULL_SYNC_NOW) {
+ ZERO_STRUCT(highwatermark);
+ /* clear the FULL_SYNC_NOW option for subsequent
+ stages of the replication cycle */
+ state->op->options &= ~DRSUAPI_DRS_FULL_SYNC_NOW;
+ state->op->options |= DRSUAPI_DRS_FULL_SYNC_IN_PROGRESS;
+ replica_flags |= DRSUAPI_DRS_NEVER_SYNCED;
+ }
+ if (state->op->options & DRSUAPI_DRS_FULL_SYNC_IN_PROGRESS) {
+ uptodateness_vector = NULL;
+ }
+
+ r->in.bind_handle = &drsuapi->bind_handle;
+
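+ /*
+  * Pick the highest GetNCChanges request level the partner advertised
+  * in its DsBind info: level 10 adds more_flags (e.g. DRSUAPI_DRS_GET_TGT),
+  * level 8 adds the partial attribute set and OID mapping_ctr, and
+  * level 5 is the fallback.
+  */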
+ if (drsuapi->remote_info28.supported_extensions & DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V10) {
+ r->in.level = 10;
+ r->in.req->req10.destination_dsa_guid = service->ntds_guid;
+ r->in.req->req10.source_dsa_invocation_id= rf1->source_dsa_invocation_id;
+ r->in.req->req10.naming_context = &partition->nc;
+ r->in.req->req10.highwatermark = highwatermark;
+ r->in.req->req10.uptodateness_vector = uptodateness_vector;
+ r->in.req->req10.replica_flags = replica_flags;
+ r->in.req->req10.max_object_count = 133;
+ r->in.req->req10.max_ndr_size = 1336811;
+ r->in.req->req10.extended_op = state->op->extended_op;
+ r->in.req->req10.fsmo_info = state->op->fsmo_info;
+ r->in.req->req10.partial_attribute_set = pas;
+ r->in.req->req10.partial_attribute_set_ex= NULL;
+ r->in.req->req10.mapping_ctr.num_mappings= mappings == NULL ? 0 : mappings->num_mappings;
+ r->in.req->req10.mapping_ctr.mappings = mappings == NULL ? NULL : mappings->mappings;
+
+ /* the only difference from v8 is the more_flags */
+ r->in.req->req10.more_flags = state->op->more_flags;
+
+ } else if (drsuapi->remote_info28.supported_extensions & DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V8) {
+ r->in.level = 8;
+ r->in.req->req8.destination_dsa_guid = service->ntds_guid;
+ r->in.req->req8.source_dsa_invocation_id= rf1->source_dsa_invocation_id;
+ r->in.req->req8.naming_context = &partition->nc;
+ r->in.req->req8.highwatermark = highwatermark;
+ r->in.req->req8.uptodateness_vector = uptodateness_vector;
+ r->in.req->req8.replica_flags = replica_flags;
+ r->in.req->req8.max_object_count = 133;
+ r->in.req->req8.max_ndr_size = 1336811;
+ r->in.req->req8.extended_op = state->op->extended_op;
+ r->in.req->req8.fsmo_info = state->op->fsmo_info;
+ r->in.req->req8.partial_attribute_set = pas;
+ r->in.req->req8.partial_attribute_set_ex= NULL;
+ r->in.req->req8.mapping_ctr.num_mappings= mappings == NULL ? 0 : mappings->num_mappings;
+ r->in.req->req8.mapping_ctr.mappings = mappings == NULL ? NULL : mappings->mappings;
+ } else {
+ r->in.level = 5;
+ r->in.req->req5.destination_dsa_guid = service->ntds_guid;
+ r->in.req->req5.source_dsa_invocation_id= rf1->source_dsa_invocation_id;
+ r->in.req->req5.naming_context = &partition->nc;
+ r->in.req->req5.highwatermark = highwatermark;
+ r->in.req->req5.uptodateness_vector = uptodateness_vector;
+ r->in.req->req5.replica_flags = replica_flags;
+ r->in.req->req5.max_object_count = 133;
+ r->in.req->req5.max_ndr_size = 1336770;
+ r->in.req->req5.extended_op = state->op->extended_op;
+ r->in.req->req5.fsmo_info = state->op->fsmo_info;
+ }
+
+#if 0
+ NDR_PRINT_IN_DEBUG(drsuapi_DsGetNCChanges, r);
+#endif
+
+ state->ndr_struct_ptr = r;
+ subreq = dcerpc_drsuapi_DsGetNCChanges_r_send(state,
+ state->ev,
+ drsuapi->drsuapi_handle,
+ r);
+ if (tevent_req_nomem(subreq, req)) {
+ return;
+ }
+ tevent_req_set_callback(subreq, dreplsrv_op_pull_source_get_changes_done, req);
+}
+
+static void dreplsrv_op_pull_source_apply_changes_trigger(struct tevent_req *req,
+ struct drsuapi_DsGetNCChanges *r,
+ uint32_t ctr_level,
+ struct drsuapi_DsGetNCChangesCtr1 *ctr1,
+ struct drsuapi_DsGetNCChangesCtr6 *ctr6);
+
+static void dreplsrv_op_pull_source_get_changes_done(struct tevent_req *subreq)
+{
+ struct tevent_req *req = tevent_req_callback_data(subreq,
+ struct tevent_req);
+ struct dreplsrv_op_pull_source_state *state = tevent_req_data(req,
+ struct dreplsrv_op_pull_source_state);
+ NTSTATUS status;
+ struct drsuapi_DsGetNCChanges *r = talloc_get_type(state->ndr_struct_ptr,
+ struct drsuapi_DsGetNCChanges);
+ uint32_t ctr_level = 0;
+ struct drsuapi_DsGetNCChangesCtr1 *ctr1 = NULL;
+ struct drsuapi_DsGetNCChangesCtr6 *ctr6 = NULL;
+ enum drsuapi_DsExtendedError extended_ret = DRSUAPI_EXOP_ERR_NONE;
+ state->ndr_struct_ptr = NULL;
+
+ status = dcerpc_drsuapi_DsGetNCChanges_r_recv(subreq, r);
+ TALLOC_FREE(subreq);
+ if (tevent_req_nterror(req, status)) {
+ return;
+ }
+
+ if (!W_ERROR_IS_OK(r->out.result)) {
+ status = werror_to_ntstatus(r->out.result);
+ tevent_req_nterror(req, status);
+ return;
+ }
+
+ if (*r->out.level_out == 1) {
+ ctr_level = 1;
+ ctr1 = &r->out.ctr->ctr1;
+ } else if (*r->out.level_out == 2 &&
+ r->out.ctr->ctr2.mszip1.ts) {
+ ctr_level = 1;
+ ctr1 = &r->out.ctr->ctr2.mszip1.ts->ctr1;
+ } else if (*r->out.level_out == 6) {
+ ctr_level = 6;
+ ctr6 = &r->out.ctr->ctr6;
+ } else if (*r->out.level_out == 7 &&
+ r->out.ctr->ctr7.level == 6 &&
+ r->out.ctr->ctr7.type == DRSUAPI_COMPRESSION_TYPE_MSZIP &&
+ r->out.ctr->ctr7.ctr.mszip6.ts) {
+ ctr_level = 6;
+ ctr6 = &r->out.ctr->ctr7.ctr.mszip6.ts->ctr6;
+ } else if (*r->out.level_out == 7 &&
+ r->out.ctr->ctr7.level == 6 &&
+ r->out.ctr->ctr7.type == DRSUAPI_COMPRESSION_TYPE_XPRESS &&
+ r->out.ctr->ctr7.ctr.xpress6.ts) {
+ ctr_level = 6;
+ ctr6 = &r->out.ctr->ctr7.ctr.xpress6.ts->ctr6;
+ } else {
+ status = werror_to_ntstatus(WERR_BAD_NET_RESP);
+ tevent_req_nterror(req, status);
+ return;
+ }
+
+ if (!ctr1 && !ctr6) {
+ status = werror_to_ntstatus(WERR_BAD_NET_RESP);
+ tevent_req_nterror(req, status);
+ return;
+ }
+
+ if (ctr_level == 6) {
+ if (!W_ERROR_IS_OK(ctr6->drs_error)) {
+ status = werror_to_ntstatus(ctr6->drs_error);
+ tevent_req_nterror(req, status);
+ return;
+ }
+ extended_ret = ctr6->extended_ret;
+ }
+
+ if (ctr_level == 1) {
+ extended_ret = ctr1->extended_ret;
+ }
+
+ if (state->op->extended_op != DRSUAPI_EXOP_NONE) {
+ state->op->extended_ret = extended_ret;
+
+ if (extended_ret != DRSUAPI_EXOP_ERR_SUCCESS) {
+ status = NT_STATUS_UNSUCCESSFUL;
+ tevent_req_nterror(req, status);
+ return;
+ }
+ }
+
+ dreplsrv_op_pull_source_apply_changes_trigger(req, r, ctr_level, ctr1, ctr6);
+}
+
+/**
+ * If processing a chunk of replication data fails, check if it is due to a
+ * problem that can be fixed by setting extra flags in the GetNCChanges request,
+ * i.e. GET_ANC or GET_TGT.
+ * @returns NT_STATUS_OK if the request was retried, and an error code if not
+ */
+static NTSTATUS dreplsrv_op_pull_retry_with_flags(struct tevent_req *req,
+ WERROR error_code)
+{
+ struct dreplsrv_op_pull_source_state *state;
+ NTSTATUS nt_status = NT_STATUS_OK;
+
+ state = tevent_req_data(req, struct dreplsrv_op_pull_source_state);
+
+ /*
+ * Check if we failed to apply the records due to a missing parent or
+ * target object. If so, try again and ask for any missing parent/target
+ * objects to be included this time.
+ */
+ if (W_ERROR_EQUAL(error_code, WERR_DS_DRA_RECYCLED_TARGET)) {
+
+ if (state->op->more_flags & DRSUAPI_DRS_GET_TGT) {
+ DEBUG(1,("Missing target object despite setting DRSUAPI_DRS_GET_TGT flag\n"));
+ nt_status = NT_STATUS_INVALID_NETWORK_RESPONSE;
+ } else {
+ state->op->more_flags |= DRSUAPI_DRS_GET_TGT;
+ DEBUG(1,("Missing target object when we didn't set the DRSUAPI_DRS_GET_TGT flag, retrying\n"));
+ dreplsrv_op_pull_source_get_changes_trigger(req);
+ }
+ } else if (W_ERROR_EQUAL(error_code, WERR_DS_DRA_MISSING_PARENT)) {
+
+ if (state->op->options & DRSUAPI_DRS_GET_ANC) {
+ DEBUG(1,("Missing parent object despite setting DRSUAPI_DRS_GET_ANC flag\n"));
+ nt_status = NT_STATUS_INVALID_NETWORK_RESPONSE;
+ } else {
+ state->op->options |= DRSUAPI_DRS_GET_ANC;
+ DEBUG(4,("Missing parent object when we didn't set the DRSUAPI_DRS_GET_ANC flag, retrying\n"));
+ dreplsrv_op_pull_source_get_changes_trigger(req);
+ }
+ } else {
+ nt_status = werror_to_ntstatus(WERR_BAD_NET_RESP);
+ }
+
+ return nt_status;
+}
+
+
+static void dreplsrv_update_refs_trigger(struct tevent_req *req);
+
+static void dreplsrv_op_pull_source_apply_changes_trigger(struct tevent_req *req,
+ struct drsuapi_DsGetNCChanges *r,
+ uint32_t ctr_level,
+ struct drsuapi_DsGetNCChangesCtr1 *ctr1,
+ struct drsuapi_DsGetNCChangesCtr6 *ctr6)
+{
+ struct dreplsrv_op_pull_source_state *state = tevent_req_data(req,
+ struct dreplsrv_op_pull_source_state);
+ struct repsFromTo1 rf1 = *state->op->source_dsa->repsFrom1;
+ struct dreplsrv_service *service = state->op->service;
+ struct dreplsrv_partition *partition = state->op->source_dsa->partition;
+ struct dreplsrv_drsuapi_connection *drsuapi = state->op->source_dsa->conn->drsuapi;
+ struct ldb_dn *schema_dn = ldb_get_schema_basedn(service->samdb);
+ struct dreplsrv_op_pull_source_schema_cycle *sc = NULL;
+ struct dsdb_schema *schema;
+ struct dsdb_schema *working_schema = NULL;
+ const struct drsuapi_DsReplicaOIDMapping_Ctr *mapping_ctr;
+ uint32_t object_count;
+ struct drsuapi_DsReplicaObjectListItemEx *first_object;
+ uint32_t linked_attributes_count;
+ struct drsuapi_DsReplicaLinkedAttribute *linked_attributes;
+ const struct drsuapi_DsReplicaCursor2CtrEx *uptodateness_vector;
+ struct dsdb_extended_replicated_objects *objects;
+ bool more_data = false;
+ WERROR status;
+ NTSTATUS nt_status;
+ uint32_t dsdb_repl_flags = 0;
+ struct ldb_dn *nc_root = NULL;
+ bool was_schema = false;
+ int ret;
+
+ switch (ctr_level) {
+ case 1:
+ mapping_ctr = &ctr1->mapping_ctr;
+ object_count = ctr1->object_count;
+ first_object = ctr1->first_object;
+ linked_attributes_count = 0;
+ linked_attributes = NULL;
+ rf1.source_dsa_obj_guid = ctr1->source_dsa_guid;
+ rf1.source_dsa_invocation_id = ctr1->source_dsa_invocation_id;
+ rf1.highwatermark = ctr1->new_highwatermark;
+ uptodateness_vector = NULL; /* TODO: map it */
+ more_data = ctr1->more_data;
+ break;
+ case 6:
+ mapping_ctr = &ctr6->mapping_ctr;
+ object_count = ctr6->object_count;
+ first_object = ctr6->first_object;
+ linked_attributes_count = ctr6->linked_attributes_count;
+ linked_attributes = ctr6->linked_attributes;
+ rf1.source_dsa_obj_guid = ctr6->source_dsa_guid;
+ rf1.source_dsa_invocation_id = ctr6->source_dsa_invocation_id;
+ rf1.highwatermark = ctr6->new_highwatermark;
+ uptodateness_vector = ctr6->uptodateness_vector;
+ more_data = ctr6->more_data;
+ break;
+ default:
+ nt_status = werror_to_ntstatus(WERR_BAD_NET_RESP);
+ tevent_req_nterror(req, nt_status);
+ return;
+ }
+
+ /*
+ * We need to cache the schema changes until we have replicated
+ * everything, before we can apply the new schema.
+ */
+ if (state->schema_cycle != NULL) {
+ TALLOC_CTX *mem = NULL;
+ struct drsuapi_DsReplicaObjectListItemEx **ptr = NULL;
+ struct drsuapi_DsReplicaObjectListItemEx *l = NULL;
+
+ was_schema = true;
+ sc = state->schema_cycle;
+
+ sc->repsFrom1 = rf1;
+
+ if (sc->first_object == NULL) {
+ mem = sc;
+ ptr = &sc->first_object;
+ } else {
+ mem = sc->last_object;
+ ptr = &sc->last_object->next_object;
+ }
+ *ptr = talloc_move(mem, &first_object);
+ for (l = *ptr; l != NULL; l = l->next_object) {
+ sc->object_count++;
+ if (l->next_object == NULL) {
+ sc->last_object = l;
+ break;
+ }
+ }
+
+ if (sc->linked_attributes_count == 0) {
+ sc->linked_attributes = talloc_move(sc, &linked_attributes);
+ sc->linked_attributes_count = linked_attributes_count;
+ linked_attributes_count = 0;
+ } else if (linked_attributes_count > 0) {
+ struct drsuapi_DsReplicaLinkedAttribute *new_las = NULL;
+ struct drsuapi_DsReplicaLinkedAttribute *tmp_las = NULL;
+ uint64_t new_count;
+ uint64_t add_size;
+ uint32_t add_idx;
+
+ new_count = sc->linked_attributes_count;
+ new_count += linked_attributes_count;
+ if (new_count > UINT32_MAX) {
+ nt_status = werror_to_ntstatus(WERR_BAD_NET_RESP);
+ tevent_req_nterror(req, nt_status);
+ return;
+ }
+ add_size = linked_attributes_count;
+ add_size *= sizeof(linked_attributes[0]);
+ if (add_size > SIZE_MAX) {
+ nt_status = werror_to_ntstatus(WERR_BAD_NET_RESP);
+ tevent_req_nterror(req, nt_status);
+ return;
+ }
+ add_idx = sc->linked_attributes_count;
+
+ tmp_las = talloc_realloc(sc,
+ sc->linked_attributes,
+ struct drsuapi_DsReplicaLinkedAttribute,
+ new_count);
+ if (tevent_req_nomem(tmp_las, req)) {
+ return;
+ }
+ new_las = talloc_move(tmp_las, &linked_attributes);
+ memcpy(&tmp_las[add_idx], new_las, add_size);
+ sc->linked_attributes = tmp_las;
+ sc->linked_attributes_count = new_count;
+ linked_attributes_count = 0;
+ }
+
+ if (more_data) {
+ /* we don't need this structure anymore */
+ TALLOC_FREE(r);
+
+ dreplsrv_op_pull_source_get_changes_trigger(req);
+ return;
+ }
+
+ /* detach sc from state */
+ state->schema_cycle = NULL;
+ }
+
+ schema = dsdb_get_schema(service->samdb, state);
+ if (!schema) {
+ DEBUG(0,(__location__ ": Schema is not loaded yet!\n"));
+ tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
+ return;
+ }
+
+ /*
+ * Decide what working schema to use for object conversion.
+ * We won't need a working schema if the replica sent was empty.
+ */
+ if (sc != NULL) {
+ first_object = talloc_move(r, &sc->first_object);
+ object_count = sc->object_count;
+ linked_attributes = talloc_move(r, &sc->linked_attributes);
+ linked_attributes_count = sc->linked_attributes_count;
+ TALLOC_FREE(sc);
+
+ if (first_object != NULL) {
+ /* create working schema to convert objects with */
+ status = dsdb_repl_make_working_schema(service->samdb,
+ schema,
+ mapping_ctr,
+ object_count,
+ first_object,
+ &drsuapi->gensec_skey,
+ state, &working_schema);
+ if (!W_ERROR_IS_OK(status)) {
+ DEBUG(0,("Failed to create working schema: %s\n",
+ win_errstr(status)));
+ tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
+ return;
+ }
+ }
+ }
+
+ if (partition->partial_replica || partition->rodc_replica) {
+ dsdb_repl_flags |= DSDB_REPL_FLAG_PARTIAL_REPLICA;
+ }
+ if (state->op->options & DRSUAPI_DRS_FULL_SYNC_IN_PROGRESS) {
+ dsdb_repl_flags |= DSDB_REPL_FLAG_PRIORITISE_INCOMING;
+ }
+ if (state->op->options & DRSUAPI_DRS_SPECIAL_SECRET_PROCESSING) {
+ dsdb_repl_flags |= DSDB_REPL_FLAG_EXPECT_NO_SECRETS;
+ }
+ if (state->op->options & DRSUAPI_DRS_CRITICAL_ONLY ||
+ state->op->extended_op != DRSUAPI_EXOP_NONE) {
+ dsdb_repl_flags |= DSDB_REPL_FLAG_OBJECT_SUBSET;
+ }
+
+ if (state->op->more_flags & DRSUAPI_DRS_GET_TGT) {
+ dsdb_repl_flags |= DSDB_REPL_FLAG_TARGETS_UPTODATE;
+ }
+
+ if (state->op->extended_op != DRSUAPI_EXOP_NONE) {
+ ret = dsdb_find_nc_root(service->samdb, partition,
+ partition->dn, &nc_root);
+ if (ret != LDB_SUCCESS) {
+ DEBUG(0,(__location__ ": Failed to find nc_root for %s\n",
+ ldb_dn_get_linearized(partition->dn)));
+ tevent_req_nterror(req, NT_STATUS_INTERNAL_ERROR);
+ return;
+ }
+ } else {
+ nc_root = partition->dn;
+ }
+
+ status = dsdb_replicated_objects_convert(service->samdb,
+ working_schema ? working_schema : schema,
+ nc_root,
+ mapping_ctr,
+ object_count,
+ first_object,
+ linked_attributes_count,
+ linked_attributes,
+ &rf1,
+ uptodateness_vector,
+ &drsuapi->gensec_skey,
+ dsdb_repl_flags,
+ state, &objects);
+
+ if (W_ERROR_EQUAL(status, WERR_DS_DRA_SCHEMA_MISMATCH)) {
+ struct dreplsrv_partition *p;
+ bool ok;
+
+ if (was_schema) {
+ nt_status = werror_to_ntstatus(WERR_BAD_NET_RESP);
+ DBG_ERR("Got mismatch for schema partition: %s/%s\n",
+ win_errstr(status), nt_errstr(nt_status));
+ tevent_req_nterror(req, nt_status);
+ return;
+ }
+
+ if (state->retry_started) {
+ nt_status = werror_to_ntstatus(WERR_BAD_NET_RESP);
+ DEBUG(0,("Failed to convert objects after retry: %s/%s\n",
+ win_errstr(status), nt_errstr(nt_status)));
+ tevent_req_nterror(req, nt_status);
+ return;
+ }
+
+		/*
+		 * Change the sync or extended operation into a fetch
+		 * of the schema partition, so we get all the schema
+		 * objects we need.
+		 *
+		 * We don't want to re-do the remote exop,
+		 * unless it was REPL_SECRET, so we set the
+		 * fallback operation to just be a fetch of
+		 * the relevant partition.
+		 */
+
+ if (state->op->extended_op == DRSUAPI_EXOP_REPL_SECRET) {
+ state->extended_op_retry = state->op->extended_op;
+ } else {
+ state->extended_op_retry = DRSUAPI_EXOP_NONE;
+ }
+ state->op->extended_op = DRSUAPI_EXOP_NONE;
+
+ if (ldb_dn_compare(nc_root, partition->dn) == 0) {
+ state->source_dsa_retry = state->op->source_dsa;
+ } else {
+ status = dreplsrv_partition_find_for_nc(service,
+ NULL, NULL,
+ ldb_dn_get_linearized(nc_root),
+ &p);
+ if (!W_ERROR_IS_OK(status)) {
+				DEBUG(2, ("Failed to find requested Naming Context for %s: %s\n",
+ ldb_dn_get_linearized(nc_root),
+ win_errstr(status)));
+ nt_status = werror_to_ntstatus(status);
+ tevent_req_nterror(req, nt_status);
+ return;
+ }
+ status = dreplsrv_partition_source_dsa_by_guid(p,
+ &state->op->source_dsa->repsFrom1->source_dsa_obj_guid,
+ &state->source_dsa_retry);
+
+ if (!W_ERROR_IS_OK(status)) {
+ struct GUID_txt_buf str;
+				DEBUG(2, ("Failed to find requested source DSA for %s and %s: %s\n",
+ ldb_dn_get_linearized(nc_root),
+ GUID_buf_string(&state->op->source_dsa->repsFrom1->source_dsa_obj_guid, &str),
+ win_errstr(status)));
+ nt_status = werror_to_ntstatus(status);
+ tevent_req_nterror(req, nt_status);
+ return;
+ }
+ }
+
+ /* Find schema naming context to be synchronized first */
+ status = dreplsrv_partition_find_for_nc(service,
+ NULL, NULL,
+ ldb_dn_get_linearized(schema_dn),
+ &p);
+ if (!W_ERROR_IS_OK(status)) {
+			DEBUG(2, ("Failed to find requested Naming Context for schema: %s\n",
+ win_errstr(status)));
+ nt_status = werror_to_ntstatus(status);
+ tevent_req_nterror(req, nt_status);
+ return;
+ }
+
+ status = dreplsrv_partition_source_dsa_by_guid(p,
+ &state->op->source_dsa->repsFrom1->source_dsa_obj_guid,
+ &state->op->source_dsa);
+ if (!W_ERROR_IS_OK(status)) {
+ struct GUID_txt_buf str;
+			DEBUG(2, ("Failed to find requested source DSA for %s and %s: %s\n",
+ ldb_dn_get_linearized(schema_dn),
+ GUID_buf_string(&state->op->source_dsa->repsFrom1->source_dsa_obj_guid, &str),
+ win_errstr(status)));
+ nt_status = werror_to_ntstatus(status);
+ tevent_req_nterror(req, nt_status);
+ return;
+ }
+		DEBUG(4,("Wrong schema when applying GetNCChanges reply, retrying\n"));
+
+ state->retry_started = true;
+
+ ok = dreplsrv_op_pull_source_detect_schema_cycle(req);
+ if (!ok) {
+ return;
+ }
+
+ dreplsrv_op_pull_source_get_changes_trigger(req);
+ return;
+
+ } else if (!W_ERROR_IS_OK(status)) {
+ nt_status = werror_to_ntstatus(WERR_BAD_NET_RESP);
+ DEBUG(0,("Failed to convert objects: %s/%s\n",
+ win_errstr(status), nt_errstr(nt_status)));
+ tevent_req_nterror(req, nt_status);
+ return;
+ }
+
+ status = dsdb_replicated_objects_commit(service->samdb,
+ working_schema,
+ objects,
+ &state->op->source_dsa->notify_uSN);
+ talloc_free(objects);
+
+ if (!W_ERROR_IS_OK(status)) {
+
+ /*
+ * Check if this error can be fixed by resending the GetNCChanges
+ * request with extra flags set (i.e. GET_ANC/GET_TGT)
+ */
+ nt_status = dreplsrv_op_pull_retry_with_flags(req, status);
+
+ if (NT_STATUS_IS_OK(nt_status)) {
+
+ /*
+ * We resent the request. Don't update the highwatermark,
+ * we'll start this part of the cycle again.
+ */
+ return;
+ }
+
+ DEBUG(0,("Failed to commit objects: %s/%s\n",
+ win_errstr(status), nt_errstr(nt_status)));
+ tevent_req_nterror(req, nt_status);
+ return;
+ }
+
+ if (state->op->extended_op == DRSUAPI_EXOP_NONE) {
+ /* if it applied fine, we need to update the highwatermark */
+ *state->op->source_dsa->repsFrom1 = rf1;
+ }
+
+	/* we don't need this possibly very large structure anymore */
+ TALLOC_FREE(r);
+
+ if (more_data) {
+ dreplsrv_op_pull_source_get_changes_trigger(req);
+ return;
+ }
+
+ /*
+ * If we had to divert via doing some other thing, such as
+ * pulling the schema, then go back and do the original
+ * operation once we are done.
+ */
+ if (state->source_dsa_retry != NULL) {
+ state->op->source_dsa = state->source_dsa_retry;
+ state->op->extended_op = state->extended_op_retry;
+ state->source_dsa_retry = NULL;
+ dreplsrv_op_pull_source_get_changes_trigger(req);
+ return;
+ }
+
+ if (state->op->extended_op != DRSUAPI_EXOP_NONE ||
+ state->op->service->am_rodc) {
+		/*
+		  we don't do the UpdateRefs for extended ops or if we
+		  are an RODC
+		 */
+ tevent_req_done(req);
+ return;
+ }
+
+ /* now we need to update the repsTo record for this partition
+ on the server. These records are initially established when
+ we join the domain, but they quickly expire. We do it here
+ so we can use the already established DRSUAPI pipe
+ */
+ dreplsrv_update_refs_trigger(req);
+}
+
+static void dreplsrv_update_refs_done(struct tevent_req *subreq);
+
+/*
+  send an UpdateRefs request to refresh our repsTo record on the server
+ */
+static void dreplsrv_update_refs_trigger(struct tevent_req *req)
+{
+ struct dreplsrv_op_pull_source_state *state = tevent_req_data(req,
+ struct dreplsrv_op_pull_source_state);
+ struct dreplsrv_service *service = state->op->service;
+ struct dreplsrv_partition *partition = state->op->source_dsa->partition;
+ struct dreplsrv_drsuapi_connection *drsuapi = state->op->source_dsa->conn->drsuapi;
+ struct drsuapi_DsReplicaUpdateRefs *r;
+ char *ntds_dns_name;
+ struct tevent_req *subreq;
+
+ r = talloc(state, struct drsuapi_DsReplicaUpdateRefs);
+ if (tevent_req_nomem(r, req)) {
+ return;
+ }
+
+ ntds_dns_name = samdb_ntds_msdcs_dns_name(service->samdb, r, &service->ntds_guid);
+ if (tevent_req_nomem(ntds_dns_name, req)) {
+ talloc_free(r);
+ return;
+ }
+
+ r->in.bind_handle = &drsuapi->bind_handle;
+ r->in.level = 1;
+ r->in.req.req1.naming_context = &partition->nc;
+ r->in.req.req1.dest_dsa_dns_name = ntds_dns_name;
+ r->in.req.req1.dest_dsa_guid = service->ntds_guid;
+ r->in.req.req1.options = DRSUAPI_DRS_ADD_REF | DRSUAPI_DRS_DEL_REF;
+ if (!service->am_rodc) {
+ r->in.req.req1.options |= DRSUAPI_DRS_WRIT_REP;
+ }
+
+ state->ndr_struct_ptr = r;
+ subreq = dcerpc_drsuapi_DsReplicaUpdateRefs_r_send(state,
+ state->ev,
+ drsuapi->drsuapi_handle,
+ r);
+ if (tevent_req_nomem(subreq, req)) {
+ talloc_free(r);
+ return;
+ }
+ tevent_req_set_callback(subreq, dreplsrv_update_refs_done, req);
+}
+
+/*
+  receive an UpdateRefs reply
+ */
+static void dreplsrv_update_refs_done(struct tevent_req *subreq)
+{
+ struct tevent_req *req = tevent_req_callback_data(subreq,
+ struct tevent_req);
+ struct dreplsrv_op_pull_source_state *state = tevent_req_data(req,
+ struct dreplsrv_op_pull_source_state);
+ struct drsuapi_DsReplicaUpdateRefs *r = talloc_get_type(state->ndr_struct_ptr,
+ struct drsuapi_DsReplicaUpdateRefs);
+ NTSTATUS status;
+
+ state->ndr_struct_ptr = NULL;
+
+ status = dcerpc_drsuapi_DsReplicaUpdateRefs_r_recv(subreq, r);
+ TALLOC_FREE(subreq);
+ if (!NT_STATUS_IS_OK(status)) {
+ DEBUG(0,("UpdateRefs failed with %s\n",
+ nt_errstr(status)));
+ tevent_req_nterror(req, status);
+ return;
+ }
+
+ if (!W_ERROR_IS_OK(r->out.result)) {
+ status = werror_to_ntstatus(r->out.result);
+ DEBUG(0,("UpdateRefs failed with %s/%s for %s %s\n",
+ win_errstr(r->out.result),
+ nt_errstr(status),
+ r->in.req.req1.dest_dsa_dns_name,
+ r->in.req.req1.naming_context->dn));
+		/*
+		 * TODO: we are currently not sending the
+		 * DsReplicaUpdateRefs at the correct moment; we do it
+		 * just after a GetNCChanges, which is not always
+		 * correct.
+		 * Especially when another DC is trying to demote, it
+		 * will send us a DsReplicaSync that triggers a
+		 * GetNCChanges. That call will succeed, but the
+		 * DsReplicaUpdateRefs we send just after will not,
+		 * because the DC is in a demote state and will reply
+		 * with WERR_DS_DRA_BUSY. That error would cause us to
+		 * answer the DsReplicaSync with a non-OK status, and
+		 * the other DC would then stop the demote because of
+		 * that error.
+		 * To cope with this we will, for the moment, not treat
+		 * DS_DRA_BUSY as an error.
+		 * This is not ideal, but it should have little impact
+		 * in production, as the error otherwise never happens
+		 * and we send a DsReplicaUpdateRefs after each
+		 * GetNCChanges anyway.
+		 */
+ if (!W_ERROR_EQUAL(r->out.result, WERR_DS_DRA_BUSY)) {
+ tevent_req_nterror(req, status);
+ return;
+ }
+ }
+
+ DEBUG(4,("UpdateRefs OK for %s %s\n",
+ r->in.req.req1.dest_dsa_dns_name,
+ r->in.req.req1.naming_context->dn));
+
+ tevent_req_done(req);
+}
+
+WERROR dreplsrv_op_pull_source_recv(struct tevent_req *req)
+{
+ NTSTATUS status;
+
+ if (tevent_req_is_nterror(req, &status)) {
+ tevent_req_received(req);
+ return ntstatus_to_werror(status);
+ }
+
+ tevent_req_received(req);
+ return WERR_OK;
+}
+
diff --git a/source4/dsdb/repl/drepl_out_helpers.h b/source4/dsdb/repl/drepl_out_helpers.h
new file mode 100644
index 0000000..158eef4
--- /dev/null
+++ b/source4/dsdb/repl/drepl_out_helpers.h
@@ -0,0 +1,26 @@
+/*
+ Unix SMB/CIFS Implementation.
+ DSDB replication service helper function for outgoing traffic
+
+ Copyright (C) Stefan Metzmacher 2007
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#ifndef DREPL_OUT_HELPERS_H
+#define DREPL_OUT_HELPERS_H
+
+
+#endif /* DREPL_OUT_HELPERS_H */
diff --git a/source4/dsdb/repl/drepl_out_pull.c b/source4/dsdb/repl/drepl_out_pull.c
new file mode 100644
index 0000000..fe9bd60
--- /dev/null
+++ b/source4/dsdb/repl/drepl_out_pull.c
@@ -0,0 +1,260 @@
+/*
+ Unix SMB/CIFS Implementation.
+ DSDB replication service outgoing Pull-Replication
+
+ Copyright (C) Stefan Metzmacher 2007
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#include "includes.h"
+#include "dsdb/samdb/samdb.h"
+#include "auth/auth.h"
+#include "samba/service.h"
+#include "lib/events/events.h"
+#include "dsdb/repl/drepl_service.h"
+#include <ldb_errors.h>
+#include "../lib/util/dlinklist.h"
+#include "librpc/gen_ndr/ndr_misc.h"
+#include "librpc/gen_ndr/ndr_drsuapi.h"
+#include "librpc/gen_ndr/ndr_drsblobs.h"
+#include "libcli/composite/composite.h"
+#include "libcli/security/security.h"
+
+#undef DBGC_CLASS
+#define DBGC_CLASS DBGC_DRS_REPL
+
+/*
+ update repsFrom/repsTo error information
+ */
+void drepl_reps_update(struct dreplsrv_service *s, const char *reps_attr,
+ struct ldb_dn *dn,
+ struct GUID *source_dsa_obj_guid, WERROR status)
+{
+ struct repsFromToBlob *reps;
+ uint32_t count, i;
+ WERROR werr;
+ TALLOC_CTX *tmp_ctx = talloc_new(s);
+ time_t t;
+ NTTIME now;
+
+ t = time(NULL);
+ unix_to_nt_time(&now, t);
+
+ werr = dsdb_loadreps(s->samdb, tmp_ctx, dn, reps_attr, &reps, &count);
+ if (!W_ERROR_IS_OK(werr)) {
+ talloc_free(tmp_ctx);
+ return;
+ }
+
+ for (i=0; i<count; i++) {
+ if (GUID_equal(source_dsa_obj_guid,
+ &reps[i].ctr.ctr1.source_dsa_obj_guid)) {
+ break;
+ }
+ }
+
+ if (i == count) {
+ /* no record to update */
+ talloc_free(tmp_ctx);
+ return;
+ }
+
+ /* only update the status fields */
+ reps[i].ctr.ctr1.last_attempt = now;
+ reps[i].ctr.ctr1.result_last_attempt = status;
+ if (W_ERROR_IS_OK(status)) {
+ reps[i].ctr.ctr1.last_success = now;
+ reps[i].ctr.ctr1.consecutive_sync_failures = 0;
+ } else {
+ reps[i].ctr.ctr1.consecutive_sync_failures++;
+ }
+
+ werr = dsdb_savereps(s->samdb, tmp_ctx, dn, reps_attr, reps, count);
+ if (!W_ERROR_IS_OK(werr)) {
+ DEBUG(2,("drepl_reps_update: Failed to save %s for %s: %s\n",
+ reps_attr, ldb_dn_get_linearized(dn), win_errstr(werr)));
+ }
+ talloc_free(tmp_ctx);
+}
+
+WERROR dreplsrv_schedule_partition_pull_source(struct dreplsrv_service *s,
+ struct dreplsrv_partition_source_dsa *source,
+ uint32_t options,
+ enum drsuapi_DsExtendedOperation extended_op,
+ uint64_t fsmo_info,
+ dreplsrv_extended_callback_t callback,
+ void *cb_data)
+{
+ struct dreplsrv_out_operation *op;
+
+ op = talloc_zero(s, struct dreplsrv_out_operation);
+ W_ERROR_HAVE_NO_MEMORY(op);
+
+ op->service = s;
+	/*
+	 * source may either be from the long-term list of partners,
+	 * or from dreplsrv_partition_source_dsa_temporary(). Because
+	 * it can be either, we can't talloc_steal() it here, so
+	 * instead we take a reference to it.
+	 *
+	 * We never talloc_free() the p->sources pointers - indeed we
+	 * never remove them - and the temp source will otherwise go
+	 * away with the msg it is allocated on.
+	 *
+	 * Finally, the pointer created in drepl_request_extended_op()
+	 * is removed with talloc_unlink().
+	 */
+ op->source_dsa = talloc_reference(op, source);
+ if (!op->source_dsa) {
+ return WERR_NOT_ENOUGH_MEMORY;
+ }
+
+ op->options = options;
+ op->extended_op = extended_op;
+ op->fsmo_info = fsmo_info;
+ op->callback = callback;
+ op->cb_data = cb_data;
+ op->schedule_time = time(NULL);
+ op->more_flags = 0;
+
+ DLIST_ADD_END(s->ops.pending, op);
+
+ return WERR_OK;
+}
+
+static WERROR dreplsrv_schedule_partition_pull(struct dreplsrv_service *s,
+ struct dreplsrv_partition *p,
+ TALLOC_CTX *mem_ctx)
+{
+ WERROR status;
+ struct dreplsrv_partition_source_dsa *cur;
+
+ for (cur = p->sources; cur; cur = cur->next) {
+ status = dreplsrv_schedule_partition_pull_source(s, cur,
+ 0, DRSUAPI_EXOP_NONE, 0,
+ NULL, NULL);
+ W_ERROR_NOT_OK_RETURN(status);
+ }
+
+ return WERR_OK;
+}
+
+WERROR dreplsrv_schedule_pull_replication(struct dreplsrv_service *s, TALLOC_CTX *mem_ctx)
+{
+ WERROR status;
+ struct dreplsrv_partition *p;
+
+ for (p = s->partitions; p; p = p->next) {
+ status = dreplsrv_schedule_partition_pull(s, p, mem_ctx);
+ W_ERROR_NOT_OK_RETURN(status);
+ }
+
+ return WERR_OK;
+}
+
+
+static void dreplsrv_pending_op_callback(struct tevent_req *subreq)
+{
+ struct dreplsrv_out_operation *op = tevent_req_callback_data(subreq,
+ struct dreplsrv_out_operation);
+ struct repsFromTo1 *rf = op->source_dsa->repsFrom1;
+ struct dreplsrv_service *s = op->service;
+ WERROR werr;
+
+ werr = dreplsrv_op_pull_source_recv(subreq);
+ TALLOC_FREE(subreq);
+
+ DEBUG(4,("dreplsrv_op_pull_source(%s) for %s\n", win_errstr(werr),
+ ldb_dn_get_linearized(op->source_dsa->partition->dn)));
+
+ if (op->extended_op == DRSUAPI_EXOP_NONE) {
+ drepl_reps_update(s, "repsFrom", op->source_dsa->partition->dn,
+ &rf->source_dsa_obj_guid, werr);
+ }
+
+ if (op->callback) {
+ op->callback(s, werr, op->extended_ret, op->cb_data);
+ }
+ talloc_free(op);
+ s->ops.current = NULL;
+ dreplsrv_run_pending_ops(s);
+}
+
+void dreplsrv_run_pull_ops(struct dreplsrv_service *s)
+{
+ struct dreplsrv_out_operation *op;
+ time_t t;
+ NTTIME now;
+ struct tevent_req *subreq;
+ WERROR werr;
+
+ if (s->ops.n_current || s->ops.current) {
+ /* if there's still one running, we're done */
+ return;
+ }
+
+ if (!s->ops.pending) {
+	/* if there are no pending operations, we're done */
+ return;
+ }
+
+ t = time(NULL);
+ unix_to_nt_time(&now, t);
+
+ op = s->ops.pending;
+ s->ops.current = op;
+ DLIST_REMOVE(s->ops.pending, op);
+
+ op->source_dsa->repsFrom1->last_attempt = now;
+
+ /* check if inbound replication is enabled */
+ if (!(op->options & DRSUAPI_DRS_SYNC_FORCED)) {
+ uint32_t rep_options;
+ if (samdb_ntds_options(op->service->samdb, &rep_options) != LDB_SUCCESS) {
+ werr = WERR_DS_DRA_INTERNAL_ERROR;
+ goto failed;
+ }
+
+ if ((rep_options & DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL)) {
+ werr = WERR_DS_DRA_SINK_DISABLED;
+ goto failed;
+ }
+ }
+
+ subreq = dreplsrv_op_pull_source_send(op, s->task->event_ctx, op);
+ if (!subreq) {
+ werr = WERR_NOT_ENOUGH_MEMORY;
+ goto failed;
+ }
+
+ tevent_req_set_callback(subreq, dreplsrv_pending_op_callback, op);
+ return;
+
+failed:
+ if (op->extended_op == DRSUAPI_EXOP_NONE) {
+ drepl_reps_update(s, "repsFrom", op->source_dsa->partition->dn,
+ &op->source_dsa->repsFrom1->source_dsa_obj_guid, werr);
+ }
+ /* unblock queue processing */
+ s->ops.current = NULL;
+ /*
+ * let the callback do its job just like in any other failure situation
+ */
+ if (op->callback) {
+ op->callback(s, werr, op->extended_ret, op->cb_data);
+ }
+}
diff --git a/source4/dsdb/repl/drepl_partitions.c b/source4/dsdb/repl/drepl_partitions.c
new file mode 100644
index 0000000..cc32d59
--- /dev/null
+++ b/source4/dsdb/repl/drepl_partitions.c
@@ -0,0 +1,651 @@
+/*
+ Unix SMB/CIFS Implementation.
+ DSDB replication service
+
+ Copyright (C) Stefan Metzmacher 2007
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#include "includes.h"
+#include "dsdb/samdb/samdb.h"
+#include "auth/auth.h"
+#include "samba/service.h"
+#include "lib/events/events.h"
+#include "dsdb/repl/drepl_service.h"
+#include <ldb_errors.h>
+#include "../lib/util/dlinklist.h"
+#include "librpc/gen_ndr/ndr_misc.h"
+#include "librpc/gen_ndr/ndr_drsuapi.h"
+#include "librpc/gen_ndr/ndr_drsblobs.h"
+#include "libcli/security/security.h"
+#include "param/param.h"
+#include "dsdb/common/util.h"
+
+#undef DBGC_CLASS
+#define DBGC_CLASS DBGC_DRS_REPL
+
+#undef strcasecmp
+
+/*
+ load the partitions list based on replicated NC attributes in our
+ NTDSDSA object
+ */
+WERROR dreplsrv_load_partitions(struct dreplsrv_service *s)
+{
+ WERROR status;
+ static const char *attrs[] = { "hasMasterNCs", "msDS-hasMasterNCs", "hasPartialReplicaNCs", "msDS-HasFullReplicaNCs", NULL };
+ unsigned int a;
+ int ret;
+ TALLOC_CTX *tmp_ctx;
+ struct ldb_result *res;
+ struct ldb_message_element *el;
+ struct ldb_dn *ntds_dn;
+
+ tmp_ctx = talloc_new(s);
+ W_ERROR_HAVE_NO_MEMORY(tmp_ctx);
+
+ ntds_dn = samdb_ntds_settings_dn(s->samdb, tmp_ctx);
+ if (!ntds_dn) {
+ DEBUG(1,(__location__ ": Unable to find ntds_dn: %s\n", ldb_errstring(s->samdb)));
+ talloc_free(tmp_ctx);
+ return WERR_DS_DRA_INTERNAL_ERROR;
+ }
+
+ ret = dsdb_search_dn(s->samdb, tmp_ctx, &res, ntds_dn, attrs, DSDB_SEARCH_SHOW_EXTENDED_DN);
+ if (ret != LDB_SUCCESS) {
+ DEBUG(1,("Searching for hasMasterNCs in NTDS DN failed: %s\n", ldb_errstring(s->samdb)));
+ talloc_free(tmp_ctx);
+ return WERR_DS_DRA_INTERNAL_ERROR;
+ }
+
+ for (a=0; attrs[a]; a++) {
+ int i;
+
+ el = ldb_msg_find_element(res->msgs[0], attrs[a]);
+ if (el == NULL) {
+ continue;
+ }
+ for (i=0; i<el->num_values; i++) {
+ struct ldb_dn *pdn;
+ struct dreplsrv_partition *p, *tp;
+ bool found;
+
+ pdn = ldb_dn_from_ldb_val(tmp_ctx, s->samdb, &el->values[i]);
+ if (pdn == NULL) {
+ talloc_free(tmp_ctx);
+ return WERR_DS_DRA_INTERNAL_ERROR;
+ }
+			if (!ldb_dn_validate(pdn)) {
+				talloc_free(tmp_ctx);
+				return WERR_DS_DRA_INTERNAL_ERROR;
+			}
+
+ p = talloc_zero(s, struct dreplsrv_partition);
+ W_ERROR_HAVE_NO_MEMORY(p);
+
+ p->dn = talloc_steal(p, pdn);
+ p->service = s;
+
+ if (strcasecmp(attrs[a], "hasPartialReplicaNCs") == 0) {
+ p->partial_replica = true;
+ } else if (strcasecmp(attrs[a], "msDS-HasFullReplicaNCs") == 0) {
+ p->rodc_replica = true;
+ }
+
+ /* Do not add partitions more than once */
+ found = false;
+ for (tp = s->partitions; tp; tp = tp->next) {
+ if (ldb_dn_compare(tp->dn, p->dn) == 0) {
+ found = true;
+ break;
+ }
+ }
+ if (found) {
+ talloc_free(p);
+ continue;
+ }
+
+ DLIST_ADD(s->partitions, p);
+ DEBUG(2, ("dreplsrv_partition[%s] loaded\n", ldb_dn_get_linearized(p->dn)));
+ }
+ }
+
+ talloc_free(tmp_ctx);
+
+ status = dreplsrv_refresh_partitions(s);
+ W_ERROR_NOT_OK_RETURN(status);
+
+ return WERR_OK;
+}
+
+/*
+  Check if a particular SPN exists for an account
+ */
+static bool dreplsrv_spn_exists(struct ldb_context *samdb, struct ldb_dn *account_dn,
+ const char *principal_name)
+{
+ TALLOC_CTX *tmp_ctx;
+ const char *attrs_empty[] = { NULL };
+ int ret;
+ struct ldb_result *res;
+
+ tmp_ctx = talloc_new(samdb);
+
+ ret = dsdb_search(samdb, tmp_ctx, &res, account_dn, LDB_SCOPE_BASE, attrs_empty,
+ 0, "servicePrincipalName=%s",
+ ldb_binary_encode_string(tmp_ctx, principal_name));
+ if (ret != LDB_SUCCESS || res->count != 1) {
+ talloc_free(tmp_ctx);
+ return false;
+ }
+
+ talloc_free(tmp_ctx);
+ return true;
+}
+
+/*
+ work out the principal to use for DRS replication connections
+ */
+static NTSTATUS dreplsrv_get_target_principal(struct dreplsrv_service *s,
+ TALLOC_CTX *mem_ctx,
+ const struct repsFromTo1 *rft,
+ char **target_principal)
+{
+ TALLOC_CTX *tmp_ctx;
+ struct ldb_result *res;
+ const char *attrs_server[] = { "dNSHostName", "serverReference", NULL };
+ const char *attrs_ntds[] = { "msDS-HasDomainNCs", "hasMasterNCs", NULL };
+ int ret;
+ const char *hostname, *dnsdomain=NULL;
+ struct ldb_dn *ntds_dn, *server_dn, *computer_dn;
+ struct ldb_dn *forest_dn, *nc_dn;
+
+ *target_principal = NULL;
+
+ tmp_ctx = talloc_new(mem_ctx);
+
+ /* we need to find their hostname */
+ ret = dsdb_find_dn_by_guid(s->samdb, tmp_ctx, &rft->source_dsa_obj_guid, 0, &ntds_dn);
+ if (ret != LDB_SUCCESS) {
+ talloc_free(tmp_ctx);
+		/* it's OK for their NTDSDSA DN not to be in our database */
+ return NT_STATUS_OK;
+ }
+
+ server_dn = ldb_dn_copy(tmp_ctx, ntds_dn);
+ if (server_dn == NULL) {
+ talloc_free(tmp_ctx);
+ return NT_STATUS_OK;
+ }
+
+ /* strip off the NTDS Settings */
+ if (!ldb_dn_remove_child_components(server_dn, 1)) {
+ talloc_free(tmp_ctx);
+ return NT_STATUS_OK;
+ }
+
+ ret = dsdb_search_dn(s->samdb, tmp_ctx, &res, server_dn, attrs_server, 0);
+ if (ret != LDB_SUCCESS) {
+ talloc_free(tmp_ctx);
+		/* it's OK for their server DN not to be in our database */
+ return NT_STATUS_OK;
+ }
+
+ forest_dn = ldb_get_root_basedn(s->samdb);
+ if (forest_dn == NULL) {
+ talloc_free(tmp_ctx);
+ return NT_STATUS_OK;
+ }
+
+ hostname = ldb_msg_find_attr_as_string(res->msgs[0], "dNSHostName", NULL);
+ computer_dn = ldb_msg_find_attr_as_dn(s->samdb, tmp_ctx, res->msgs[0], "serverReference");
+ if (hostname != NULL && computer_dn != NULL) {
+ char *local_principal;
+
+		/*
+		  if we have the dNSHostName attribute then we can use
+		  the GC/hostname/realm SPN. All DCs should have this SPN.
+
+		  A Windows DC may set up its dNSHostName before setting
+		  up the GC/xx/xx SPN, so make sure it exists before
+		  using it.
+		*/
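+		/*
+		 * e.g. "GC/dc01.example.com/example.com" (hostname and
+		 * forest DNS name here are purely illustrative)
+		 */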
+ local_principal = talloc_asprintf(mem_ctx, "GC/%s/%s",
+ hostname,
+ samdb_dn_to_dns_domain(tmp_ctx, forest_dn));
+ if (dreplsrv_spn_exists(s->samdb, computer_dn, local_principal)) {
+ *target_principal = local_principal;
+ talloc_free(tmp_ctx);
+ return NT_STATUS_OK;
+ }
+
+ talloc_free(local_principal);
+ }
+
+ /*
+ if we can't find the dNSHostName then we will try for the
+ E3514235-4B06-11D1-AB04-00C04FC2DCD2/${NTDSGUID}/${DNSDOMAIN}
+ SPN. To use that we need the DNS domain name of the target
+ DC. We find that by first looking for the msDS-HasDomainNCs
+ in the NTDSDSA object of the DC, and if we don't find that,
+ then we look for the hasMasterNCs attribute, and eliminate
+	  the known schema and configuration DNs. Despite how
+	  bizarre this seems, Hongwei tells us that this is in fact
+	  what Windows does to find the SPN!!
+ */
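+	/*
+	 * The resulting principal looks something like (with purely
+	 * illustrative GUID and domain values):
+	 * E3514235-4B06-11D1-AB04-00C04FC2DCD2/<ntds-guid>/example.com@example.com
+	 */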
+ ret = dsdb_search_dn(s->samdb, tmp_ctx, &res, ntds_dn, attrs_ntds, 0);
+ if (ret != LDB_SUCCESS) {
+ talloc_free(tmp_ctx);
+ return NT_STATUS_OK;
+ }
+
+ nc_dn = ldb_msg_find_attr_as_dn(s->samdb, tmp_ctx, res->msgs[0], "msDS-HasDomainNCs");
+ if (nc_dn != NULL) {
+ dnsdomain = samdb_dn_to_dns_domain(tmp_ctx, nc_dn);
+ }
+
+ if (dnsdomain == NULL) {
+ struct ldb_message_element *el;
+ int i;
+ el = ldb_msg_find_element(res->msgs[0], "hasMasterNCs");
+ for (i=0; el && i<el->num_values; i++) {
+ nc_dn = ldb_dn_from_ldb_val(tmp_ctx, s->samdb, &el->values[i]);
+ if (nc_dn == NULL ||
+ ldb_dn_compare(ldb_get_config_basedn(s->samdb), nc_dn) == 0 ||
+ ldb_dn_compare(ldb_get_schema_basedn(s->samdb), nc_dn) == 0) {
+ continue;
+ }
+ /* it must be a domain DN, get the equivalent
+ DNS domain name */
+ dnsdomain = samdb_dn_to_dns_domain(tmp_ctx, nc_dn);
+ break;
+ }
+ }
+
+ if (dnsdomain != NULL) {
+ *target_principal = talloc_asprintf(mem_ctx,
+ "E3514235-4B06-11D1-AB04-00C04FC2DCD2/%s/%s@%s",
+ GUID_string(tmp_ctx, &rft->source_dsa_obj_guid),
+ dnsdomain, dnsdomain);
+ }
+
+ talloc_free(tmp_ctx);
+ return NT_STATUS_OK;
+}
+
+
+WERROR dreplsrv_out_connection_attach(struct dreplsrv_service *s,
+ const struct repsFromTo1 *rft,
+ struct dreplsrv_out_connection **_conn)
+{
+ struct dreplsrv_out_connection *cur, *conn = NULL;
+ const char *hostname;
+
+ if (!rft->other_info) {
+ return WERR_FOOBAR;
+ }
+
+ if (!rft->other_info->dns_name) {
+ return WERR_FOOBAR;
+ }
+
+ hostname = rft->other_info->dns_name;
+
+ for (cur = s->connections; cur; cur = cur->next) {
+ const char *host;
+
+ host = dcerpc_binding_get_string_option(cur->binding, "host");
+ if (host == NULL) {
+ continue;
+ }
+
+ if (strcmp(host, hostname) == 0) {
+ conn = cur;
+ break;
+ }
+ }
+
+ if (!conn) {
+ NTSTATUS nt_status;
+ char *binding_str;
+ char *target_principal = NULL;
+
+ conn = talloc_zero(s, struct dreplsrv_out_connection);
+ W_ERROR_HAVE_NO_MEMORY(conn);
+
+ conn->service = s;
+
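+		/*
+		 * e.g. "ncacn_ip_tcp:dc01.example.com[krb5,seal]"
+		 * (hostname here is illustrative)
+		 */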
+ binding_str = talloc_asprintf(conn, "ncacn_ip_tcp:%s[krb5,seal]",
+ hostname);
+ W_ERROR_HAVE_NO_MEMORY(binding_str);
+ nt_status = dcerpc_parse_binding(conn, binding_str, &conn->binding);
+ talloc_free(binding_str);
+ if (!NT_STATUS_IS_OK(nt_status)) {
+ return ntstatus_to_werror(nt_status);
+ }
+
+ /* use the GC principal for DRS replication */
+ nt_status = dreplsrv_get_target_principal(s, conn->binding,
+ rft, &target_principal);
+ if (!NT_STATUS_IS_OK(nt_status)) {
+ return ntstatus_to_werror(nt_status);
+ }
+
+ nt_status = dcerpc_binding_set_string_option(conn->binding,
+ "target_principal",
+ target_principal);
+ TALLOC_FREE(target_principal);
+ if (!NT_STATUS_IS_OK(nt_status)) {
+ return ntstatus_to_werror(nt_status);
+ }
+
+ DLIST_ADD_END(s->connections, conn);
+
+ DEBUG(4,("dreplsrv_out_connection_attach(%s): create\n", hostname));
+ } else {
+ DEBUG(4,("dreplsrv_out_connection_attach(%s): attach\n", hostname));
+ }
+
+ *_conn = conn;
+ return WERR_OK;
+}
+
+/*
+ find an existing source dsa in a list
+ */
+static struct dreplsrv_partition_source_dsa *dreplsrv_find_source_dsa(struct dreplsrv_partition_source_dsa *list,
+ struct GUID *guid)
+{
+ struct dreplsrv_partition_source_dsa *s;
+ for (s=list; s; s=s->next) {
+ if (GUID_equal(&s->repsFrom1->source_dsa_obj_guid, guid)) {
+ return s;
+ }
+ }
+ return NULL;
+}
+
+
+
+static WERROR dreplsrv_partition_add_source_dsa(struct dreplsrv_service *s,
+ struct dreplsrv_partition *p,
+ struct dreplsrv_partition_source_dsa **listp,
+ struct dreplsrv_partition_source_dsa *check_list,
+ const struct ldb_val *val)
+{
+ WERROR status;
+ enum ndr_err_code ndr_err;
+ struct dreplsrv_partition_source_dsa *source, *s2;
+
+ source = talloc_zero(p, struct dreplsrv_partition_source_dsa);
+ W_ERROR_HAVE_NO_MEMORY(source);
+
+ ndr_err = ndr_pull_struct_blob(val, source,
+ &source->_repsFromBlob,
+ (ndr_pull_flags_fn_t)ndr_pull_repsFromToBlob);
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
+ NTSTATUS nt_status = ndr_map_error2ntstatus(ndr_err);
+ talloc_free(source);
+ return ntstatus_to_werror(nt_status);
+ }
+ /* NDR_PRINT_DEBUG(repsFromToBlob, &source->_repsFromBlob); */
+ if (source->_repsFromBlob.version != 1) {
+ talloc_free(source);
+ return WERR_DS_DRA_INTERNAL_ERROR;
+ }
+
+ source->partition = p;
+ source->repsFrom1 = &source->_repsFromBlob.ctr.ctr1;
+
+ status = dreplsrv_out_connection_attach(s, source->repsFrom1, &source->conn);
+ W_ERROR_NOT_OK_RETURN(status);
+
+ if (check_list &&
+ dreplsrv_find_source_dsa(check_list, &source->repsFrom1->source_dsa_obj_guid)) {
+		/* it's in the check list, don't add it again */
+ talloc_free(source);
+ return WERR_OK;
+ }
+
+ /* re-use an existing source if found */
+ for (s2=*listp; s2; s2=s2->next) {
+ if (GUID_equal(&s2->repsFrom1->source_dsa_obj_guid,
+ &source->repsFrom1->source_dsa_obj_guid)) {
+ talloc_free(s2->repsFrom1->other_info);
+ *s2->repsFrom1 = *source->repsFrom1;
+ talloc_steal(s2, s2->repsFrom1->other_info);
+ talloc_free(source);
+ return WERR_OK;
+ }
+ }
+
+ DLIST_ADD_END(*listp, source);
+ return WERR_OK;
+}
+
+/**
+ * Find a partition given an NC.
+ * If the NC can't be found it will return BAD_NC.
+ * Initial checks for invalid parameters have to be done beforehand.
+ */
+WERROR dreplsrv_partition_find_for_nc(struct dreplsrv_service *s,
+ struct GUID *nc_guid,
+ struct dom_sid *nc_sid,
+ const char *nc_dn_str,
+ struct dreplsrv_partition **_p)
+{
+ struct dreplsrv_partition *p;
+ bool valid_sid, valid_guid;
+
+ SMB_ASSERT(_p);
+
+ valid_sid = nc_sid && !is_null_sid(nc_sid);
+ valid_guid = nc_guid && !GUID_all_zero(nc_guid);
+
+ if (!valid_sid && !valid_guid && (!nc_dn_str)) {
+ return WERR_DS_DRA_BAD_NC;
+ }
+
+ for (p = s->partitions; p; p = p->next) {
+ if ((valid_guid && GUID_equal(&p->nc.guid, nc_guid))
+ || strequal(p->nc.dn, nc_dn_str)
+ || (valid_sid && dom_sid_equal(&p->nc.sid, nc_sid)))
+ {
+			/* fill in the right guid and sid if possible */
+ if (nc_guid && !valid_guid) {
+ dsdb_get_extended_dn_guid(p->dn, nc_guid, "GUID");
+ }
+ if (nc_sid && !valid_sid) {
+ dsdb_get_extended_dn_sid(p->dn, nc_sid, "SID");
+ }
+ *_p = p;
+ return WERR_OK;
+ }
+ }
+
+ return WERR_DS_DRA_BAD_NC;
+}
+
+WERROR dreplsrv_partition_source_dsa_by_guid(struct dreplsrv_partition *p,
+ const struct GUID *dsa_guid,
+ struct dreplsrv_partition_source_dsa **_dsa)
+{
+ struct dreplsrv_partition_source_dsa *dsa;
+
+ SMB_ASSERT(dsa_guid != NULL);
+ SMB_ASSERT(!GUID_all_zero(dsa_guid));
+ SMB_ASSERT(_dsa);
+
+ for (dsa = p->sources; dsa; dsa = dsa->next) {
+ if (GUID_equal(dsa_guid, &dsa->repsFrom1->source_dsa_obj_guid)) {
+ *_dsa = dsa;
+ return WERR_OK;
+ }
+ }
+
+ return WERR_DS_DRA_NO_REPLICA;
+}
+
+WERROR dreplsrv_partition_source_dsa_by_dns(const struct dreplsrv_partition *p,
+ const char *dsa_dns,
+ struct dreplsrv_partition_source_dsa **_dsa)
+{
+ struct dreplsrv_partition_source_dsa *dsa;
+
+ SMB_ASSERT(dsa_dns != NULL);
+ SMB_ASSERT(_dsa);
+
+ for (dsa = p->sources; dsa; dsa = dsa->next) {
+ if (strequal(dsa_dns, dsa->repsFrom1->other_info->dns_name)) {
+ *_dsa = dsa;
+ return WERR_OK;
+ }
+ }
+
+ return WERR_DS_DRA_NO_REPLICA;
+}
+
+
+/*
+ create a temporary dsa structure for a replication. This is needed
+ for the initial replication of a new partition, such as when a new
+ domain NC is created and we are a global catalog server
+ */
+WERROR dreplsrv_partition_source_dsa_temporary(struct dreplsrv_partition *p,
+ TALLOC_CTX *mem_ctx,
+ const struct GUID *dsa_guid,
+ struct dreplsrv_partition_source_dsa **_dsa)
+{
+ struct dreplsrv_partition_source_dsa *dsa;
+ WERROR werr;
+
+ dsa = talloc_zero(mem_ctx, struct dreplsrv_partition_source_dsa);
+ W_ERROR_HAVE_NO_MEMORY(dsa);
+
+ dsa->partition = p;
+ dsa->repsFrom1 = &dsa->_repsFromBlob.ctr.ctr1;
+ dsa->repsFrom1->replica_flags = 0;
+ dsa->repsFrom1->source_dsa_obj_guid = *dsa_guid;
+
+ dsa->repsFrom1->other_info = talloc_zero(dsa, struct repsFromTo1OtherInfo);
+ W_ERROR_HAVE_NO_MEMORY(dsa->repsFrom1->other_info);
+
+ dsa->repsFrom1->other_info->dns_name = samdb_ntds_msdcs_dns_name(p->service->samdb,
+ dsa->repsFrom1->other_info, dsa_guid);
+ W_ERROR_HAVE_NO_MEMORY(dsa->repsFrom1->other_info->dns_name);
+
+ werr = dreplsrv_out_connection_attach(p->service, dsa->repsFrom1, &dsa->conn);
+ if (!W_ERROR_IS_OK(werr)) {
+ DEBUG(0,(__location__ ": Failed to attach connection to %s\n",
+ ldb_dn_get_linearized(p->dn)));
+ talloc_free(dsa);
+ return werr;
+ }
+
+ *_dsa = dsa;
+
+ return WERR_OK;
+}
+
+
+static WERROR dreplsrv_refresh_partition(struct dreplsrv_service *s,
+ struct dreplsrv_partition *p)
+{
+ WERROR status;
+ NTSTATUS ntstatus;
+ struct ldb_message_element *orf_el = NULL;
+ struct ldb_result *r = NULL;
+ unsigned int i;
+ int ret;
+ TALLOC_CTX *mem_ctx = talloc_new(p);
+ static const char *attrs[] = {
+ "repsFrom",
+ "repsTo",
+ NULL
+ };
+ struct ldb_dn *dn;
+
+ DEBUG(4, ("dreplsrv_refresh_partition(%s)\n",
+ ldb_dn_get_linearized(p->dn)));
+
+ ret = dsdb_search_dn(s->samdb, mem_ctx, &r, p->dn, attrs, DSDB_SEARCH_SHOW_EXTENDED_DN);
+ if (ret == LDB_ERR_NO_SUCH_OBJECT) {
+ /* we haven't replicated the partition yet, but we
+ * can fill in the guid, sid etc from the partition DN */
+ dn = p->dn;
+ } else if (ret != LDB_SUCCESS) {
+ talloc_free(mem_ctx);
+ return WERR_FOOBAR;
+ } else {
+ dn = r->msgs[0]->dn;
+ }
+
+ talloc_free(discard_const(p->nc.dn));
+ ZERO_STRUCT(p->nc);
+ p->nc.dn = ldb_dn_alloc_linearized(p, dn);
+ W_ERROR_HAVE_NO_MEMORY(p->nc.dn);
+ ntstatus = dsdb_get_extended_dn_guid(dn, &p->nc.guid, "GUID");
+ if (!NT_STATUS_IS_OK(ntstatus)) {
+ DEBUG(0,(__location__ ": unable to get GUID for %s: %s\n",
+ p->nc.dn, nt_errstr(ntstatus)));
+ talloc_free(mem_ctx);
+ return WERR_DS_DRA_INTERNAL_ERROR;
+ }
+ dsdb_get_extended_dn_sid(dn, &p->nc.sid, "SID");
+
+ talloc_free(p->uptodatevector.cursors);
+ talloc_free(p->uptodatevector_ex.cursors);
+ ZERO_STRUCT(p->uptodatevector);
+ ZERO_STRUCT(p->uptodatevector_ex);
+
+ ret = dsdb_load_udv_v2(s->samdb, p->dn, p, &p->uptodatevector.cursors, &p->uptodatevector.count);
+ if (ret != LDB_SUCCESS) {
+ DEBUG(4,(__location__ ": no UDV available for %s\n", ldb_dn_get_linearized(p->dn)));
+ }
+
+ status = WERR_OK;
+
+ if (r != NULL && (orf_el = ldb_msg_find_element(r->msgs[0], "repsFrom"))) {
+ for (i=0; i < orf_el->num_values; i++) {
+ status = dreplsrv_partition_add_source_dsa(s, p, &p->sources,
+ NULL, &orf_el->values[i]);
+ W_ERROR_NOT_OK_GOTO_DONE(status);
+ }
+ }
+
+ if (r != NULL && (orf_el = ldb_msg_find_element(r->msgs[0], "repsTo"))) {
+ for (i=0; i < orf_el->num_values; i++) {
+ status = dreplsrv_partition_add_source_dsa(s, p, &p->notifies,
+ p->sources, &orf_el->values[i]);
+ W_ERROR_NOT_OK_GOTO_DONE(status);
+ }
+ }
+
+done:
+ talloc_free(mem_ctx);
+ return status;
+}
+
+WERROR dreplsrv_refresh_partitions(struct dreplsrv_service *s)
+{
+ WERROR status;
+ struct dreplsrv_partition *p;
+
+ for (p = s->partitions; p; p = p->next) {
+ status = dreplsrv_refresh_partition(s, p);
+ W_ERROR_NOT_OK_RETURN(status);
+ }
+
+ return WERR_OK;
+}
diff --git a/source4/dsdb/repl/drepl_periodic.c b/source4/dsdb/repl/drepl_periodic.c
new file mode 100644
index 0000000..4cdc8cb
--- /dev/null
+++ b/source4/dsdb/repl/drepl_periodic.c
@@ -0,0 +1,157 @@
+/*
+ Unix SMB/CIFS Implementation.
+ DSDB replication service periodic handling
+
+ Copyright (C) Stefan Metzmacher 2007
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#include "includes.h"
+#include "lib/events/events.h"
+#include "dsdb/samdb/samdb.h"
+#include "auth/auth.h"
+#include "samba/service.h"
+#include "dsdb/repl/drepl_service.h"
+#include <ldb_errors.h>
+#include "../lib/util/dlinklist.h"
+#include "librpc/gen_ndr/ndr_misc.h"
+#include "librpc/gen_ndr/ndr_drsuapi.h"
+#include "librpc/gen_ndr/ndr_drsblobs.h"
+
+#undef DBGC_CLASS
+#define DBGC_CLASS DBGC_DRS_REPL
+
+static void dreplsrv_periodic_run(struct dreplsrv_service *service);
+
+static void dreplsrv_periodic_handler_te(struct tevent_context *ev, struct tevent_timer *te,
+ struct timeval t, void *ptr)
+{
+ struct dreplsrv_service *service = talloc_get_type(ptr, struct dreplsrv_service);
+ WERROR status;
+
+ service->periodic.te = NULL;
+
+ dreplsrv_periodic_run(service);
+
+ status = dreplsrv_periodic_schedule(service, service->periodic.interval);
+ if (!W_ERROR_IS_OK(status)) {
+ task_server_terminate(service->task, win_errstr(status), false);
+ return;
+ }
+}
+
+WERROR dreplsrv_periodic_schedule(struct dreplsrv_service *service, uint32_t next_interval)
+{
+ TALLOC_CTX *tmp_mem;
+ struct tevent_timer *new_te;
+ struct timeval next_time;
+
+ /* prevent looping */
+ if (next_interval == 0) next_interval = 1;
+
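+	/*
+	 * i.e. "now + next_interval seconds"; the second argument to
+	 * timeval_current_ofs() is microseconds
+	 */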
+ next_time = timeval_current_ofs(next_interval, 50);
+
+ if (service->periodic.te) {
+		/*
+		 * if the timestamp of the new event is later than the
+		 * currently scheduled one, we don't need to reschedule
+		 */
+ if (timeval_compare(&next_time, &service->periodic.next_event) > 0) {
+ return WERR_OK;
+ }
+ }
+
+ /* reset the next scheduled timestamp */
+ service->periodic.next_event = next_time;
+
+ new_te = tevent_add_timer(service->task->event_ctx, service,
+ service->periodic.next_event,
+ dreplsrv_periodic_handler_te, service);
+ W_ERROR_HAVE_NO_MEMORY(new_te);
+
+ tmp_mem = talloc_new(service);
+ DEBUG(4,("dreplsrv_periodic_schedule(%u) %sscheduled for: %s\n",
+ next_interval,
+ (service->periodic.te?"re":""),
+ nt_time_string(tmp_mem, timeval_to_nttime(&next_time))));
+ talloc_free(tmp_mem);
+
+ talloc_free(service->periodic.te);
+ service->periodic.te = new_te;
+
+ return WERR_OK;
+}
+
+static void dreplsrv_periodic_run(struct dreplsrv_service *service)
+{
+ TALLOC_CTX *mem_ctx;
+
+ DEBUG(4,("dreplsrv_periodic_run(): schedule pull replication\n"));
+
+ /*
+	 * The KCC or some administrative tool
+	 * might have changed the topology graph,
+	 * i.e. repsFrom/repsTo
+ */
+ dreplsrv_refresh_partitions(service);
+
+ mem_ctx = talloc_new(service);
+ dreplsrv_schedule_pull_replication(service, mem_ctx);
+ talloc_free(mem_ctx);
+
+ DEBUG(4,("dreplsrv_periodic_run(): run pending_ops memory=%u\n",
+ (unsigned)talloc_total_blocks(service)));
+
+ dreplsrv_ridalloc_check_rid_pool(service);
+
+ dreplsrv_run_pending_ops(service);
+}
+
+/*
+ run the next pending op, either a notify or a pull
+ */
+void dreplsrv_run_pending_ops(struct dreplsrv_service *s)
+{
+ if (!s->ops.notifies && !s->ops.pending) {
+ return;
+ }
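+	/*
+	 * Run whichever queue has the earlier scheduled head
+	 * operation; notifications win when the times are equal.
+	 */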
+ if (!s->ops.notifies ||
+ (s->ops.pending &&
+ s->ops.notifies->schedule_time > s->ops.pending->schedule_time)) {
+ dreplsrv_run_pull_ops(s);
+ } else {
+ dreplsrv_notify_run_ops(s);
+ }
+}
+
+static void dreplsrv_pending_pull_handler_im(struct tevent_context *ev,
+ struct tevent_immediate *im,
+ void *ptr)
+{
+ struct dreplsrv_service *service = talloc_get_type(ptr, struct dreplsrv_service);
+
+ dreplsrv_run_pull_ops(service);
+}
+
+void dreplsrv_pendingops_schedule_pull_now(struct dreplsrv_service *service)
+{
+ tevent_schedule_immediate(service->pending.im, service->task->event_ctx,
+ dreplsrv_pending_pull_handler_im,
+ service);
+
+ return;
+}
+
diff --git a/source4/dsdb/repl/drepl_replica.c b/source4/dsdb/repl/drepl_replica.c
new file mode 100644
index 0000000..05d0683
--- /dev/null
+++ b/source4/dsdb/repl/drepl_replica.c
@@ -0,0 +1,62 @@
+/*
+ Unix SMB/CIFS Implementation.
+
+ DSDB replication service - DsReplica{Add,Del,Mod} handling
+
+ Copyright (C) Andrew Tridgell 2010
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#include "includes.h"
+#include "ldb_module.h"
+#include "dsdb/samdb/samdb.h"
+#include "samba/service.h"
+#include "dsdb/repl/drepl_service.h"
+#include "param/param.h"
+#include "librpc/gen_ndr/ndr_drsuapi.h"
+
+#undef DBGC_CLASS
+#define DBGC_CLASS DBGC_DRS_REPL
+
+/*
+ implement DsReplicaAdd (forwarded from DRS server)
+ */
+NTSTATUS drepl_replica_add(struct dreplsrv_service *service,
+ struct drsuapi_DsReplicaAdd *r)
+{
+ NDR_PRINT_FUNCTION_DEBUG(drsuapi_DsReplicaAdd, NDR_IN, r);
+ return NT_STATUS_NOT_IMPLEMENTED;
+}
+
+/*
+ implement DsReplicaDel (forwarded from DRS server)
+ */
+NTSTATUS drepl_replica_del(struct dreplsrv_service *service,
+ struct drsuapi_DsReplicaDel *r)
+{
+ NDR_PRINT_FUNCTION_DEBUG(drsuapi_DsReplicaDel, NDR_IN, r);
+ return NT_STATUS_NOT_IMPLEMENTED;
+}
+
+/*
+ implement DsReplicaMod (forwarded from DRS server)
+ */
+NTSTATUS drepl_replica_mod(struct dreplsrv_service *service,
+ struct drsuapi_DsReplicaMod *r)
+{
+ NDR_PRINT_FUNCTION_DEBUG(drsuapi_DsReplicaMod, NDR_IN, r);
+ return NT_STATUS_NOT_IMPLEMENTED;
+}
diff --git a/source4/dsdb/repl/drepl_ridalloc.c b/source4/dsdb/repl/drepl_ridalloc.c
new file mode 100644
index 0000000..6794d4b
--- /dev/null
+++ b/source4/dsdb/repl/drepl_ridalloc.c
@@ -0,0 +1,265 @@
+/*
+ Unix SMB/CIFS Implementation.
+
+ DSDB replication service - RID allocation code
+
+ Copyright (C) Andrew Tridgell 2010
+ Copyright (C) Andrew Bartlett 2010
+
+ based on drepl_notify.c
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#include "includes.h"
+#include "ldb_module.h"
+#include "dsdb/samdb/samdb.h"
+#include "samba/service.h"
+#include "dsdb/repl/drepl_service.h"
+#include "param/param.h"
+
+#undef DBGC_CLASS
+#define DBGC_CLASS DBGC_DRS_REPL
+
+/*
+ called when a rid allocation request has completed
+ */
+static void drepl_new_rid_pool_callback(struct dreplsrv_service *service,
+ WERROR werr,
+ enum drsuapi_DsExtendedError ext_err,
+ void *cb_data)
+{
+ if (!W_ERROR_IS_OK(werr)) {
+ DEBUG(0,(__location__ ": RID Manager failed RID allocation - %s - extended_ret[0x%X]\n",
+ win_errstr(werr), ext_err));
+ } else {
+ DEBUG(3,(__location__ ": RID Manager completed RID allocation OK\n"));
+ }
+
+ service->rid_alloc_in_progress = false;
+}
+
+/*
+ schedule a getncchanges request to the RID Manager to ask for a new
+ set of RIDs using DRSUAPI_EXOP_FSMO_RID_ALLOC
+ */
+static WERROR drepl_request_new_rid_pool(struct dreplsrv_service *service,
+ struct ldb_dn *rid_manager_dn, struct ldb_dn *fsmo_role_dn,
+ uint64_t alloc_pool)
+{
+ WERROR werr = drepl_request_extended_op(service,
+ rid_manager_dn,
+ fsmo_role_dn,
+ DRSUAPI_EXOP_FSMO_RID_ALLOC,
+ alloc_pool,
+ 0,
+ drepl_new_rid_pool_callback, NULL);
+ if (W_ERROR_IS_OK(werr)) {
+ service->rid_alloc_in_progress = true;
+ }
+ return werr;
+}
+
+
+/*
+ see if we are on the last pool we have
+ */
+static int drepl_ridalloc_pool_exhausted(struct ldb_context *ldb,
+ bool *exhausted,
+ uint64_t *_alloc_pool)
+{
+ struct ldb_dn *server_dn, *machine_dn, *rid_set_dn;
+ TALLOC_CTX *tmp_ctx = talloc_new(ldb);
+ uint64_t alloc_pool;
+ uint64_t prev_pool;
+ uint32_t prev_pool_lo, prev_pool_hi;
+ uint32_t next_rid;
+ static const char * const attrs[] = {
+ "rIDAllocationPool",
+ "rIDPreviousAllocationPool",
+ "rIDNextRid",
+ NULL
+ };
+ int ret;
+ struct ldb_result *res;
+
+ *exhausted = false;
+ *_alloc_pool = UINT64_MAX;
+
+ server_dn = ldb_dn_get_parent(tmp_ctx, samdb_ntds_settings_dn(ldb, tmp_ctx));
+ if (!server_dn) {
+ talloc_free(tmp_ctx);
+ return ldb_operr(ldb);
+ }
+
+ ret = samdb_reference_dn(ldb, tmp_ctx, server_dn, "serverReference", &machine_dn);
+ if (ret != LDB_SUCCESS) {
+ DEBUG(0,(__location__ ": Failed to find serverReference in %s - %s\n",
+ ldb_dn_get_linearized(server_dn), ldb_errstring(ldb)));
+ talloc_free(tmp_ctx);
+ return ret;
+ }
+
+ ret = samdb_reference_dn(ldb, tmp_ctx, machine_dn, "rIDSetReferences", &rid_set_dn);
+ if (ret == LDB_ERR_NO_SUCH_ATTRIBUTE) {
+ *exhausted = true;
+ *_alloc_pool = 0;
+ talloc_free(tmp_ctx);
+ return LDB_SUCCESS;
+ }
+ if (ret != LDB_SUCCESS) {
+ DEBUG(0,(__location__ ": Failed to find rIDSetReferences in %s - %s\n",
+ ldb_dn_get_linearized(machine_dn), ldb_errstring(ldb)));
+ talloc_free(tmp_ctx);
+ return ret;
+ }
+
+ ret = ldb_search(ldb, tmp_ctx, &res, rid_set_dn, LDB_SCOPE_BASE, attrs, NULL);
+ if (ret != LDB_SUCCESS) {
+ DEBUG(0,(__location__ ": Failed to load RID Set attrs from %s - %s\n",
+ ldb_dn_get_linearized(rid_set_dn), ldb_errstring(ldb)));
+ talloc_free(tmp_ctx);
+ return ret;
+ }
+
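+	/*
+	 * The pool attributes pack a RID range into a single 64-bit
+	 * value: the low 32 bits hold the first RID of the range and
+	 * the high 32 bits hold the last RID.
+	 */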
+ alloc_pool = ldb_msg_find_attr_as_uint64(res->msgs[0], "rIDAllocationPool", 0);
+ prev_pool = ldb_msg_find_attr_as_uint64(res->msgs[0], "rIDPreviousAllocationPool", 0);
+ prev_pool_lo = prev_pool & 0xFFFFFFFF;
+ prev_pool_hi = prev_pool >> 32;
+ next_rid = ldb_msg_find_attr_as_uint(res->msgs[0], "rIDNextRid", 0);
+
+ if (alloc_pool != prev_pool) {
+ talloc_free(tmp_ctx);
+ return LDB_SUCCESS;
+ }
+
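+	/*
+	 * Only ask for a new pool once roughly half of the current
+	 * range has been consumed. For example (illustrative numbers),
+	 * with a pool covering RIDs 1500-1999 the midpoint is 1749, so
+	 * we stay quiet until rIDNextRid reaches 1749.
+	 */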
+ if (next_rid < (prev_pool_hi + prev_pool_lo)/2) {
+ talloc_free(tmp_ctx);
+ return LDB_SUCCESS;
+ }
+
+ *exhausted = true;
+ *_alloc_pool = alloc_pool;
+ talloc_free(tmp_ctx);
+ return LDB_SUCCESS;
+}
+
+
+/*
+ see if we are low on RIDs in the RID Set rIDAllocationPool. If we
+ are, then schedule a replication call with DRSUAPI_EXOP_FSMO_RID_ALLOC
+ to the RID Manager
+ */
+WERROR dreplsrv_ridalloc_check_rid_pool(struct dreplsrv_service *service)
+{
+ struct ldb_dn *rid_manager_dn, *fsmo_role_dn;
+ TALLOC_CTX *tmp_ctx = talloc_new(service);
+ struct ldb_context *ldb = service->samdb;
+ bool exhausted;
+ WERROR werr;
+ int ret;
+ uint64_t alloc_pool;
+ bool is_us;
+
+ if (service->am_rodc) {
+ talloc_free(tmp_ctx);
+ return WERR_OK;
+ }
+
+ if (service->rid_alloc_in_progress) {
+ talloc_free(tmp_ctx);
+ return WERR_OK;
+ }
+
+ /*
+ steps:
+ - find who the RID Manager is
+ - if we are the RID Manager then nothing to do
+ - find our RID Set object
+ - load rIDAllocationPool and rIDPreviousAllocationPool
+ - if rIDAllocationPool != rIDPreviousAllocationPool then
+ nothing to do
+ - schedule a getncchanges with DRSUAPI_EXOP_FSMO_RID_ALLOC
+ to the RID Manager
+ */
+
+ /* work out who is the RID Manager */
+ ret = samdb_rid_manager_dn(ldb, tmp_ctx, &rid_manager_dn);
+ if (ret != LDB_SUCCESS) {
+ DEBUG(0, (__location__ ": Failed to find RID Manager object - %s\n", ldb_errstring(ldb)));
+ talloc_free(tmp_ctx);
+ return WERR_DS_DRA_INTERNAL_ERROR;
+ }
+
+ /* find the DN of the RID Manager */
+ ret = samdb_reference_dn(ldb, tmp_ctx, rid_manager_dn, "fSMORoleOwner", &fsmo_role_dn);
+ if (ret != LDB_SUCCESS) {
+ DEBUG(0,(__location__ ": Failed to find fSMORoleOwner in RID Manager object - %s\n",
+ ldb_errstring(ldb)));
+ talloc_free(tmp_ctx);
+ return WERR_DS_DRA_INTERNAL_ERROR;
+ }
+
+ ret = samdb_dn_is_our_ntdsa(ldb, fsmo_role_dn, &is_us);
+ if (ret != LDB_SUCCESS) {
+		DEBUG(0,(__location__ ": Failed to determine if %s is our ntdsDsa object - %s\n",
+ ldb_dn_get_linearized(fsmo_role_dn), ldb_errstring(ldb)));
+ talloc_free(tmp_ctx);
+ return WERR_DS_DRA_INTERNAL_ERROR;
+ }
+
+ if (is_us) {
+ /* we are the RID Manager - no need to do a
+ DRSUAPI_EXOP_FSMO_RID_ALLOC */
+ talloc_free(tmp_ctx);
+ return WERR_OK;
+ }
+
+ ret = drepl_ridalloc_pool_exhausted(ldb, &exhausted, &alloc_pool);
+ if (ret != LDB_SUCCESS) {
+ talloc_free(tmp_ctx);
+ return WERR_DS_DRA_INTERNAL_ERROR;
+ }
+
+ if (!exhausted) {
+ /* don't need a new pool */
+ talloc_free(tmp_ctx);
+ return WERR_OK;
+ }
+
+ DEBUG(2,(__location__ ": Requesting more RIDs from RID Manager\n"));
+
+ werr = drepl_request_new_rid_pool(service, rid_manager_dn, fsmo_role_dn, alloc_pool);
+ talloc_free(tmp_ctx);
+ return werr;
+}
+
+/* called by the samldb ldb module to tell us to ask for a new RID
+ pool */
+void dreplsrv_allocate_rid(struct imessaging_context *msg,
+ void *private_data,
+ uint32_t msg_type,
+ struct server_id server_id,
+ size_t num_fds,
+ int *fds,
+ DATA_BLOB *data)
+{
+ struct dreplsrv_service *service = talloc_get_type(private_data, struct dreplsrv_service);
+ if (num_fds != 0) {
+ DBG_WARNING("Received %zu fds, ignoring message\n", num_fds);
+ return;
+ }
+ dreplsrv_ridalloc_check_rid_pool(service);
+}
diff --git a/source4/dsdb/repl/drepl_secret.c b/source4/dsdb/repl/drepl_secret.c
new file mode 100644
index 0000000..47a8ca9
--- /dev/null
+++ b/source4/dsdb/repl/drepl_secret.c
@@ -0,0 +1,146 @@
+/*
+ Unix SMB/CIFS Implementation.
+
+ DSDB replication service - repl secret handling
+
+ Copyright (C) Andrew Tridgell 2010
+ Copyright (C) Andrew Bartlett 2010
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#include "includes.h"
+#include "ldb_module.h"
+#include "dsdb/samdb/samdb.h"
+#include "samba/service.h"
+#include "dsdb/repl/drepl_service.h"
+#include "param/param.h"
+
+#undef DBGC_CLASS
+#define DBGC_CLASS DBGC_DRS_REPL
+
+struct repl_secret_state {
+ const char *user_dn;
+};
+
+/*
+ called when a repl secret has completed
+ */
+static void drepl_repl_secret_callback(struct dreplsrv_service *service,
+ WERROR werr,
+ enum drsuapi_DsExtendedError ext_err,
+ void *cb_data)
+{
+ struct repl_secret_state *state = talloc_get_type_abort(cb_data, struct repl_secret_state);
+ if (!W_ERROR_IS_OK(werr)) {
+ if (W_ERROR_EQUAL(werr, WERR_DS_DRA_SECRETS_DENIED)) {
+ DEBUG(3,(__location__ ": repl secret disallowed for user "
+ "%s - not in allowed replication group\n",
+ state->user_dn));
+ } else {
+ DEBUG(3,(__location__ ": repl secret failed for user %s - %s: extended_ret[0x%X]\n",
+ state->user_dn, win_errstr(werr), ext_err));
+ }
+ } else {
+ DEBUG(3,(__location__ ": repl secret completed OK for '%s'\n", state->user_dn));
+ }
+ talloc_free(state);
+}
+
+
+/**
+ * Called when the auth code wants us to try and replicate
+ * a user's secrets
+ */
+void drepl_repl_secret(struct dreplsrv_service *service,
+ const char *user_dn)
+{
+ WERROR werr;
+ struct ldb_dn *nc_dn, *nc_root, *source_dsa_dn;
+ struct dreplsrv_partition *p;
+ struct GUID *source_dsa_guid;
+ struct repl_secret_state *state;
+ int ret;
+
+ state = talloc_zero(service, struct repl_secret_state);
+ if (state == NULL) {
+ /* nothing to do, no return value */
+ return;
+ }
+
+ /* keep a copy for logging in the callback */
+ state->user_dn = talloc_strdup(state, user_dn);
+
+ nc_dn = ldb_dn_new(state, service->samdb, user_dn);
+ if (!ldb_dn_validate(nc_dn)) {
+ DEBUG(0,(__location__ ": Failed to parse user_dn '%s'\n", user_dn));
+ talloc_free(state);
+ return;
+ }
+
+ /* work out which partition this is in */
+ ret = dsdb_find_nc_root(service->samdb, state, nc_dn, &nc_root);
+ if (ret != LDB_SUCCESS) {
+ DEBUG(0,(__location__ ": Failed to find nc_root for user_dn '%s'\n", user_dn));
+ talloc_free(state);
+ return;
+ }
+
+ /* find the partition in our list */
+ for (p=service->partitions; p; p=p->next) {
+ if (ldb_dn_compare(p->dn, nc_root) == 0) {
+ break;
+ }
+ }
+ if (p == NULL) {
+ DEBUG(0,(__location__ ": Failed to find partition for nc_root '%s'\n", ldb_dn_get_linearized(nc_root)));
+ talloc_free(state);
+ return;
+ }
+
+ if (p->sources == NULL) {
+ DEBUG(0,(__location__ ": No sources for nc_root '%s' for user_dn '%s'\n",
+ ldb_dn_get_linearized(nc_root), user_dn));
+ talloc_free(state);
+ return;
+ }
+
+ /* use the first source, for no particularly good reason */
+ source_dsa_guid = &p->sources->repsFrom1->source_dsa_obj_guid;
+
+ source_dsa_dn = ldb_dn_new(state, service->samdb,
+ talloc_asprintf(state, "<GUID=%s>",
+ GUID_string(state, source_dsa_guid)));
+ if (!ldb_dn_validate(source_dsa_dn)) {
+ DEBUG(0,(__location__ ": Invalid source DSA GUID '%s' for user_dn '%s'\n",
+ GUID_string(state, source_dsa_guid), user_dn));
+ talloc_free(state);
+ return;
+ }
+
+ werr = drepl_request_extended_op(service,
+ nc_dn,
+ source_dsa_dn,
+ DRSUAPI_EXOP_REPL_SECRET,
+ 0,
+ p->sources->repsFrom1->highwatermark.highest_usn,
+ drepl_repl_secret_callback, state);
+ if (!W_ERROR_IS_OK(werr)) {
+ DEBUG(2,(__location__ ": Failed to setup secret replication for user_dn '%s'\n", user_dn));
+ talloc_free(state);
+ return;
+ }
+ DEBUG(3,(__location__ ": started secret replication for %s\n", user_dn));
+}
diff --git a/source4/dsdb/repl/drepl_service.c b/source4/dsdb/repl/drepl_service.c
new file mode 100644
index 0000000..02ece26
--- /dev/null
+++ b/source4/dsdb/repl/drepl_service.c
@@ -0,0 +1,545 @@
+/*
+ Unix SMB/CIFS Implementation.
+ DSDB replication service
+
+ Copyright (C) Stefan Metzmacher 2007
+ Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2010
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#include "includes.h"
+#include "dsdb/samdb/samdb.h"
+#include "auth/auth.h"
+#include "samba/service.h"
+#include "lib/events/events.h"
+#include "dsdb/repl/drepl_service.h"
+#include <ldb_errors.h>
+#include "../lib/util/dlinklist.h"
+#include "librpc/gen_ndr/ndr_misc.h"
+#include "librpc/gen_ndr/ndr_drsuapi.h"
+#include "librpc/gen_ndr/ndr_drsblobs.h"
+#include "librpc/gen_ndr/ndr_irpc.h"
+#include "param/param.h"
+#include "libds/common/roles.h"
+
+#undef DBGC_CLASS
+#define DBGC_CLASS DBGC_DRS_REPL
+
+/**
+ * Call-back data for _drepl_replica_sync_done_cb()
+ */
+struct drepl_replica_sync_cb_data {
+ struct irpc_message *msg;
+ struct drsuapi_DsReplicaSync *r;
+
+ /* number of ops left to be completed */
+ int ops_count;
+
+ /* last failure error code */
+ WERROR werr_last_failure;
+};
+
+
+static WERROR dreplsrv_init_creds(struct dreplsrv_service *service)
+{
+ service->system_session_info = system_session(service->task->lp_ctx);
+ if (service->system_session_info == NULL) {
+ return WERR_NOT_ENOUGH_MEMORY;
+ }
+
+ return WERR_OK;
+}
+
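+/*
+  connect to the local samdb, cache our NTDS objectGUID and RODC
+  status, and fill in the DsBindInfo28 used for outgoing DsBind()
+  calls
+ */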
+static WERROR dreplsrv_connect_samdb(struct dreplsrv_service *service, struct loadparm_context *lp_ctx)
+{
+ const struct GUID *ntds_guid;
+ struct drsuapi_DsBindInfo28 *bind_info28;
+
+ service->samdb = samdb_connect(service,
+ service->task->event_ctx,
+ lp_ctx,
+ service->system_session_info,
+ NULL,
+ 0);
+ if (!service->samdb) {
+ return WERR_DS_UNAVAILABLE;
+ }
+
+ ntds_guid = samdb_ntds_objectGUID(service->samdb);
+ if (!ntds_guid) {
+ return WERR_DS_UNAVAILABLE;
+ }
+ service->ntds_guid = *ntds_guid;
+
+ if (samdb_rodc(service->samdb, &service->am_rodc) != LDB_SUCCESS) {
+ DEBUG(0,(__location__ ": Failed to determine RODC status\n"));
+ return WERR_DS_UNAVAILABLE;
+ }
+
+ bind_info28 = &service->bind_info28;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_BASE;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_ASYNC_REPLICATION;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_REMOVEAPI;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_MOVEREQ_V2;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_GETCHG_COMPRESS;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V1;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_RESTORE_USN_OPTIMIZATION;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_KCC_EXECUTE;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_ADDENTRY_V2;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_LINKED_VALUE_REPLICATION;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V2;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_INSTANCE_TYPE_NOT_REQ_ON_MOD;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_CRYPTO_BIND;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_GET_REPL_INFO;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_STRONG_ENCRYPTION;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V01;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_TRANSITIVE_MEMBERSHIP;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_ADD_SID_HISTORY;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_POST_BETA3;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V5;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_GET_MEMBERSHIPS2;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V6;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_NONDOMAIN_NCS;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V8;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V5;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V6;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_ADDENTRYREPLY_V3;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V7;
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_VERIFY_OBJECT;
+#if 0 /* we don't support XPRESS compression yet */
+ bind_info28->supported_extensions |= DRSUAPI_SUPPORTED_EXTENSION_XPRESS_COMPRESS;
+#endif
+ /* TODO: fill in site_guid */
+ bind_info28->site_guid = GUID_zero();
+ /* TODO: find out how this is really triggered! */
+ bind_info28->pid = 0;
+ bind_info28->repl_epoch = 0;
+
+ return WERR_OK;
+}
+
+
+/**
+ * Callback for dreplsrv_out_operation operation completion.
+ *
+ * We just need to complete a waiting IRPC message here.
+ * In case the pull operation has failed,
+ * the caller of this callback will dump
+ * failure information.
+ *
+ * NOTE: cb_data is allocated in IRPC msg's context
+ * and will be freed during irpc_send_reply() call.
+ */
+static void _drepl_replica_sync_done_cb(struct dreplsrv_service *service,
+ WERROR werr,
+ enum drsuapi_DsExtendedError ext_err,
+ void *cb_data)
+{
+ struct drepl_replica_sync_cb_data *data = talloc_get_type(cb_data,
+ struct drepl_replica_sync_cb_data);
+ struct irpc_message *msg = data->msg;
+ struct drsuapi_DsReplicaSync *r = data->r;
+
+ /* store last bad result */
+ if (!W_ERROR_IS_OK(werr)) {
+ data->werr_last_failure = werr;
+ }
+
+ /* decrement pending ops count */
+ data->ops_count--;
+
+ if (data->ops_count == 0) {
+ /* Return result to client */
+ r->out.result = data->werr_last_failure;
+
+ /* complete IRPC message */
+ irpc_send_reply(msg, NT_STATUS_OK);
+ }
+}
+
+/**
+ * Helper to schedule a replication operation with a source DSA.
+ * If 'data' is a valid pointer, then a callback
+ * for the operation is passed and 'data->msg' is
+ * marked as 'deferred' - defer_reply = true
+ */
+static WERROR _drepl_schedule_replication(struct dreplsrv_service *service,
+ struct dreplsrv_partition_source_dsa *dsa,
+ struct drsuapi_DsReplicaObjectIdentifier *nc,
+ uint32_t rep_options,
+ struct drepl_replica_sync_cb_data *data,
+ TALLOC_CTX *mem_ctx)
+{
+ WERROR werr;
+ dreplsrv_extended_callback_t fn_callback = NULL;
+
+ if (data) {
+ fn_callback = _drepl_replica_sync_done_cb;
+ }
+
+ /* schedule replication item */
+ werr = dreplsrv_schedule_partition_pull_source(service, dsa, rep_options,
+ DRSUAPI_EXOP_NONE, 0,
+ fn_callback, data);
+ if (!W_ERROR_IS_OK(werr)) {
+ DEBUG(0,("%s: failed setup of sync of partition (%s, %s, %s) - %s\n",
+ __FUNCTION__,
+ GUID_string(mem_ctx, &nc->guid),
+ nc->dn,
+ dsa->repsFrom1->other_info->dns_name,
+ win_errstr(werr)));
+ return werr;
+ }
+ /* log we've scheduled a replication item */
+ DEBUG(3,("%s: forcing sync of partition (%s, %s, %s)\n",
+ __FUNCTION__,
+ GUID_string(mem_ctx, &nc->guid),
+ nc->dn,
+ dsa->repsFrom1->other_info->dns_name));
+
+ /* mark IRPC message as deferred if necessary */
+ if (data) {
+ data->ops_count++;
+ data->msg->defer_reply = true;
+ }
+
+ return WERR_OK;
+}
+
+/*
+ DsReplicaSync messages from the DRSUAPI server are forwarded here
+ */
+static NTSTATUS drepl_replica_sync(struct irpc_message *msg,
+ struct drsuapi_DsReplicaSync *r)
+{
+ WERROR werr;
+ struct dreplsrv_partition *p;
+ struct drepl_replica_sync_cb_data *cb_data;
+ struct dreplsrv_partition_source_dsa *dsa;
+ struct drsuapi_DsReplicaSyncRequest1 *req1;
+ struct drsuapi_DsReplicaObjectIdentifier *nc;
+ struct dreplsrv_service *service = talloc_get_type(msg->private_data,
+ struct dreplsrv_service);
+
+#define REPLICA_SYNC_FAIL(_msg, _werr) do {\
+ if (!W_ERROR_IS_OK(_werr)) { \
+ DEBUG(0,(__location__ ": Failure - %s. werr = %s\n", \
+ _msg, win_errstr(_werr))); \
+ NDR_PRINT_IN_DEBUG(drsuapi_DsReplicaSync, r); \
+ } \
+ r->out.result = _werr; \
+ goto done;\
+ } while(0)
+
+
+ if (r->in.level != 1) {
+ REPLICA_SYNC_FAIL("Unsupported level",
+ WERR_DS_DRA_INVALID_PARAMETER);
+ }
+
+ req1 = &r->in.req->req1;
+ nc = req1->naming_context;
+
+ /* Check input parameters */
+ if (!nc) {
+ REPLICA_SYNC_FAIL("Invalid Naming Context",
+ WERR_DS_DRA_INVALID_PARAMETER);
+ }
+
+ /* Find Naming context to be synchronized */
+ werr = dreplsrv_partition_find_for_nc(service,
+ &nc->guid, &nc->sid, nc->dn,
+ &p);
+ if (!W_ERROR_IS_OK(werr)) {
+ REPLICA_SYNC_FAIL("Failed to find requested Naming Context",
+ werr);
+ }
+
+ /* should we process it asynchronously? */
+ if (req1->options & DRSUAPI_DRS_ASYNC_OP) {
+ cb_data = NULL;
+ } else {
+ cb_data = talloc_zero(msg, struct drepl_replica_sync_cb_data);
+ if (!cb_data) {
+ REPLICA_SYNC_FAIL("Not enough memory",
+ WERR_DS_DRA_INTERNAL_ERROR);
+ }
+
+ cb_data->msg = msg;
+ cb_data->r = r;
+ cb_data->werr_last_failure = WERR_OK;
+ }
+
+ /* collect source DSAs to sync with */
+ if (req1->options & DRSUAPI_DRS_SYNC_ALL) {
+ for (dsa = p->sources; dsa; dsa = dsa->next) {
+ /* schedule replication item */
+ werr = _drepl_schedule_replication(service, dsa, nc,
+ req1->options, cb_data, msg);
+ if (!W_ERROR_IS_OK(werr)) {
+ REPLICA_SYNC_FAIL("_drepl_schedule_replication() failed",
+ werr);
+ }
+ }
+ } else {
+ if (req1->options & DRSUAPI_DRS_SYNC_BYNAME) {
+			/* client should pass at least a valid string */
+ if (!req1->source_dsa_dns) {
+ REPLICA_SYNC_FAIL("'source_dsa_dns' is not valid",
+ WERR_DS_DRA_INVALID_PARAMETER);
+ }
+
+ werr = dreplsrv_partition_source_dsa_by_dns(p,
+ req1->source_dsa_dns,
+ &dsa);
+ } else {
+ /* client should pass at least some GUID */
+ if (GUID_all_zero(&req1->source_dsa_guid)) {
+ REPLICA_SYNC_FAIL("'source_dsa_guid' is not valid",
+ WERR_DS_DRA_INVALID_PARAMETER);
+ }
+
+ werr = dreplsrv_partition_source_dsa_by_guid(p,
+ &req1->source_dsa_guid,
+ &dsa);
+ if (W_ERROR_EQUAL(werr, WERR_DS_DRA_NO_REPLICA)) {
+ /* we don't have this source setup as
+ a replication partner. Create a
+ temporary dsa structure for this
+ replication */
+ werr = dreplsrv_partition_source_dsa_temporary(p,
+ msg,
+ &req1->source_dsa_guid,
+ &dsa);
+ }
+ }
+ if (!W_ERROR_IS_OK(werr)) {
+ REPLICA_SYNC_FAIL("Failed to locate source DSA for given NC",
+ werr);
+ }
+
+ /* schedule replication item */
+ werr = _drepl_schedule_replication(service, dsa, nc,
+ req1->options, cb_data, msg);
+ if (!W_ERROR_IS_OK(werr)) {
+ REPLICA_SYNC_FAIL("_drepl_schedule_replication() failed",
+ werr);
+ }
+ }
+
+ /* if we got here, everything is OK */
+ r->out.result = WERR_OK;
+
+ /*
+ * schedule replication event to force
+ * replication as soon as possible
+ */
+ dreplsrv_pendingops_schedule_pull_now(service);
+
+done:
+ return NT_STATUS_OK;
+}
+
+/**
+ * Called when dreplsrv should refresh its state.
+ * For example, when the KCC changes the topology,
+ * dreplsrv should update its cache.
+ *
+ * @param partition_dn If not empty/NULL, partition to update
+ */
+static NTSTATUS dreplsrv_refresh(struct irpc_message *msg,
+ struct dreplsrv_refresh *r)
+{
+ struct dreplsrv_service *s = talloc_get_type(msg->private_data,
+ struct dreplsrv_service);
+
+ r->out.result = dreplsrv_refresh_partitions(s);
+
+ return NT_STATUS_OK;
+}
+
+/**
+ * Called when the auth code wants us to try and replicate
+ * a user's secrets
+ */
+static NTSTATUS drepl_trigger_repl_secret(struct irpc_message *msg,
+ struct drepl_trigger_repl_secret *r)
+{
+ struct dreplsrv_service *service = talloc_get_type(msg->private_data,
+ struct dreplsrv_service);
+
+
+ drepl_repl_secret(service, r->in.user_dn);
+
+ /* we are not going to be sending a reply to this request */
+ msg->no_reply = true;
+
+ return NT_STATUS_OK;
+}
+
+
+/*
+ DsReplicaAdd messages from the DRSUAPI server are forwarded here
+ */
+static NTSTATUS dreplsrv_replica_add(struct irpc_message *msg,
+ struct drsuapi_DsReplicaAdd *r)
+{
+ struct dreplsrv_service *service = talloc_get_type(msg->private_data,
+ struct dreplsrv_service);
+ return drepl_replica_add(service, r);
+}
+
+/*
+ DsReplicaDel messages from the DRSUAPI server are forwarded here
+ */
+static NTSTATUS dreplsrv_replica_del(struct irpc_message *msg,
+ struct drsuapi_DsReplicaDel *r)
+{
+ struct dreplsrv_service *service = talloc_get_type(msg->private_data,
+ struct dreplsrv_service);
+ return drepl_replica_del(service, r);
+}
+
+/*
+ DsReplicaMod messages from the DRSUAPI server are forwarded here
+ */
+static NTSTATUS dreplsrv_replica_mod(struct irpc_message *msg,
+ struct drsuapi_DsReplicaMod *r)
+{
+ struct dreplsrv_service *service = talloc_get_type(msg->private_data,
+ struct dreplsrv_service);
+ return drepl_replica_mod(service, r);
+}
+
+
+/*
+ startup the dsdb replicator service task
+*/
+static NTSTATUS dreplsrv_task_init(struct task_server *task)
+{
+ WERROR status;
+ struct dreplsrv_service *service;
+ uint32_t periodic_startup_interval;
+
+ switch (lpcfg_server_role(task->lp_ctx)) {
+ case ROLE_STANDALONE:
+ task_server_terminate(task, "dreplsrv: no DSDB replication required in standalone configuration",
+ false);
+ return NT_STATUS_INVALID_DOMAIN_ROLE;
+ case ROLE_DOMAIN_MEMBER:
+ task_server_terminate(task, "dreplsrv: no DSDB replication required in domain member configuration",
+ false);
+ return NT_STATUS_INVALID_DOMAIN_ROLE;
+ case ROLE_ACTIVE_DIRECTORY_DC:
+ /* Yes, we want DSDB replication */
+ break;
+ }
+
+ task_server_set_title(task, "task[dreplsrv]");
+
+ service = talloc_zero(task, struct dreplsrv_service);
+ if (!service) {
+ task_server_terminate(task, "dreplsrv_task_init: out of memory", true);
+ return NT_STATUS_NO_MEMORY;
+ }
+ service->task = task;
+ service->startup_time = timeval_current();
+ task->private_data = service;
+
+ status = dreplsrv_init_creds(service);
+ if (!W_ERROR_IS_OK(status)) {
+ task_server_terminate(task, talloc_asprintf(task,
+ "dreplsrv: Failed to obtain server credentials: %s\n",
+ win_errstr(status)), true);
+ return werror_to_ntstatus(status);
+ }
+
+ status = dreplsrv_connect_samdb(service, task->lp_ctx);
+ if (!W_ERROR_IS_OK(status)) {
+ task_server_terminate(task, talloc_asprintf(task,
+ "dreplsrv: Failed to connect to local samdb: %s\n",
+ win_errstr(status)), true);
+ return werror_to_ntstatus(status);
+ }
+
+ status = dreplsrv_load_partitions(service);
+ if (!W_ERROR_IS_OK(status)) {
+ task_server_terminate(task, talloc_asprintf(task,
+ "dreplsrv: Failed to load partitions: %s\n",
+ win_errstr(status)), true);
+ return werror_to_ntstatus(status);
+ }
+
+ periodic_startup_interval = lpcfg_parm_int(task->lp_ctx, NULL, "dreplsrv", "periodic_startup_interval", 15); /* in seconds */
+ service->periodic.interval = lpcfg_parm_int(task->lp_ctx, NULL, "dreplsrv", "periodic_interval", 300); /* in seconds */
+
+ status = dreplsrv_periodic_schedule(service, periodic_startup_interval);
+ if (!W_ERROR_IS_OK(status)) {
+ task_server_terminate(task, talloc_asprintf(task,
+							    "dreplsrv: Failed to setup periodic schedule: %s\n",
+ win_errstr(status)), true);
+ return werror_to_ntstatus(status);
+ }
+
+ service->pending.im = tevent_create_immediate(service);
+ if (service->pending.im == NULL) {
+ task_server_terminate(task,
+ "dreplsrv: Failed to create immediate "
+ "task for future DsReplicaSync\n",
+ true);
+ return NT_STATUS_NO_MEMORY;
+ }
+
+	/* if we are an RODC then we do not send DsReplicaSync */
+ if (!service->am_rodc) {
+ service->notify.interval = lpcfg_parm_int(task->lp_ctx, NULL, "dreplsrv",
+ "notify_interval", 5); /* in seconds */
+ status = dreplsrv_notify_schedule(service, service->notify.interval);
+ if (!W_ERROR_IS_OK(status)) {
+ task_server_terminate(task, talloc_asprintf(task,
+ "dreplsrv: Failed to setup notify schedule: %s\n",
+ win_errstr(status)), true);
+ return werror_to_ntstatus(status);
+ }
+ }
+
+ irpc_add_name(task->msg_ctx, "dreplsrv");
+
+ IRPC_REGISTER(task->msg_ctx, irpc, DREPLSRV_REFRESH, dreplsrv_refresh, service);
+ IRPC_REGISTER(task->msg_ctx, drsuapi, DRSUAPI_DSREPLICASYNC, drepl_replica_sync, service);
+ IRPC_REGISTER(task->msg_ctx, drsuapi, DRSUAPI_DSREPLICAADD, dreplsrv_replica_add, service);
+ IRPC_REGISTER(task->msg_ctx, drsuapi, DRSUAPI_DSREPLICADEL, dreplsrv_replica_del, service);
+ IRPC_REGISTER(task->msg_ctx, drsuapi, DRSUAPI_DSREPLICAMOD, dreplsrv_replica_mod, service);
+ IRPC_REGISTER(task->msg_ctx, irpc, DREPL_TAKEFSMOROLE, drepl_take_FSMO_role, service);
+ IRPC_REGISTER(task->msg_ctx, irpc, DREPL_TRIGGER_REPL_SECRET, drepl_trigger_repl_secret, service);
+ imessaging_register(task->msg_ctx, service, MSG_DREPL_ALLOCATE_RID, dreplsrv_allocate_rid);
+
+ return NT_STATUS_OK;
+}
+
+/*
+  register ourselves as an available server
+*/
+NTSTATUS server_service_drepl_init(TALLOC_CTX *ctx)
+{
+ static const struct service_details details = {
+ .inhibit_fork_on_accept = true,
+ .inhibit_pre_fork = true,
+ .task_init = dreplsrv_task_init,
+ .post_fork = NULL,
+ };
+ return register_server_service(ctx, "drepl", &details);
+}
diff --git a/source4/dsdb/repl/drepl_service.h b/source4/dsdb/repl/drepl_service.h
new file mode 100644
index 0000000..6e57759
--- /dev/null
+++ b/source4/dsdb/repl/drepl_service.h
@@ -0,0 +1,251 @@
+/*
+ Unix SMB/CIFS Implementation.
+ DSDB replication service
+
+ Copyright (C) Stefan Metzmacher 2007
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#ifndef _DSDB_REPL_DREPL_SERVICE_H_
+#define _DSDB_REPL_DREPL_SERVICE_H_
+
+#include "librpc/gen_ndr/ndr_drsuapi_c.h"
+
+struct dreplsrv_service;
+struct dreplsrv_partition;
+
+struct dreplsrv_drsuapi_connection {
+ /*
+ * this pipe pointer is also the indicator
+ * for a valid connection
+ */
+ struct dcerpc_pipe *pipe;
+ struct dcerpc_binding_handle *drsuapi_handle;
+
+ DATA_BLOB gensec_skey;
+ struct drsuapi_DsBindInfo28 remote_info28;
+ struct policy_handle bind_handle;
+};
+
+struct dreplsrv_out_connection {
+ struct dreplsrv_out_connection *prev, *next;
+
+ struct dreplsrv_service *service;
+
+ /*
+ * the binding for the outgoing connection
+ */
+ struct dcerpc_binding *binding;
+
+	/* the outgoing connection to the source dsa */
+ struct dreplsrv_drsuapi_connection *drsuapi;
+};
+
+struct dreplsrv_partition_source_dsa {
+ struct dreplsrv_partition_source_dsa *prev, *next;
+
+ struct dreplsrv_partition *partition;
+
+ /*
+ * the cached repsFrom value for this source dsa
+ *
+ * it needs to be updated after each DsGetNCChanges() call
+ * to the source dsa
+ *
+ * repsFrom1 == &_repsFromBlob.ctr.ctr1
+ */
+ struct repsFromToBlob _repsFromBlob;
+ struct repsFromTo1 *repsFrom1;
+
+ /* the last uSN when we sent a notify */
+ uint64_t notify_uSN;
+
+ /* the reference to the source_dsa and its outgoing connection */
+ struct dreplsrv_out_connection *conn;
+};
+
+struct dreplsrv_partition {
+ struct dreplsrv_partition *prev, *next;
+
+ struct dreplsrv_service *service;
+
+ /* the dn of the partition */
+ struct ldb_dn *dn;
+ struct drsuapi_DsReplicaObjectIdentifier nc;
+
+ /*
+ * uptodate vector needs to be updated before and after each DsGetNCChanges() call
+ *
+ * - before: we need to use our own invocationId together with our highestCommittedUSN
+ * - after: we need to merge in the remote uptodatevector, to avoid reading it again
+ */
+ struct replUpToDateVectorCtr2 uptodatevector;
+ struct drsuapi_DsReplicaCursorCtrEx uptodatevector_ex;
+
+ /*
+ * a linked list of all source dsa's we replicate from
+ */
+ struct dreplsrv_partition_source_dsa *sources;
+
+ /*
+ * a linked list of all source dsa's we will notify,
+ * that are not also in sources
+ */
+ struct dreplsrv_partition_source_dsa *notifies;
+
+ bool partial_replica;
+ bool rodc_replica;
+};
+
+typedef void (*dreplsrv_extended_callback_t)(struct dreplsrv_service *,
+ WERROR,
+ enum drsuapi_DsExtendedError,
+ void *cb_data);
+
+struct dreplsrv_out_operation {
+ struct dreplsrv_out_operation *prev, *next;
+ time_t schedule_time;
+
+ struct dreplsrv_service *service;
+
+ struct dreplsrv_partition_source_dsa *source_dsa;
+
+ /* replication options - currently used by DsReplicaSync */
+ uint32_t options;
+ enum drsuapi_DsExtendedOperation extended_op;
+ uint64_t fsmo_info;
+ enum drsuapi_DsExtendedError extended_ret;
+ dreplsrv_extended_callback_t callback;
+ void *cb_data;
+ /* more replication flags - used by DsReplicaSync GET_TGT */
+ uint32_t more_flags;
+};
+
+struct dreplsrv_notify_operation {
+ struct dreplsrv_notify_operation *prev, *next;
+ time_t schedule_time;
+
+ struct dreplsrv_service *service;
+ uint64_t uSN;
+
+ struct dreplsrv_partition_source_dsa *source_dsa;
+ bool is_urgent;
+ uint32_t replica_flags;
+};
+
+struct dreplsrv_service {
+ /* the whole drepl service is in one task */
+ struct task_server *task;
+
+ /* the time the service was started */
+ struct timeval startup_time;
+
+ /*
+ * system session info
+ * with machine account credentials
+ */
+ struct auth_session_info *system_session_info;
+
+ /*
+ * a connection to the local samdb
+ */
+ struct ldb_context *samdb;
+
+ /* the guid of our NTDS Settings object, which never changes! */
+ struct GUID ntds_guid;
+ /*
+ * the struct holds the values used for outgoing DsBind() calls,
+	 * so that we only need to set them up once
+ */
+ struct drsuapi_DsBindInfo28 bind_info28;
+
+ /* some stuff for periodic processing */
+ struct {
+ /*
+		 * the interval between two periodic runs
+ */
+ uint32_t interval;
+
+ /*
+ * the timestamp for the next event,
+		 * this is the timestamp passed to event_add_timed()
+ */
+ struct timeval next_event;
+
+		/* here we have a reference to the timed event that schedules the periodic stuff */
+ struct tevent_timer *te;
+ } periodic;
+
+ /* some stuff for running only the incoming notify ops */
+ struct {
+ /*
+		 * here we have a reference to the immediate event that was
+ * scheduled from the DsReplicaSync
+ */
+ struct tevent_immediate *im;
+ } pending;
+
+ /* some stuff for notify processing */
+ struct {
+ /*
+ * the interval between notify runs
+ */
+ uint32_t interval;
+
+ /*
+ * the timestamp for the next event,
+		 * this is the timestamp passed to event_add_timed()
+ */
+ struct timeval next_event;
+
+		/* here we have a reference to the timed event that schedules the notifies */
+ struct tevent_timer *te;
+ } notify;
+
+ /*
+ * the list of partitions we need to replicate
+ */
+ struct dreplsrv_partition *partitions;
+
+ /*
+ * the list of cached connections
+ */
+ struct dreplsrv_out_connection *connections;
+
+ struct {
+ /* the pointer to the current active operation */
+ struct dreplsrv_out_operation *current;
+
+ /* the list of pending operations */
+ struct dreplsrv_out_operation *pending;
+
+ /* the list of pending notify operations */
+ struct dreplsrv_notify_operation *notifies;
+
+ /* an active notify operation */
+ struct dreplsrv_notify_operation *n_current;
+ } ops;
+
+ bool rid_alloc_in_progress;
+
+ bool am_rodc;
+};
+
+#include "lib/messaging/irpc.h"
+#include "dsdb/repl/drepl_out_helpers.h"
+#include "dsdb/repl/drepl_service_proto.h"
+
+#endif /* _DSDB_REPL_DREPL_SERVICE_H_ */
diff --git a/source4/dsdb/repl/replicated_objects.c b/source4/dsdb/repl/replicated_objects.c
new file mode 100644
index 0000000..6a07a88
--- /dev/null
+++ b/source4/dsdb/repl/replicated_objects.c
@@ -0,0 +1,1283 @@
+/*
+ Unix SMB/CIFS Implementation.
+ Helper functions for applying replicated objects
+
+ Copyright (C) Stefan Metzmacher <metze@samba.org> 2007
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#include "includes.h"
+#include "dsdb/samdb/samdb.h"
+#include <ldb_errors.h>
+#include "../lib/util/dlinklist.h"
+#include "librpc/gen_ndr/ndr_misc.h"
+#include "librpc/gen_ndr/ndr_drsuapi.h"
+#include "librpc/gen_ndr/ndr_drsblobs.h"
+#include "../libcli/drsuapi/drsuapi.h"
+#include "libcli/auth/libcli_auth.h"
+#include "param/param.h"
+
+#undef DBGC_CLASS
+#define DBGC_CLASS DBGC_DRS_REPL
+
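+/*
+  merge classes and attributes from ref_schema that are not yet in
+  dest_schema (as shallow copies) and rebuild the sorted accessors
+  on dest_schema
+ */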
+static WERROR dsdb_repl_merge_working_schema(struct ldb_context *ldb,
+ struct dsdb_schema *dest_schema,
+ const struct dsdb_schema *ref_schema)
+{
+ const struct dsdb_class *cur_class = NULL;
+ const struct dsdb_attribute *cur_attr = NULL;
+ int ret;
+
+ for (cur_class = ref_schema->classes;
+ cur_class;
+ cur_class = cur_class->next)
+ {
+ const struct dsdb_class *tmp1;
+ struct dsdb_class *tmp2;
+
+ tmp1 = dsdb_class_by_governsID_id(dest_schema,
+ cur_class->governsID_id);
+ if (tmp1 != NULL) {
+ continue;
+ }
+
+ /*
+		 * Do a shallow copy so that the original next and prev are
+		 * not modified. We don't need to do a deep copy,
+		 * as the rest won't be modified and this is a
+		 * short-lived object.
+ */
+ tmp2 = talloc(dest_schema, struct dsdb_class);
+ if (tmp2 == NULL) {
+ return WERR_NOT_ENOUGH_MEMORY;
+ }
+ *tmp2 = *cur_class;
+ DLIST_ADD(dest_schema->classes, tmp2);
+ }
+
+ for (cur_attr = ref_schema->attributes;
+ cur_attr;
+ cur_attr = cur_attr->next)
+ {
+ const struct dsdb_attribute *tmp1;
+ struct dsdb_attribute *tmp2;
+
+ tmp1 = dsdb_attribute_by_attributeID_id(dest_schema,
+ cur_attr->attributeID_id);
+ if (tmp1 != NULL) {
+ continue;
+ }
+
+ /*
+		 * Do a shallow copy so that the original next and prev are
+		 * not modified. We don't need to do a deep copy,
+		 * as the rest won't be modified and this is a
+		 * short-lived object.
+ */
+ tmp2 = talloc(dest_schema, struct dsdb_attribute);
+ if (tmp2 == NULL) {
+ return WERR_NOT_ENOUGH_MEMORY;
+ }
+ *tmp2 = *cur_attr;
+ DLIST_ADD(dest_schema->attributes, tmp2);
+ }
+
+ ret = dsdb_setup_sorted_accessors(ldb, dest_schema);
+ if (LDB_SUCCESS != ret) {
+ DEBUG(0,("Failed to add new attribute to reference schema!\n"));
+ return WERR_INTERNAL_ERROR;
+ }
+
+ return WERR_OK;
+}
+
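+/*
+  resolve the replicated schema objects in multiple passes,
+  converting as many objects as possible with the schema known so
+  far and merging the results into resulting_schema until all
+  objects are converted
+ */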
+WERROR dsdb_repl_resolve_working_schema(struct ldb_context *ldb,
+ struct dsdb_schema_prefixmap *pfm_remote,
+ uint32_t cycle_before_switching,
+ struct dsdb_schema *initial_schema,
+ struct dsdb_schema *resulting_schema,
+ uint32_t object_count,
+ const struct drsuapi_DsReplicaObjectListItemEx *first_object)
+{
+ struct schema_list {
+ struct schema_list *next, *prev;
+ const struct drsuapi_DsReplicaObjectListItemEx *obj;
+ };
+ struct schema_list *schema_list = NULL, *schema_list_item, *schema_list_next_item;
+ WERROR werr;
+ struct dsdb_schema *working_schema;
+ const struct drsuapi_DsReplicaObjectListItemEx *cur;
+ DATA_BLOB empty_key = data_blob_null;
+ int ret, pass_no;
+ uint32_t ignore_attids[] = {
+ DRSUAPI_ATTID_auxiliaryClass,
+ DRSUAPI_ATTID_mayContain,
+ DRSUAPI_ATTID_mustContain,
+ DRSUAPI_ATTID_possSuperiors,
+ DRSUAPI_ATTID_systemPossSuperiors,
+ DRSUAPI_ATTID_INVALID
+ };
+ TALLOC_CTX *frame = talloc_stackframe();
+
+ /* create a list of objects yet to be converted */
+ for (cur = first_object; cur; cur = cur->next_object) {
+ schema_list_item = talloc(frame, struct schema_list);
+ if (schema_list_item == NULL) {
+ return WERR_NOT_ENOUGH_MEMORY;
+ }
+
+ schema_list_item->obj = cur;
+ DLIST_ADD_END(schema_list, schema_list_item);
+ }
+
+ /* resolve objects until all are resolved and in local schema */
+ pass_no = 1;
+ working_schema = initial_schema;
+
+ while (schema_list) {
+ uint32_t converted_obj_count = 0;
+ uint32_t failed_obj_count = 0;
+
+ if (resulting_schema != working_schema) {
+ /*
+			 * If the self-made schema is not the schema used to
+			 * translate and validate the replicated objects
+			 * (which means we are using the bootstrap schema),
+			 * then add the attributes and classes that have
+			 * already been translated to the working schema.
+			 * The idea is that we might need those new attributes
+			 * and classes to be able to translate critical
+			 * replicated objects that we otherwise could not.
+ */
+ werr = dsdb_repl_merge_working_schema(ldb,
+ working_schema,
+ resulting_schema);
+ if (!W_ERROR_IS_OK(werr)) {
+ talloc_free(frame);
+ return werr;
+ }
+ }
+
+ for (schema_list_item = schema_list;
+ schema_list_item;
+ schema_list_item=schema_list_next_item) {
+ struct dsdb_extended_replicated_object object;
+
+ cur = schema_list_item->obj;
+
+ /*
+			 * Save the next item now, so that we can
+			 * safely DLIST_REMOVE the current one
+			 * later in this iteration
+ */
+ schema_list_next_item = schema_list_item->next;
+
+ /*
+ * Convert the objects into LDB messages using the
+ * schema we have so far. It's ok if we fail to convert
+ * an object. We should convert more objects on next pass.
+ */
+ werr = dsdb_convert_object_ex(ldb, working_schema,
+ NULL,
+ pfm_remote,
+ cur, &empty_key,
+ ignore_attids,
+ 0,
+ schema_list_item, &object);
+ if (!W_ERROR_IS_OK(werr)) {
+ DEBUG(4,("debug: Failed to convert schema "
+ "object %s into ldb msg, "
+ "will try during next loop\n",
+ cur->object.identifier->dn));
+
+ failed_obj_count++;
+ } else {
+ /*
+ * Convert the schema from ldb_message format
+ * (OIDs as OID strings) into schema, using
+ * the remote prefixMap
+ *
+ * It's not likely, but possible to get the
+ * same object twice and we should keep
+ * the last instance.
+ */
+ werr = dsdb_schema_set_el_from_ldb_msg_dups(ldb,
+ resulting_schema,
+ object.msg,
+ true);
+ if (!W_ERROR_IS_OK(werr)) {
+ DEBUG(4,("debug: failed to convert "
+ "object %s into a schema element, "
+ "will try during next loop: %s\n",
+ ldb_dn_get_linearized(object.msg->dn),
+ win_errstr(werr)));
+ failed_obj_count++;
+ } else {
+ DEBUG(8,("Converted object %s into a schema element\n",
+ ldb_dn_get_linearized(object.msg->dn)));
+ DLIST_REMOVE(schema_list, schema_list_item);
+ TALLOC_FREE(schema_list_item);
+ converted_obj_count++;
+ }
+ }
+ }
+
+ DEBUG(4,("Schema load pass %d: converted %d, %d of %d objects left to be converted.\n",
+ pass_no, converted_obj_count, failed_obj_count, object_count));
+
+ /* check if we converted any objects in this pass */
+ if (converted_obj_count == 0) {
+ DEBUG(0,("Can't continue Schema load: "
+ "didn't manage to convert any objects: "
+ "all %d remaining of %d objects "
+ "failed to convert\n",
+ failed_obj_count, object_count));
+ talloc_free(frame);
+ return WERR_INTERNAL_ERROR;
+ }
+
+ /*
+		 * Don't try to load the schema if there are missing objects
+ * _and_ we are on the first pass as some critical objects
+ * might be missing.
+ */
+ if (failed_obj_count == 0 || pass_no > cycle_before_switching) {
+ /* prepare for another cycle */
+ working_schema = resulting_schema;
+
+ ret = dsdb_setup_sorted_accessors(ldb, working_schema);
+ if (LDB_SUCCESS != ret) {
+ DEBUG(0,("Failed to create schema-cache indexes!\n"));
+ talloc_free(frame);
+ return WERR_INTERNAL_ERROR;
+ }
+ }
+ pass_no++;
+ }
+
+ talloc_free(frame);
+ return WERR_OK;
+}
+
+/**
+ * Multi-pass working schema creation
+ * Function will:
+ * - shallow copy initial schema supplied
+ * - create a working schema in multiple passes
+ * until all objects are resolved
+ * Working schema is a schema with Attributes, Classes
+ * and indexes, but w/o subClassOf, possibleSuperiors etc.
+ * It is to be used just as a cache for converting attribute values.
+ */
+WERROR dsdb_repl_make_working_schema(struct ldb_context *ldb,
+ const struct dsdb_schema *initial_schema,
+ const struct drsuapi_DsReplicaOIDMapping_Ctr *mapping_ctr,
+ uint32_t object_count,
+ const struct drsuapi_DsReplicaObjectListItemEx *first_object,
+ const DATA_BLOB *gensec_skey,
+ TALLOC_CTX *mem_ctx,
+ struct dsdb_schema **_schema_out)
+{
+ WERROR werr;
+ struct dsdb_schema_prefixmap *pfm_remote;
+ uint32_t r;
+ struct dsdb_schema *working_schema;
+
+	/* make a copy of the initial_schema so we don't mess with it */
+ working_schema = dsdb_schema_copy_shallow(mem_ctx, ldb, initial_schema);
+ if (!working_schema) {
+ DEBUG(0,(__location__ ": schema copy failed!\n"));
+ return WERR_NOT_ENOUGH_MEMORY;
+ }
+ working_schema->resolving_in_progress = true;
+
+ /* we are going to need remote prefixMap for decoding */
+ werr = dsdb_schema_pfm_from_drsuapi_pfm(mapping_ctr, true,
+ working_schema, &pfm_remote, NULL);
+ if (!W_ERROR_IS_OK(werr)) {
+ DEBUG(0,(__location__ ": Failed to decode remote prefixMap: %s\n",
+ win_errstr(werr)));
+ talloc_free(working_schema);
+ return werr;
+ }
+
+ for (r=0; r < pfm_remote->length; r++) {
+ const struct dsdb_schema_prefixmap_oid *rm = &pfm_remote->prefixes[r];
+ bool found_oid = false;
+ uint32_t l;
+
+ for (l=0; l < working_schema->prefixmap->length; l++) {
+ const struct dsdb_schema_prefixmap_oid *lm = &working_schema->prefixmap->prefixes[l];
+ int cmp;
+
+ cmp = data_blob_cmp(&rm->bin_oid, &lm->bin_oid);
+ if (cmp == 0) {
+ found_oid = true;
+ break;
+ }
+ }
+
+ if (found_oid) {
+ continue;
+ }
+
+ /*
+		 * We prefer the same id as we got from the remote peer
+ * if there's no conflict.
+ */
+ werr = dsdb_schema_pfm_add_entry(working_schema->prefixmap,
+ rm->bin_oid, &rm->id, NULL);
+ if (!W_ERROR_IS_OK(werr)) {
+ DEBUG(0,(__location__ ": Failed to merge remote prefixMap: %s",
+ win_errstr(werr)));
+ talloc_free(working_schema);
+ return werr;
+ }
+ }
+
+ werr = dsdb_repl_resolve_working_schema(ldb,
+ pfm_remote,
+ 0, /* cycle_before_switching */
+ working_schema,
+ working_schema,
+ object_count,
+ first_object);
+ if (!W_ERROR_IS_OK(werr)) {
+ DEBUG(0, ("%s: dsdb_repl_resolve_working_schema() failed: %s",
+ __location__, win_errstr(werr)));
+ talloc_free(working_schema);
+ return werr;
+ }
+
+ working_schema->resolving_in_progress = false;
+
+ *_schema_out = working_schema;
+
+ return WERR_OK;
+}
+
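+/*
+  check whether attid appears in a DRSUAPI_ATTID_INVALID terminated
+  list of attids
+ */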
+static bool dsdb_attid_in_list(const uint32_t attid_list[], uint32_t attid)
+{
+ const uint32_t *cur;
+ if (!attid_list) {
+ return false;
+ }
+ for (cur = attid_list; *cur != DRSUAPI_ATTID_INVALID; cur++) {
+ if (*cur == attid) {
+ return true;
+ }
+ }
+ return false;
+}
+
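+/*
+  convert a single replicated object into an ldb message and the
+  matching replPropertyMetaData blob, decrypting encrypted attribute
+  values and validating instanceType and the RDN on the way
+ */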
+WERROR dsdb_convert_object_ex(struct ldb_context *ldb,
+ const struct dsdb_schema *schema,
+ struct ldb_dn *partition_dn,
+ const struct dsdb_schema_prefixmap *pfm_remote,
+ const struct drsuapi_DsReplicaObjectListItemEx *in,
+ const DATA_BLOB *gensec_skey,
+ const uint32_t *ignore_attids,
+ uint32_t dsdb_repl_flags,
+ TALLOC_CTX *mem_ctx,
+ struct dsdb_extended_replicated_object *out)
+{
+ WERROR status = WERR_OK;
+ uint32_t i;
+ struct ldb_message *msg;
+ struct replPropertyMetaDataBlob *md;
+ int instanceType;
+ struct ldb_message_element *instanceType_e = NULL;
+ NTTIME whenChanged = 0;
+ time_t whenChanged_t;
+ const char *whenChanged_s;
+ struct dom_sid *sid = NULL;
+ uint32_t rid = 0;
+ uint32_t attr_count;
+
+ if (!in->object.identifier) {
+ return WERR_FOOBAR;
+ }
+
+ if (!in->object.identifier->dn || !in->object.identifier->dn[0]) {
+ return WERR_FOOBAR;
+ }
+
+ if (in->object.attribute_ctr.num_attributes != 0 && !in->meta_data_ctr) {
+ return WERR_FOOBAR;
+ }
+
+ if (in->object.attribute_ctr.num_attributes != in->meta_data_ctr->count) {
+ return WERR_FOOBAR;
+ }
+
+ sid = &in->object.identifier->sid;
+ if (sid->num_auths > 0) {
+ rid = sid->sub_auths[sid->num_auths - 1];
+ }
+
+ msg = ldb_msg_new(mem_ctx);
+ W_ERROR_HAVE_NO_MEMORY(msg);
+
+ msg->dn = ldb_dn_new(msg, ldb, in->object.identifier->dn);
+ W_ERROR_HAVE_NO_MEMORY(msg->dn);
+
+ msg->num_elements = in->object.attribute_ctr.num_attributes;
+ msg->elements = talloc_array(msg, struct ldb_message_element,
+ msg->num_elements);
+ W_ERROR_HAVE_NO_MEMORY(msg->elements);
+
+ md = talloc(mem_ctx, struct replPropertyMetaDataBlob);
+ W_ERROR_HAVE_NO_MEMORY(md);
+
+ md->version = 1;
+ md->reserved = 0;
+ md->ctr.ctr1.count = in->meta_data_ctr->count;
+ md->ctr.ctr1.reserved = 0;
+ md->ctr.ctr1.array = talloc_array(mem_ctx,
+ struct replPropertyMetaData1,
+ md->ctr.ctr1.count);
+ W_ERROR_HAVE_NO_MEMORY(md->ctr.ctr1.array);
+
+ for (i=0, attr_count=0; i < in->meta_data_ctr->count; i++, attr_count++) {
+ struct drsuapi_DsReplicaAttribute *a;
+ struct drsuapi_DsReplicaMetaData *d;
+ struct replPropertyMetaData1 *m;
+ struct ldb_message_element *e;
+ uint32_t j;
+
+ a = &in->object.attribute_ctr.attributes[i];
+ d = &in->meta_data_ctr->meta_data[i];
+ m = &md->ctr.ctr1.array[attr_count];
+ e = &msg->elements[attr_count];
+
+ if (dsdb_attid_in_list(ignore_attids, a->attid)) {
+ attr_count--;
+ continue;
+ }
+
+ if (GUID_all_zero(&d->originating_invocation_id)) {
+ status = WERR_DS_SRC_GUID_MISMATCH;
+ DEBUG(0, ("Refusing replication of object containing invalid zero invocationID on attribute %d of %s: %s\n",
+ a->attid,
+ ldb_dn_get_linearized(msg->dn),
+ win_errstr(status)));
+ return status;
+ }
+
+ if (a->attid == DRSUAPI_ATTID_instanceType) {
+ if (instanceType_e != NULL) {
+ return WERR_FOOBAR;
+ }
+ instanceType_e = e;
+ }
+
+ for (j=0; j<a->value_ctr.num_values; j++) {
+ status = drsuapi_decrypt_attribute(a->value_ctr.values[j].blob,
+ gensec_skey, rid,
+ dsdb_repl_flags, a);
+ if (!W_ERROR_IS_OK(status)) {
+ break;
+ }
+ }
+ if (W_ERROR_EQUAL(status, WERR_TOO_MANY_SECRETS)) {
+ WERROR get_name_status = dsdb_attribute_drsuapi_to_ldb(ldb, schema, pfm_remote,
+ a, msg->elements, e, NULL);
+ if (W_ERROR_IS_OK(get_name_status)) {
+				DEBUG(0, ("Unexpectedly got secret value %s on %s from DRS server\n",
+ e->name, ldb_dn_get_linearized(msg->dn)));
+ } else {
+				DEBUG(0, ("Unexpectedly got secret value on %s from DRS server\n",
+ ldb_dn_get_linearized(msg->dn)));
+ }
+ } else if (!W_ERROR_IS_OK(status)) {
+ return status;
+ }
+
+ /*
+ * This function also fills in the local attid value,
+ * based on comparing the remote and local prefixMap
+ * tables. If we don't convert the value, then we can
+ * have invalid values in the replPropertyMetaData we
+ * store on disk, as the prefixMap is per host, not
+ * per-domain. This may be why Microsoft added the
+ * msDS-IntID feature, however this is not used for
+ * extra attributes in the schema partition itself.
+ */
+ status = dsdb_attribute_drsuapi_to_ldb(ldb, schema, pfm_remote,
+ a, msg->elements, e,
+ &m->attid);
+ W_ERROR_NOT_OK_RETURN(status);
+
+ m->version = d->version;
+ m->originating_change_time = d->originating_change_time;
+ m->originating_invocation_id = d->originating_invocation_id;
+ m->originating_usn = d->originating_usn;
+ m->local_usn = 0;
+
+ if (a->attid == DRSUAPI_ATTID_name) {
+ const struct ldb_val *rdn_val = ldb_dn_get_rdn_val(msg->dn);
+ if (rdn_val == NULL) {
+				DEBUG(0, ("Unexpectedly unable to get RDN from %s for validation\n",
+ ldb_dn_get_linearized(msg->dn)));
+ return WERR_FOOBAR;
+ }
+ if (e->num_values != 1) {
+				DEBUG(0, ("Unexpectedly got wrong number of attribute values (got %u, expected 1) when checking RDN against name of %s\n",
+ e->num_values,
+ ldb_dn_get_linearized(msg->dn)));
+ return WERR_FOOBAR;
+ }
+ if (data_blob_cmp(rdn_val,
+ &e->values[0]) != 0) {
+				DEBUG(0, ("Unexpectedly got mismatching RDN values when checking RDN against name of %s\n",
+ ldb_dn_get_linearized(msg->dn)));
+ return WERR_FOOBAR;
+ }
+ }
+ if (d->originating_change_time > whenChanged) {
+ whenChanged = d->originating_change_time;
+ }
+
+ }
+
+ msg->num_elements = attr_count;
+ md->ctr.ctr1.count = attr_count;
+
+ if (instanceType_e == NULL) {
+ return WERR_FOOBAR;
+ }
+
+ instanceType = ldb_msg_find_attr_as_int(msg, "instanceType", 0);
+
+ if ((instanceType & INSTANCE_TYPE_IS_NC_HEAD)
+ && partition_dn != NULL) {
+ int partition_dn_cmp = ldb_dn_compare(partition_dn, msg->dn);
+ if (partition_dn_cmp != 0) {
+ DEBUG(4, ("Remote server advised us of a new partition %s while processing %s, ignoring\n",
+ ldb_dn_get_linearized(msg->dn),
+ ldb_dn_get_linearized(partition_dn)));
+ return WERR_DS_ADD_REPLICA_INHIBITED;
+ }
+ }
+
+ if (dsdb_repl_flags & DSDB_REPL_FLAG_PARTIAL_REPLICA) {
+		/* the instanceType for partial_replica
+ replication is sent via DRS with TYPE_WRITE set, but
+ must be used on the client with TYPE_WRITE removed
+ */
+ if (instanceType & INSTANCE_TYPE_WRITE) {
+ /*
+ * Make sure we do not change the order
+ * of msg->elements!
+ *
+ * That's why we use
+ * instanceType_e->num_values = 0
+ * instead of
+ * ldb_msg_remove_attr(msg, "instanceType");
+ */
+ struct ldb_message_element *e;
+
+ e = ldb_msg_find_element(msg, "instanceType");
+ if (e != instanceType_e) {
+ DEBUG(0,("instanceType_e[%p] changed to e[%p]\n",
+ instanceType_e, e));
+ return WERR_FOOBAR;
+ }
+
+ instanceType_e->num_values = 0;
+
+ instanceType &= ~INSTANCE_TYPE_WRITE;
+ if (ldb_msg_add_fmt(msg, "instanceType", "%d", instanceType) != LDB_SUCCESS) {
+ return WERR_INTERNAL_ERROR;
+ }
+ }
+ } else {
+ if (!(instanceType & INSTANCE_TYPE_WRITE)) {
+ DBG_ERR("Refusing to replicate %s from a read-only "
+ "replica into a read-write replica!\n",
+ ldb_dn_get_linearized(msg->dn));
+ return WERR_DS_DRA_SOURCE_IS_PARTIAL_REPLICA;
+ }
+ }
+
+ whenChanged_t = nt_time_to_unix(whenChanged);
+ whenChanged_s = ldb_timestring(msg, whenChanged_t);
+ W_ERROR_HAVE_NO_MEMORY(whenChanged_s);
+
+ out->object_guid = in->object.identifier->guid;
+
+ if (in->parent_object_guid == NULL) {
+ out->parent_guid = NULL;
+ } else {
+ out->parent_guid = talloc(mem_ctx, struct GUID);
+ W_ERROR_HAVE_NO_MEMORY(out->parent_guid);
+ *out->parent_guid = *in->parent_object_guid;
+ }
+
+ out->msg = msg;
+ out->when_changed = whenChanged_s;
+ out->meta_data = md;
+ return WERR_OK;
+}
+
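+/*
+  convert a chunk of replicated objects and linked attributes, as
+  received from DsGetNCChanges(), into a dsdb_extended_replicated_objects
+  structure ready to be committed
+ */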
+WERROR dsdb_replicated_objects_convert(struct ldb_context *ldb,
+ const struct dsdb_schema *schema,
+ struct ldb_dn *partition_dn,
+ const struct drsuapi_DsReplicaOIDMapping_Ctr *mapping_ctr,
+ uint32_t object_count,
+ const struct drsuapi_DsReplicaObjectListItemEx *first_object,
+ uint32_t linked_attributes_count,
+ const struct drsuapi_DsReplicaLinkedAttribute *linked_attributes,
+ const struct repsFromTo1 *source_dsa,
+ const struct drsuapi_DsReplicaCursor2CtrEx *uptodateness_vector,
+ const DATA_BLOB *gensec_skey,
+ uint32_t dsdb_repl_flags,
+ TALLOC_CTX *mem_ctx,
+ struct dsdb_extended_replicated_objects **objects)
+{
+ WERROR status;
+ struct dsdb_schema_prefixmap *pfm_remote;
+ struct dsdb_extended_replicated_objects *out;
+ const struct drsuapi_DsReplicaObjectListItemEx *cur;
+ struct dsdb_syntax_ctx syntax_ctx;
+ uint32_t i;
+
+ out = talloc_zero(mem_ctx, struct dsdb_extended_replicated_objects);
+ W_ERROR_HAVE_NO_MEMORY(out);
+ out->version = DSDB_EXTENDED_REPLICATED_OBJECTS_VERSION;
+ out->dsdb_repl_flags = dsdb_repl_flags;
+
+ /*
+ * Ensure schema is kept valid for as long as 'out'
+ * which may contain pointers to it
+ */
+ schema = talloc_reference(out, schema);
+ W_ERROR_HAVE_NO_MEMORY(schema);
+
+ status = dsdb_schema_pfm_from_drsuapi_pfm(mapping_ctr, true,
+ out, &pfm_remote, NULL);
+ if (!W_ERROR_IS_OK(status)) {
+ DEBUG(0,(__location__ ": Failed to decode remote prefixMap: %s\n",
+ win_errstr(status)));
+ talloc_free(out);
+ return status;
+ }
+
+ /* use default syntax conversion context */
+ dsdb_syntax_ctx_init(&syntax_ctx, ldb, schema);
+ syntax_ctx.pfm_remote = pfm_remote;
+
+ if (ldb_dn_compare(partition_dn, ldb_get_schema_basedn(ldb)) != 0) {
+ /*
+ * check for schema changes in case
+ * we are not replicating Schema NC
+ */
+ status = dsdb_schema_info_cmp(schema, mapping_ctr);
+ if (!W_ERROR_IS_OK(status)) {
+ DEBUG(4,("Can't replicate %s because remote schema has changed since we last replicated the schema\n",
+ ldb_dn_get_linearized(partition_dn)));
+ talloc_free(out);
+ return status;
+ }
+ }
+
+ out->partition_dn = partition_dn;
+
+ out->source_dsa = source_dsa;
+	out->uptodateness_vector = uptodateness_vector;
+
+ out->num_objects = 0;
+ out->objects = talloc_array(out,
+ struct dsdb_extended_replicated_object,
+ object_count);
+ W_ERROR_HAVE_NO_MEMORY_AND_FREE(out->objects, out);
+
+ for (i=0, cur = first_object; cur; cur = cur->next_object, i++) {
+ if (i == object_count) {
+ talloc_free(out);
+ return WERR_FOOBAR;
+ }
+
+ status = dsdb_convert_object_ex(ldb, schema, out->partition_dn,
+ pfm_remote,
+ cur, gensec_skey,
+ NULL,
+ dsdb_repl_flags,
+ out->objects,
+ &out->objects[out->num_objects]);
+
+ /*
+ * Check to see if we have been advised of a
+ * subdomain or new application partition. We don't
+ * want to start on that here, instead the caller
+ * should consider if it would like to replicate it
+ * based on the cross-ref object.
+ */
+ if (W_ERROR_EQUAL(status, WERR_DS_ADD_REPLICA_INHIBITED)) {
+ struct GUID_txt_buf guid_str;
+ DBG_ERR("Ignoring object outside partition %s %s: %s\n",
+ GUID_buf_string(&cur->object.identifier->guid,
+ &guid_str),
+ cur->object.identifier->dn,
+ win_errstr(status));
+ continue;
+ }
+
+ if (!W_ERROR_IS_OK(status)) {
+ talloc_free(out);
+ DEBUG(0,("Failed to convert object %s: %s\n",
+ cur->object.identifier->dn,
+ win_errstr(status)));
+ return status;
+ }
+
+ /* Assuming we didn't skip or error, increment the number of objects */
+ out->num_objects++;
+ }
+
+	DBG_INFO("Processed %"PRIu32" DRS objects, saw %"PRIu32" objects "
+ "and expected %"PRIu32" objects\n",
+ out->num_objects, i, object_count);
+
+ out->objects = talloc_realloc(out, out->objects,
+ struct dsdb_extended_replicated_object,
+ out->num_objects);
+ if (out->num_objects != 0 && out->objects == NULL) {
+ DBG_ERR("FAILURE: talloc_realloc() failed after "
+ "processing %"PRIu32" DRS objects!\n",
+ out->num_objects);
+ talloc_free(out);
+ return WERR_FOOBAR;
+ }
+ if (i != object_count) {
+ DBG_ERR("FAILURE: saw %"PRIu32" DRS objects, server said we "
+			"should expect to see %"PRIu32" objects!\n",
+ i, object_count);
+ talloc_free(out);
+ return WERR_FOOBAR;
+ }
+
+ out->linked_attributes = talloc_array(out,
+ struct drsuapi_DsReplicaLinkedAttribute,
+ linked_attributes_count);
+ W_ERROR_HAVE_NO_MEMORY_AND_FREE(out->linked_attributes, out);
+
+ for (i=0; i < linked_attributes_count; i++) {
+ const struct drsuapi_DsReplicaLinkedAttribute *ra = &linked_attributes[i];
+ struct drsuapi_DsReplicaLinkedAttribute *la = &out->linked_attributes[i];
+
+ if (ra->identifier == NULL) {
+ talloc_free(out);
+ return WERR_BAD_NET_RESP;
+ }
+
+ *la = *ra;
+
+ la->identifier = talloc_zero(out->linked_attributes,
+ struct drsuapi_DsReplicaObjectIdentifier);
+ W_ERROR_HAVE_NO_MEMORY_AND_FREE(la->identifier, out);
+
+ /*
+ * We typically only get the guid filled
+		 * and the repl_meta_data module only cares about
+ * the guid.
+ */
+ la->identifier->guid = ra->identifier->guid;
+
+ if (ra->value.blob != NULL) {
+ la->value.blob = talloc_zero(out->linked_attributes,
+ DATA_BLOB);
+ W_ERROR_HAVE_NO_MEMORY_AND_FREE(la->value.blob, out);
+
+ if (ra->value.blob->length != 0) {
+ *la->value.blob = data_blob_dup_talloc(la->value.blob,
+ *ra->value.blob);
+ W_ERROR_HAVE_NO_MEMORY_AND_FREE(la->value.blob->data, out);
+ }
+ }
+
+ status = dsdb_attribute_drsuapi_remote_to_local(&syntax_ctx,
+ ra->attid,
+ &la->attid,
+ NULL);
+ if (!W_ERROR_IS_OK(status)) {
+ DEBUG(0,(__location__": linked_attribute[%u] attid 0x%08X not found: %s\n",
+ i, ra->attid, win_errstr(status)));
+ return status;
+ }
+ }
+
+ out->linked_attributes_count = linked_attributes_count;
+
+ /* free pfm_remote, we won't need it anymore */
+ talloc_free(pfm_remote);
+
+ *objects = out;
+ return WERR_OK;
+}
+
+/**
+ * Commits a list of replicated objects.
+ *
+ * @param working_schema dsdb_schema to be used for resolving
+ * Classes/Attributes during Schema replication. If not NULL,
+ * it will be set on ldb and used while committing replicated objects
+ */
+WERROR dsdb_replicated_objects_commit(struct ldb_context *ldb,
+ struct dsdb_schema *working_schema,
+ struct dsdb_extended_replicated_objects *objects,
+ uint64_t *notify_uSN)
+{
+ WERROR werr;
+ struct ldb_result *ext_res;
+ struct dsdb_schema *cur_schema = NULL;
+ struct dsdb_schema *new_schema = NULL;
+ int ret;
+ uint64_t seq_num1, seq_num2;
+ bool used_global_schema = false;
+
+ TALLOC_CTX *tmp_ctx = talloc_new(objects);
+ if (!tmp_ctx) {
+ DEBUG(0,("Failed to start talloc\n"));
+ return WERR_NOT_ENOUGH_MEMORY;
+ }
+
+ /* wrap the extended operation in a transaction
+ See [MS-DRSR] 3.3.2 Transactions
+ */
+ ret = ldb_transaction_start(ldb);
+ if (ret != LDB_SUCCESS) {
+ DEBUG(0,(__location__ " Failed to start transaction: %s\n",
+ ldb_errstring(ldb)));
+ return WERR_FOOBAR;
+ }
+
+ ret = dsdb_load_partition_usn(ldb, objects->partition_dn, &seq_num1, NULL);
+ if (ret != LDB_SUCCESS) {
+ DEBUG(0,(__location__ " Failed to load partition uSN\n"));
+ ldb_transaction_cancel(ldb);
+ TALLOC_FREE(tmp_ctx);
+ return WERR_FOOBAR;
+ }
+
+ /*
+ * Set working_schema for ldb in case we are replicating from Schema NC.
+ * Schema won't be reloaded during Replicated Objects commit, as it is
+ * done in a transaction. So we need some way to search for newly
+ * added Classes and Attributes
+ */
+ if (working_schema) {
+ /* store current schema so we can fall back in case of failure */
+ cur_schema = dsdb_get_schema(ldb, tmp_ctx);
+ used_global_schema = dsdb_uses_global_schema(ldb);
+
+ ret = dsdb_reference_schema(ldb, working_schema, SCHEMA_MEMORY_ONLY);
+ if (ret != LDB_SUCCESS) {
+ DEBUG(0,(__location__ "Failed to reference working schema - %s\n",
+ ldb_strerror(ret)));
+ /* TODO: Map LDB Error to NTSTATUS? */
+ ldb_transaction_cancel(ldb);
+ TALLOC_FREE(tmp_ctx);
+ return WERR_INTERNAL_ERROR;
+ }
+ }
+
+ ret = ldb_extended(ldb, DSDB_EXTENDED_REPLICATED_OBJECTS_OID, objects, &ext_res);
+ if (ret != LDB_SUCCESS) {
+ /* restore previous schema */
+ if (used_global_schema) {
+ dsdb_set_global_schema(ldb);
+ } else if (cur_schema) {
+ dsdb_reference_schema(ldb, cur_schema, SCHEMA_MEMORY_ONLY);
+ }
+
+ if (W_ERROR_EQUAL(objects->error, WERR_DS_DRA_RECYCLED_TARGET)) {
+ DEBUG(3,("Missing target while attempting to apply records: %s\n",
+ ldb_errstring(ldb)));
+ } else if (W_ERROR_EQUAL(objects->error, WERR_DS_DRA_MISSING_PARENT)) {
+ DEBUG(3,("Missing parent while attempting to apply records: %s\n",
+ ldb_errstring(ldb)));
+ } else {
+ DEBUG(1,("Failed to apply records: %s: %s\n",
+ ldb_errstring(ldb), ldb_strerror(ret)));
+ }
+ ldb_transaction_cancel(ldb);
+ TALLOC_FREE(tmp_ctx);
+
+ if (!W_ERROR_IS_OK(objects->error)) {
+ return objects->error;
+ }
+ return WERR_FOOBAR;
+ }
+ talloc_free(ext_res);
+
+ /* Save our updated prefixMap and check the schema is good. */
+ if (working_schema) {
+ struct ldb_result *ext_res_2;
+
+ werr = dsdb_write_prefixes_from_schema_to_ldb(working_schema,
+ ldb,
+ working_schema);
+ if (!W_ERROR_IS_OK(werr)) {
+ /* restore previous schema */
+ if (used_global_schema) {
+ dsdb_set_global_schema(ldb);
+			} else if (cur_schema) {
+ dsdb_reference_schema(ldb,
+ cur_schema,
+ SCHEMA_MEMORY_ONLY);
+ }
+ DEBUG(0,("Failed to save updated prefixMap: %s\n",
+ win_errstr(werr)));
+ ldb_transaction_cancel(ldb);
+ TALLOC_FREE(tmp_ctx);
+ return werr;
+ }
+
+ /*
+ * Use dsdb_schema_from_db through dsdb extended to check we
+ * can load the schema currently sitting in the transaction.
+ * We need this check because someone might have written to
+ * the schema or prefixMap before we started the transaction,
+ * which may have caused corruption.
+ */
+ ret = ldb_extended(ldb, DSDB_EXTENDED_SCHEMA_LOAD,
+ NULL, &ext_res_2);
+
+ if (ret != LDB_SUCCESS) {
+ if (used_global_schema) {
+ dsdb_set_global_schema(ldb);
+ } else if (cur_schema) {
+ dsdb_reference_schema(ldb, cur_schema, SCHEMA_MEMORY_ONLY);
+ }
+ DEBUG(0,("Corrupt schema write attempt detected, "
+ "aborting schema modification operation.\n"
+ "This probably happened due to bad timing of "
+ "another schema edit: %s (%s)\n",
+ ldb_errstring(ldb),
+ ldb_strerror(ret)));
+ ldb_transaction_cancel(ldb);
+ TALLOC_FREE(tmp_ctx);
+ return WERR_FOOBAR;
+ }
+ }
+
+ ret = ldb_transaction_prepare_commit(ldb);
+ if (ret != LDB_SUCCESS) {
+ /* restore previous schema */
+ if (used_global_schema) {
+ dsdb_set_global_schema(ldb);
+		} else if (cur_schema) {
+ dsdb_reference_schema(ldb, cur_schema, SCHEMA_MEMORY_ONLY);
+ }
+ DBG_ERR(" Failed to prepare commit of transaction: %s (%s)\n",
+ ldb_errstring(ldb),
+ ldb_strerror(ret));
+ TALLOC_FREE(tmp_ctx);
+ return WERR_FOOBAR;
+ }
+
+ ret = dsdb_load_partition_usn(ldb, objects->partition_dn, &seq_num2, NULL);
+ if (ret != LDB_SUCCESS) {
+ /* restore previous schema */
+ if (used_global_schema) {
+ dsdb_set_global_schema(ldb);
+		} else if (cur_schema) {
+ dsdb_reference_schema(ldb, cur_schema, SCHEMA_MEMORY_ONLY);
+ }
+ DEBUG(0,(__location__ " Failed to load partition uSN\n"));
+ ldb_transaction_cancel(ldb);
+ TALLOC_FREE(tmp_ctx);
+ return WERR_FOOBAR;
+ }
+
+ ret = ldb_transaction_commit(ldb);
+ if (ret != LDB_SUCCESS) {
+ /* restore previous schema */
+ if (used_global_schema) {
+ dsdb_set_global_schema(ldb);
+		} else if (cur_schema) {
+ dsdb_reference_schema(ldb, cur_schema, SCHEMA_MEMORY_ONLY);
+ }
+ DEBUG(0,(__location__ " Failed to commit transaction\n"));
+ TALLOC_FREE(tmp_ctx);
+ return WERR_FOOBAR;
+ }
+
+ if (seq_num1 > *notify_uSN) {
+ /*
+ * A notify was already required before
+ * the current transaction.
+ */
+ } else if (objects->originating_updates) {
+ /*
+ * Applying the replicated changes
+ * required originating updates,
+ * so a notify is required.
+ */
+ } else {
+ /*
+ * There's no need to notify the
+		 * server about the changes we just got from it.
+ */
+ *notify_uSN = seq_num2;
+ }
+
+ /*
+ * Reset the Schema used by ldb. This will lead to
+ * a schema cache being refreshed from database.
+ */
+ if (working_schema) {
+ /* Reload the schema */
+ new_schema = dsdb_get_schema(ldb, tmp_ctx);
+ /* TODO:
+ * If dsdb_get_schema() fails, we just fall back
+ * to what we had. However, the database is probably
+ * unable to operate for other users from this
+ * point... */
+ if (new_schema == NULL || new_schema == working_schema) {
+ DBG_ERR("Failed to re-load schema after commit of "
+ "transaction (working: %p/%"PRIu64", new: "
+ "%p/%"PRIu64")\n", new_schema,
+ new_schema != NULL ?
+ new_schema->metadata_usn : 0,
+ working_schema, working_schema->metadata_usn);
+ dsdb_reference_schema(ldb, cur_schema, SCHEMA_MEMORY_ONLY);
+ if (used_global_schema) {
+ dsdb_set_global_schema(ldb);
+ }
+ TALLOC_FREE(tmp_ctx);
+ return WERR_INTERNAL_ERROR;
+ } else if (used_global_schema) {
+ dsdb_make_schema_global(ldb, new_schema);
+ }
+ }
+
+ DEBUG(2,("Replicated %u objects (%u linked attributes) for %s\n",
+ objects->num_objects, objects->linked_attributes_count,
+ ldb_dn_get_linearized(objects->partition_dn)));
+
+ TALLOC_FREE(tmp_ctx);
+ return WERR_OK;
+}
+
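+/*
+  convert a single originating (DsAddEntry) object into an ldb
+  message using the local schema
+ */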
+static WERROR dsdb_origin_object_convert(struct ldb_context *ldb,
+ const struct dsdb_schema *schema,
+ const struct drsuapi_DsReplicaObjectListItem *in,
+ TALLOC_CTX *mem_ctx,
+ struct ldb_message **_msg)
+{
+ WERROR status;
+ unsigned int i;
+ struct ldb_message *msg;
+
+ if (!in->object.identifier) {
+ return WERR_FOOBAR;
+ }
+
+ if (!in->object.identifier->dn || !in->object.identifier->dn[0]) {
+ return WERR_FOOBAR;
+ }
+
+ msg = ldb_msg_new(mem_ctx);
+ W_ERROR_HAVE_NO_MEMORY(msg);
+
+ msg->dn = ldb_dn_new(msg, ldb, in->object.identifier->dn);
+ W_ERROR_HAVE_NO_MEMORY(msg->dn);
+
+ msg->num_elements = in->object.attribute_ctr.num_attributes;
+ msg->elements = talloc_array(msg, struct ldb_message_element,
+ msg->num_elements);
+ W_ERROR_HAVE_NO_MEMORY(msg->elements);
+
+ for (i=0; i < msg->num_elements; i++) {
+ struct drsuapi_DsReplicaAttribute *a;
+ struct ldb_message_element *e;
+
+ a = &in->object.attribute_ctr.attributes[i];
+ e = &msg->elements[i];
+
+ status = dsdb_attribute_drsuapi_to_ldb(ldb, schema, schema->prefixmap,
+ a, msg->elements, e, NULL);
+ W_ERROR_NOT_OK_RETURN(status);
+ }
+
+
+ *_msg = msg;
+
+ return WERR_OK;
+}
+
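+/*
+  add a list of originating (DsAddEntry) objects to the database in
+  a single transaction and return the resulting objectGUID/objectSid
+  pairs
+ */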
+WERROR dsdb_origin_objects_commit(struct ldb_context *ldb,
+ TALLOC_CTX *mem_ctx,
+ const struct drsuapi_DsReplicaObjectListItem *first_object,
+ uint32_t *_num,
+ uint32_t dsdb_repl_flags,
+ struct drsuapi_DsReplicaObjectIdentifier2 **_ids)
+{
+ WERROR status;
+ const struct dsdb_schema *schema;
+ const struct drsuapi_DsReplicaObjectListItem *cur;
+ struct ldb_message **objects;
+ struct drsuapi_DsReplicaObjectIdentifier2 *ids;
+ uint32_t i;
+ uint32_t num_objects = 0;
+ const char * const attrs[] = {
+ "objectGUID",
+ "objectSid",
+ NULL
+ };
+ struct ldb_result *res;
+ int ret;
+
+ for (cur = first_object; cur; cur = cur->next_object) {
+ num_objects++;
+ }
+
+ if (num_objects == 0) {
+ return WERR_OK;
+ }
+
+ ret = ldb_transaction_start(ldb);
+ if (ret != LDB_SUCCESS) {
+ return WERR_DS_INTERNAL_FAILURE;
+ }
+
+ objects = talloc_array(mem_ctx, struct ldb_message *,
+ num_objects);
+ if (objects == NULL) {
+ status = WERR_NOT_ENOUGH_MEMORY;
+ goto cancel;
+ }
+
+ schema = dsdb_get_schema(ldb, objects);
+ if (!schema) {
+ return WERR_DS_SCHEMA_NOT_LOADED;
+ }
+
+ for (i=0, cur = first_object; cur; cur = cur->next_object, i++) {
+ status = dsdb_origin_object_convert(ldb, schema, cur,
+ objects, &objects[i]);
+ if (!W_ERROR_IS_OK(status)) {
+ goto cancel;
+ }
+ }
+
+ ids = talloc_array(mem_ctx,
+ struct drsuapi_DsReplicaObjectIdentifier2,
+ num_objects);
+ if (ids == NULL) {
+ status = WERR_NOT_ENOUGH_MEMORY;
+ goto cancel;
+ }
+
+ if (dsdb_repl_flags & DSDB_REPL_FLAG_ADD_NCNAME) {
+ /* check for possible NC creation */
+ for (i=0; i < num_objects; i++) {
+ struct ldb_message *msg = objects[i];
+ struct ldb_message_element *el;
+ struct ldb_dn *nc_dn;
+
+ if (ldb_msg_check_string_attribute(msg, "objectClass", "crossRef") == 0) {
+ continue;
+ }
+ el = ldb_msg_find_element(msg, "nCName");
+ if (el == NULL || el->num_values != 1) {
+ continue;
+ }
+ nc_dn = ldb_dn_from_ldb_val(objects, ldb, &el->values[0]);
+ if (!ldb_dn_validate(nc_dn)) {
+ continue;
+ }
+ ret = dsdb_create_partial_replica_NC(ldb, nc_dn);
+ if (ret != LDB_SUCCESS) {
+ status = WERR_DS_INTERNAL_FAILURE;
+ goto cancel;
+ }
+ }
+ }
+
+ for (i=0; i < num_objects; i++) {
+ struct dom_sid *sid = NULL;
+ struct ldb_request *add_req;
+
+ DEBUG(6,(__location__ ": adding %s\n",
+ ldb_dn_get_linearized(objects[i]->dn)));
+
+ ret = ldb_build_add_req(&add_req,
+ ldb,
+ objects,
+ objects[i],
+ NULL,
+ NULL,
+ ldb_op_default_callback,
+ NULL);
+ if (ret != LDB_SUCCESS) {
+ status = WERR_DS_INTERNAL_FAILURE;
+ goto cancel;
+ }
+
+ ret = ldb_request_add_control(add_req, LDB_CONTROL_RELAX_OID, true, NULL);
+ if (ret != LDB_SUCCESS) {
+ status = WERR_DS_INTERNAL_FAILURE;
+ goto cancel;
+ }
+
+ ret = ldb_request(ldb, add_req);
+ if (ret == LDB_SUCCESS) {
+ ret = ldb_wait(add_req->handle, LDB_WAIT_ALL);
+ }
+ if (ret != LDB_SUCCESS) {
+ DEBUG(0,(__location__ ": Failed add of %s - %s\n",
+ ldb_dn_get_linearized(objects[i]->dn), ldb_errstring(ldb)));
+ status = WERR_DS_INTERNAL_FAILURE;
+ goto cancel;
+ }
+
+ talloc_free(add_req);
+
+ ret = ldb_search(ldb, objects, &res, objects[i]->dn,
+ LDB_SCOPE_BASE, attrs,
+ "(objectClass=*)");
+ if (ret != LDB_SUCCESS) {
+ status = WERR_DS_INTERNAL_FAILURE;
+ goto cancel;
+ }
+ ids[i].guid = samdb_result_guid(res->msgs[0], "objectGUID");
+ sid = samdb_result_dom_sid(objects, res->msgs[0], "objectSid");
+ if (sid) {
+ ids[i].sid = *sid;
+ } else {
+ ZERO_STRUCT(ids[i].sid);
+ }
+ }
+
+ ret = ldb_transaction_commit(ldb);
+ if (ret != LDB_SUCCESS) {
+ return WERR_DS_INTERNAL_FAILURE;
+ }
+
+ talloc_free(objects);
+
+ *_num = num_objects;
+ *_ids = ids;
+ return WERR_OK;
+
+cancel:
+ talloc_free(objects);
+ ldb_transaction_cancel(ldb);
+ return status;
+}