summaryrefslogtreecommitdiffstats
path: root/source3/lib/dbwrap
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-19 17:20:00 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-19 17:20:00 +0000
commit8daa83a594a2e98f39d764422bfbdbc62c9efd44 (patch)
tree4099e8021376c7d8c05bdf8503093d80e9c7bad0 /source3/lib/dbwrap
parentInitial commit. (diff)
downloadsamba-8daa83a594a2e98f39d764422bfbdbc62c9efd44.tar.xz
samba-8daa83a594a2e98f39d764422bfbdbc62c9efd44.zip
Adding upstream version 2:4.20.0+dfsg.upstream/2%4.20.0+dfsg
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'source3/lib/dbwrap')
-rw-r--r--source3/lib/dbwrap/dbwrap_ctdb.c1985
-rw-r--r--source3/lib/dbwrap/dbwrap_ctdb.h40
-rw-r--r--source3/lib/dbwrap/dbwrap_open.c197
-rw-r--r--source3/lib/dbwrap/dbwrap_open.h45
-rw-r--r--source3/lib/dbwrap/dbwrap_watch.c1285
-rw-r--r--source3/lib/dbwrap/dbwrap_watch.h45
6 files changed, 3597 insertions, 0 deletions
diff --git a/source3/lib/dbwrap/dbwrap_ctdb.c b/source3/lib/dbwrap/dbwrap_ctdb.c
new file mode 100644
index 0000000..46165e8
--- /dev/null
+++ b/source3/lib/dbwrap/dbwrap_ctdb.c
@@ -0,0 +1,1985 @@
+/*
+ Unix SMB/CIFS implementation.
+ Database interface wrapper around ctdbd
+ Copyright (C) Volker Lendecke 2007-2009
+ Copyright (C) Michael Adam 2009
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "includes.h"
+#include "system/filesys.h"
+#include "lib/tdb_wrap/tdb_wrap.h"
+#include "util_tdb.h"
+#include "dbwrap/dbwrap.h"
+#include "dbwrap/dbwrap_ctdb.h"
+#include "dbwrap/dbwrap_rbt.h"
+#include "lib/param/param.h"
+
+#include "ctdb/include/ctdb_protocol.h"
+#include "ctdbd_conn.h"
+#include "dbwrap/dbwrap.h"
+#include "dbwrap/dbwrap_private.h"
+#include "dbwrap/dbwrap_ctdb.h"
+#include "g_lock.h"
+#include "messages.h"
+#include "messages_ctdb.h"
+#include "lib/cluster_support.h"
+#include "lib/util/tevent_ntstatus.h"
+
+/*
+ * State of one transaction on a clustered persistent database.
+ * Writes are buffered in m_write and only pushed to the cluster
+ * (via CTDB_CONTROL_TRANS3_COMMIT) at commit time.
+ */
+struct db_ctdb_transaction_handle {
+	struct db_ctdb_ctx *ctx;	/* owning database context */
+	/*
+	 * we store the writes done under a transaction:
+	 */
+	struct ctdb_marshall_buffer *m_write;
+	uint32_t nesting;	/* depth of nested transaction_start calls */
+	bool nested_cancel;	/* a nested level cancelled; commit must fail */
+	char *lock_name;	/* g_lock name serializing this db's transactions */
+};
+
+/*
+ * Per-database context of the ctdb dbwrap backend: the local tdb copy
+ * of the distributed database plus transaction and locking state.
+ */
+struct db_ctdb_ctx {
+	struct db_context *db;		/* back-pointer to the dbwrap handle */
+	struct tdb_wrap *wtdb;		/* local tdb copy of the database */
+	uint32_t db_id;			/* ctdb database id */
+	struct db_ctdb_transaction_handle *transaction;
+	struct g_lock_ctx *lock_ctx;	/* serializes transactions via g_lock */
+
+	/* thresholds for warning messages */
+	int warn_unlock_msecs;
+	int warn_migrate_msecs;
+	int warn_migrate_attempts;
+	int warn_locktime_msecs;
+};
+
+/*
+ * Private data of a fetch_locked record on a non-persistent db:
+ * carries the ctdb record header and the time the chainlock was
+ * taken (used for lock-hold-time warnings in the destructor).
+ */
+struct db_ctdb_rec {
+	struct db_ctdb_ctx *ctdb_ctx;
+	struct ctdb_ltdb_header header;
+	struct timeval lock_time;
+};
+
+/*
+ * Process-global async connection to ctdbd, created lazily by
+ * ctdb_async_ctx_init_internal().
+ */
+struct ctdb_async_ctx {
+	bool initialized;
+	struct ctdbd_connection *async_conn;
+};
+
+static struct ctdb_async_ctx ctdb_async_ctx;
+
+/*
+ * Establish the global async connection to ctdbd.  With reinit=true
+ * any existing connection is torn down and rebuilt; otherwise an
+ * already initialized connection is reused.  Returns 0 on success or
+ * an errno-style error code.  NOTE(review): the "ev" parameter is
+ * currently unused here.
+ */
+static int ctdb_async_ctx_init_internal(TALLOC_CTX *mem_ctx,
+					struct tevent_context *ev,
+					bool reinit)
+{
+	int ret;
+
+	if (reinit) {
+		TALLOC_FREE(ctdb_async_ctx.async_conn);
+		ctdb_async_ctx.initialized = false;
+	}
+
+	if (ctdb_async_ctx.initialized) {
+		return 0;
+	}
+
+	/* Connecting to the ctdbd socket requires root privileges. */
+	become_root();
+	ret = ctdbd_init_async_connection(
+		mem_ctx,
+		lp_ctdbd_socket(),
+		lp_ctdb_timeout(),
+		&ctdb_async_ctx.async_conn);
+	unbecome_root();
+
+	if (ret != 0) {
+		DBG_ERR("ctdbd_init_async_connection(%s, timeout=%d) "
+			"failed: ret=%d %s\n",
+			lp_ctdbd_socket(),
+			lp_ctdb_timeout(),
+			ret, strerror(ret));
+		return ret;
+	}
+
+	SMB_ASSERT(ctdb_async_ctx.async_conn != NULL);
+
+	ctdb_async_ctx.initialized = true;
+	return 0;
+}
+
+/* Lazily initialize the global async ctdbd connection. */
+static int ctdb_async_ctx_init(TALLOC_CTX *mem_ctx, struct tevent_context *ev)
+{
+	return ctdb_async_ctx_init_internal(mem_ctx, ev, false);
+}
+
+/* Force tear-down and re-establishment of the async ctdbd connection. */
+int ctdb_async_ctx_reinit(TALLOC_CTX *mem_ctx, struct tevent_context *ev)
+{
+	return ctdb_async_ctx_init_internal(mem_ctx, ev, true);
+}
+
+/*
+ * Map the last error recorded on "tdb" to an NTSTATUS code.
+ */
+static NTSTATUS tdb_error_to_ntstatus(struct tdb_context *tdb)
+{
+	return map_nt_error_from_tdb(tdb_error(tdb));
+}
+
+/*
+ * Closure passed through tdb_parse_record() to db_ctdb_ltdb_parser():
+ * the caller's header-aware parser plus its private data.
+ */
+struct db_ctdb_ltdb_parse_state {
+	void (*parser)(TDB_DATA key, struct ctdb_ltdb_header *header,
+		       TDB_DATA data, void *private_data);
+	void *private_data;
+};
+
+/*
+ * tdb_parse_record() callback: split the raw record into the leading
+ * ctdb_ltdb_header and the payload and hand both to the caller's
+ * parser.  Returns -1 for records too short to carry a header.
+ */
+static int db_ctdb_ltdb_parser(TDB_DATA key, TDB_DATA data,
+			       void *private_data)
+{
+	struct db_ctdb_ltdb_parse_state *s = private_data;
+	const size_t hdrlen = sizeof(struct ctdb_ltdb_header);
+	TDB_DATA payload;
+
+	if (data.dsize < hdrlen) {
+		/* Corrupt/short record: no room for the ctdb header. */
+		return -1;
+	}
+
+	payload = make_tdb_data(data.dptr + hdrlen, data.dsize - hdrlen);
+	s->parser(key,
+		  (struct ctdb_ltdb_header *)data.dptr,
+		  payload,
+		  s->private_data);
+	return 0;
+}
+
+/*
+ * Parse the local tdb copy of a record, giving the caller the ctdb
+ * header and the payload separately.  Returns NT_STATUS_NOT_FOUND if
+ * the record does not exist (or is too short to carry a header).
+ */
+static NTSTATUS db_ctdb_ltdb_parse(
+	struct db_ctdb_ctx *db, TDB_DATA key,
+	void (*parser)(TDB_DATA key, struct ctdb_ltdb_header *header,
+		       TDB_DATA data, void *private_data),
+	void *private_data)
+{
+	struct db_ctdb_ltdb_parse_state state = {
+		.parser = parser,
+		.private_data = private_data,
+	};
+	int ret;
+
+	ret = tdb_parse_record(db->wtdb->tdb,
+			       key,
+			       db_ctdb_ltdb_parser,
+			       &state);
+	return (ret == -1) ? NT_STATUS_NOT_FOUND : NT_STATUS_OK;
+}
+
+/*
+ * Store a record together with the ctdb record header
+ * in the local copy of the database.
+ *
+ * The header is prepended as an extra chunk so tdb_storev() writes
+ * header+payload as a single record, replacing any previous value.
+ */
+static NTSTATUS db_ctdb_ltdb_store(struct db_ctdb_ctx *db,
+				   TDB_DATA key,
+				   struct ctdb_ltdb_header *header,
+				   const TDB_DATA *dbufs, int num_dbufs)
+{
+	/* VLA: slot 0 for the header, then the caller's chunks. */
+	TDB_DATA recs[num_dbufs+1];
+	int ret;
+
+	recs[0] = (TDB_DATA) { .dptr = (uint8_t *)header,
+			       .dsize = sizeof(struct ctdb_ltdb_header) };
+	memcpy(&recs[1], dbufs, sizeof(TDB_DATA) * num_dbufs);
+
+	ret = tdb_storev(db->wtdb->tdb, key, recs, num_dbufs + 1, TDB_REPLACE);
+
+	return (ret == 0) ? NT_STATUS_OK
+			  : tdb_error_to_ntstatus(db->wtdb->tdb);
+
+}
+
+/*
+  form a ctdb_rec_data record from a key/data pair
+
+  Layout of the trailing d->data[] blob: key bytes, then the
+  ctdb_ltdb_header, then the payload.  The header is counted as part
+  of d->datalen.  Returns a talloc'ed record or NULL on OOM.
+ */
+static struct ctdb_rec_data_old *db_ctdb_marshall_record(TALLOC_CTX *mem_ctx, uint32_t reqid,
+						     TDB_DATA key,
+						     struct ctdb_ltdb_header *header,
+						     TDB_DATA data)
+{
+	size_t length;
+	struct ctdb_rec_data_old *d;
+
+	length = offsetof(struct ctdb_rec_data_old, data) + key.dsize +
+		data.dsize + sizeof(*header);
+	d = (struct ctdb_rec_data_old *)talloc_size(mem_ctx, length);
+	if (d == NULL) {
+		return NULL;
+	}
+	d->length = length;
+	d->reqid = reqid;
+	d->keylen = key.dsize;
+	memcpy(&d->data[0], key.dptr, key.dsize);
+
+	d->datalen = data.dsize + sizeof(*header);
+	memcpy(&d->data[key.dsize], header, sizeof(*header));
+	memcpy(&d->data[key.dsize+sizeof(*header)], data.dptr, data.dsize);
+	return d;
+}
+
+
+/* helper function for marshalling multiple records
+ *
+ * Appends one key/header/data record to marshall buffer "m" (created
+ * on first call when m==NULL).  Returns the (possibly reallocated)
+ * buffer, or NULL on OOM — in which case "m" has been freed, so the
+ * caller must not touch it again.
+ */
+static struct ctdb_marshall_buffer *db_ctdb_marshall_add(TALLOC_CTX *mem_ctx,
+							 struct ctdb_marshall_buffer *m,
+							 uint32_t db_id,
+							 uint32_t reqid,
+							 TDB_DATA key,
+							 struct ctdb_ltdb_header *header,
+							 TDB_DATA data)
+{
+	struct ctdb_rec_data_old *r;
+	size_t m_size, r_size;
+	struct ctdb_marshall_buffer *m2 = NULL;
+
+	r = db_ctdb_marshall_record(talloc_tos(), reqid, key, header, data);
+	if (r == NULL) {
+		talloc_free(m);
+		return NULL;
+	}
+
+	if (m == NULL) {
+		/* First record: create the buffer header. */
+		m = (struct ctdb_marshall_buffer *)talloc_zero_size(
+			mem_ctx, offsetof(struct ctdb_marshall_buffer, data));
+		if (m == NULL) {
+			goto done;
+		}
+		m->db_id = db_id;
+	}
+
+	m_size = talloc_get_size(m);
+	r_size = talloc_get_size(r);
+
+	/* Grow the buffer and append the new record at its tail. */
+	m2 = (struct ctdb_marshall_buffer *)talloc_realloc_size(
+		mem_ctx, m, m_size + r_size);
+	if (m2 == NULL) {
+		talloc_free(m);
+		goto done;
+	}
+
+	memcpy(m_size + (uint8_t *)m2, r, r_size);
+
+	m2->count++;
+
+done:
+	talloc_free(r);
+	return m2;
+}
+
+/* we've finished marshalling, return a data blob with the marshalled records */
+static TDB_DATA db_ctdb_marshall_finish(struct ctdb_marshall_buffer *m)
+{
+	return (TDB_DATA) {
+		.dptr = (uint8_t *)m,
+		.dsize = talloc_get_size(m),
+	};
+}
+
+/*
+  loop over a marshalling buffer
+
+  - pass r==NULL to start
+  - loop the number of times indicated by m->count
+
+  Returns the next record and fills *key with a pointer into it (no
+  copy is made; the key aliases the buffer).
+*/
+static struct ctdb_rec_data_old *db_ctdb_marshall_loop_next_key(
+	struct ctdb_marshall_buffer *m, struct ctdb_rec_data_old *r, TDB_DATA *key)
+{
+	if (r == NULL) {
+		r = (struct ctdb_rec_data_old *)&m->data[0];
+	} else {
+		/* Records are packed back-to-back: step by r->length. */
+		r = (struct ctdb_rec_data_old *)(r->length + (uint8_t *)r);
+	}
+
+	key->dptr = &r->data[0];
+	key->dsize = r->keylen;
+	return r;
+}
+
+/*
+ * Split a marshalled record into reqid, embedded ctdb header and
+ * payload.  The returned header/data pointers alias the record; no
+ * copies are made.  Returns false if the record cannot contain a
+ * header.
+ */
+static bool db_ctdb_marshall_buf_parse(
+	struct ctdb_rec_data_old *r, uint32_t *reqid,
+	struct ctdb_ltdb_header **header, TDB_DATA *data)
+{
+	if (r->datalen < sizeof(struct ctdb_ltdb_header)) {
+		return false;
+	}
+
+	*reqid = r->reqid;
+
+	/* data[] layout: key bytes, then header, then payload. */
+	data->dptr = &r->data[r->keylen] + sizeof(struct ctdb_ltdb_header);
+	data->dsize = r->datalen - sizeof(struct ctdb_ltdb_header);
+
+	*header = (struct ctdb_ltdb_header *)&r->data[r->keylen];
+
+	return true;
+}
+
+/**
+ * CTDB transaction destructor
+ *
+ * Releases the g_lock serializing transactions on this database when
+ * the transaction handle is freed.
+ */
+static int db_ctdb_transaction_destructor(struct db_ctdb_transaction_handle *h)
+{
+	NTSTATUS status;
+
+	status = g_lock_unlock(h->ctx->lock_ctx,
+			       string_term_tdb_data(h->lock_name));
+	if (!NT_STATUS_IS_OK(status)) {
+		DEBUG(0, ("g_lock_unlock failed for %s: %s\n", h->lock_name,
+			  nt_errstr(status)));
+		return -1;
+	}
+	return 0;
+}
+
+/**
+ * CTDB dbwrap API: transaction_start function
+ * starts a transaction on a persistent database
+ *
+ * A cluster-wide g_lock serializes transactions per database.  Nested
+ * starts just bump the nesting counter on the existing handle.
+ * Returns 0 on success, -1 on error.
+ */
+static int db_ctdb_transaction_start(struct db_context *db)
+{
+	struct db_ctdb_transaction_handle *h;
+	NTSTATUS status;
+	struct db_ctdb_ctx *ctx = talloc_get_type_abort(db->private_data,
+							struct db_ctdb_ctx);
+
+	if (!db->persistent) {
+		DEBUG(0,("transactions not supported on non-persistent database 0x%08x\n",
+			 ctx->db_id));
+		return -1;
+	}
+
+	if (ctx->transaction) {
+		/* Nested start: reuse the outer transaction. */
+		ctx->transaction->nesting++;
+		DEBUG(5, (__location__ " transaction start on db 0x%08x: nesting %d -> %d\n",
+			  ctx->db_id, ctx->transaction->nesting - 1, ctx->transaction->nesting));
+		return 0;
+	}
+
+	h = talloc_zero(db, struct db_ctdb_transaction_handle);
+	if (h == NULL) {
+		DEBUG(0,(__location__ " oom for transaction handle\n"));
+		return -1;
+	}
+
+	h->ctx = ctx;
+
+	h->lock_name = talloc_asprintf(h, "transaction_db_0x%08x",
+				       (unsigned int)ctx->db_id);
+	if (h->lock_name == NULL) {
+		DEBUG(0, ("talloc_asprintf failed\n"));
+		TALLOC_FREE(h);
+		return -1;
+	}
+
+	/*
+	 * Wait a day, i.e. forever...
+	 */
+	status = g_lock_lock(ctx->lock_ctx, string_term_tdb_data(h->lock_name),
+			     G_LOCK_WRITE, timeval_set(86400, 0), NULL, NULL);
+	if (!NT_STATUS_IS_OK(status)) {
+		DEBUG(0, ("g_lock_lock failed: %s\n", nt_errstr(status)));
+		TALLOC_FREE(h);
+		return -1;
+	}
+
+	/* Release the g_lock automatically when the handle dies. */
+	talloc_set_destructor(h, db_ctdb_transaction_destructor);
+
+	ctx->transaction = h;
+
+	DEBUG(5,(__location__ " transaction started on db 0x%08x\n", ctx->db_id));
+
+	return 0;
+}
+
+/*
+ * Look up "key" among the writes buffered in "buf" and, if found,
+ * feed the most recent version to "parser".  Returns false if the
+ * buffer is NULL, the key was never written, or the buffer is
+ * malformed.
+ */
+static bool parse_newest_in_marshall_buffer(
+	struct ctdb_marshall_buffer *buf, TDB_DATA key,
+	void (*parser)(TDB_DATA key, struct ctdb_ltdb_header *header,
+		       TDB_DATA data, void *private_data),
+	void *private_data)
+{
+	struct ctdb_rec_data_old *rec = NULL;
+	struct ctdb_ltdb_header *h = NULL;
+	TDB_DATA data;
+	uint32_t i;
+
+	if (buf == NULL) {
+		return false;
+	}
+
+	/*
+	 * Walk the list of records written during this
+	 * transaction. If we want to read one we have already
+	 * written, return the last written sample. Thus we do not do
+	 * a "break;" for the first hit, this record might have been
+	 * overwritten later.
+	 */
+
+	for (i=0; i<buf->count; i++) {
+		TDB_DATA tkey;
+		uint32_t reqid;
+
+		rec = db_ctdb_marshall_loop_next_key(buf, rec, &tkey);
+		if (rec == NULL) {
+			return false;
+		}
+
+		if (!tdb_data_equal(key, tkey)) {
+			continue;
+		}
+
+		if (!db_ctdb_marshall_buf_parse(rec, &reqid, &h, &data)) {
+			return false;
+		}
+	}
+
+	/* h != NULL iff at least one matching record was found. */
+	if (h == NULL) {
+		return false;
+	}
+
+	parser(key, h, data, private_data);
+
+	return true;
+}
+
+/*
+ * Out-parameters for pull_newest_from_marshall_buffer(): optional
+ * destinations for the header and a talloc'ed copy of the data.
+ */
+struct pull_newest_from_marshall_buffer_state {
+	struct ctdb_ltdb_header *pheader;
+	TALLOC_CTX *mem_ctx;
+	TDB_DATA *pdata;
+};
+
+/*
+ * Parser callback: copy header and/or data into the caller's state.
+ * A failed talloc_memdup leaves pdata->dptr==NULL with dsize!=0; the
+ * caller detects that as ENOMEM.
+ */
+static void pull_newest_from_marshall_buffer_parser(
+	TDB_DATA key, struct ctdb_ltdb_header *header,
+	TDB_DATA data, void *private_data)
+{
+	struct pull_newest_from_marshall_buffer_state *state =
+		(struct pull_newest_from_marshall_buffer_state *)private_data;
+
+	if (state->pheader != NULL) {
+		memcpy(state->pheader, header, sizeof(*state->pheader));
+	}
+	if (state->pdata != NULL) {
+		state->pdata->dsize = data.dsize;
+		state->pdata->dptr = (uint8_t *)talloc_memdup(
+			state->mem_ctx, data.dptr, data.dsize);
+	}
+}
+
+/*
+ * Fetch the newest buffered version of "key" from "buf", optionally
+ * copying the header into *pheader and the data (talloc'ed off
+ * mem_ctx) into *pdata.  Returns false if the key is not in the
+ * buffer or on OOM.
+ */
+static bool pull_newest_from_marshall_buffer(struct ctdb_marshall_buffer *buf,
+					     TDB_DATA key,
+					     struct ctdb_ltdb_header *pheader,
+					     TALLOC_CTX *mem_ctx,
+					     TDB_DATA *pdata)
+{
+	struct pull_newest_from_marshall_buffer_state state;
+
+	state.pheader = pheader;
+	state.mem_ctx = mem_ctx;
+	state.pdata = pdata;
+
+	if (!parse_newest_in_marshall_buffer(
+		    buf, key, pull_newest_from_marshall_buffer_parser,
+		    &state)) {
+		return false;
+	}
+	if ((pdata != NULL) && (pdata->dsize != 0) && (pdata->dptr == NULL)) {
+		/* ENOMEM */
+		return false;
+	}
+	return true;
+}
+
+static NTSTATUS db_ctdb_storev_transaction(struct db_record *rec,
+ const TDB_DATA *dbufs, int num_dbufs,
+ int flag);
+static NTSTATUS db_ctdb_delete_transaction(struct db_record *rec);
+
+/*
+ * fetch_locked inside a running transaction: no chainlock is taken
+ * (the transaction g_lock serializes access).  The value is read
+ * preferentially from the transaction's write buffer, falling back to
+ * the local tdb copy (with the ctdb header stripped).  store/delete
+ * on the returned record go through the transaction buffer.
+ */
+static struct db_record *db_ctdb_fetch_locked_transaction(struct db_ctdb_ctx *ctx,
+							  TALLOC_CTX *mem_ctx,
+							  TDB_DATA key)
+{
+	struct db_record *result;
+	TDB_DATA ctdb_data;
+
+	if (!(result = talloc(mem_ctx, struct db_record))) {
+		DEBUG(0, ("talloc failed\n"));
+		return NULL;
+	}
+
+	result->db = ctx->db;
+	result->private_data = ctx->transaction;
+
+	result->key.dsize = key.dsize;
+	result->key.dptr = (uint8_t *)talloc_memdup(result, key.dptr,
+						    key.dsize);
+	if (result->key.dptr == NULL) {
+		DEBUG(0, ("talloc failed\n"));
+		TALLOC_FREE(result);
+		return NULL;
+	}
+
+	result->storev = db_ctdb_storev_transaction;
+	result->delete_rec = db_ctdb_delete_transaction;
+
+	if (ctx->transaction == NULL) {
+		DEBUG(0, ("no transaction available\n"));
+		TALLOC_FREE(result);
+		return NULL;
+	}
+	/* Prefer a value already written under this transaction. */
+	if (pull_newest_from_marshall_buffer(ctx->transaction->m_write, key,
+					     NULL, result, &result->value)) {
+		result->value_valid = true;
+		return result;
+	}
+
+	ctdb_data = tdb_fetch(ctx->wtdb->tdb, key);
+	if (ctdb_data.dptr == NULL) {
+		/* create the record */
+		result->value = tdb_null;
+		result->value_valid = true;
+		return result;
+	}
+
+	/* Strip the leading ctdb header off the local tdb record. */
+	result->value.dsize = ctdb_data.dsize - sizeof(struct ctdb_ltdb_header);
+	result->value.dptr = NULL;
+
+	if ((result->value.dsize != 0)
+	    && !(result->value.dptr = (uint8_t *)talloc_memdup(
+			 result, ctdb_data.dptr + sizeof(struct ctdb_ltdb_header),
+			 result->value.dsize))) {
+		DEBUG(0, ("talloc failed\n"));
+		TALLOC_FREE(result);
+		return NULL;
+	}
+	result->value_valid = true;
+
+	SAFE_FREE(ctdb_data.dptr);
+
+	return result;
+}
+
+/*
+ * Destructor for the implicit transaction created by
+ * db_ctdb_fetch_locked_persistent(): committing the transaction when
+ * the record is released.  Commit failure is only logged; the
+ * destructor always succeeds.
+ */
+static int db_ctdb_record_destructor(struct db_record **recp)
+{
+	struct db_record *rec = talloc_get_type_abort(*recp, struct db_record);
+	struct db_ctdb_transaction_handle *h = talloc_get_type_abort(
+		rec->private_data, struct db_ctdb_transaction_handle);
+	int ret = h->ctx->db->transaction_commit(h->ctx->db);
+	if (ret != 0) {
+		DEBUG(0,(__location__ " transaction_commit failed\n"));
+	}
+	return 0;
+}
+
+/*
+  auto-create a transaction for persistent databases
+
+  A fetch_locked on a persistent db without an explicit transaction
+  opens one behind the scenes; a destructor hung off the record
+  commits it when the record is released.
+ */
+static struct db_record *db_ctdb_fetch_locked_persistent(struct db_ctdb_ctx *ctx,
+							 TALLOC_CTX *mem_ctx,
+							 TDB_DATA key)
+{
+	int res;
+	struct db_record *rec, **recp;
+
+	res = db_ctdb_transaction_start(ctx->db);
+	if (res == -1) {
+		return NULL;
+	}
+
+	rec = db_ctdb_fetch_locked_transaction(ctx, mem_ctx, key);
+	if (rec == NULL) {
+		ctx->db->transaction_cancel(ctx->db);
+		return NULL;
+	}
+
+	/* destroy this transaction when we release the lock */
+	recp = talloc(rec, struct db_record *);
+	if (recp == NULL) {
+		ctx->db->transaction_cancel(ctx->db);
+		talloc_free(rec);
+		return NULL;
+	}
+	*recp = rec;
+	talloc_set_destructor(recp, db_ctdb_record_destructor);
+	return rec;
+}
+
+
+/*
+  stores a record inside a transaction
+
+  The write is not applied to the local tdb here; it is appended to
+  the in-memory marshall buffer (h->m_write) with an updated ctdb
+  header and only distributed to the cluster at commit time.  Storing
+  a value identical to the current on-disk value is a no-op.
+ */
+static NTSTATUS db_ctdb_transaction_store(struct db_ctdb_transaction_handle *h,
+					  TDB_DATA key, TDB_DATA data)
+{
+	TDB_DATA rec;
+	struct ctdb_ltdb_header header;
+
+	ZERO_STRUCT(header);
+
+	/* we need the header so we can update the RSN */
+
+	if (!pull_newest_from_marshall_buffer(h->m_write, key, &header,
+					      NULL, NULL)) {
+		/*
+		 * Key not yet written under this transaction: take
+		 * the header from the local tdb copy, if any.
+		 */
+		rec = tdb_fetch(h->ctx->wtdb->tdb, key);
+
+		if (rec.dptr != NULL) {
+			memcpy(&header, rec.dptr,
+			       sizeof(struct ctdb_ltdb_header));
+			rec.dsize -= sizeof(struct ctdb_ltdb_header);
+
+			/*
+			 * a special case, we are writing the same
+			 * data that is there now
+			 */
+			if (data.dsize == rec.dsize &&
+			    memcmp(data.dptr,
+				   rec.dptr + sizeof(struct ctdb_ltdb_header),
+				   data.dsize) == 0) {
+				SAFE_FREE(rec.dptr);
+				return NT_STATUS_OK;
+			}
+		}
+		SAFE_FREE(rec.dptr);
+	}
+
+	/* Claim dmaster and bump the record sequence number. */
+	header.dmaster = get_my_vnn();
+	header.rsn++;
+
+	h->m_write = db_ctdb_marshall_add(h, h->m_write, h->ctx->db_id, 0, key, &header, data);
+	if (h->m_write == NULL) {
+		DEBUG(0,(__location__ " Failed to add to marshalling record\n"));
+		return NT_STATUS_NO_MEMORY;
+	}
+
+	return NT_STATUS_OK;
+}
+
+
+/*
+  a record store inside a transaction
+
+  Merges the record's current value with the supplied chunks (flag is
+  handled by dbwrap_merge_dbufs) and buffers the result in the
+  transaction's write list.
+ */
+static NTSTATUS db_ctdb_storev_transaction(
+	struct db_record *rec, const TDB_DATA *dbufs, int num_dbufs, int flag)
+{
+	struct db_ctdb_transaction_handle *h = talloc_get_type_abort(
+		rec->private_data, struct db_ctdb_transaction_handle);
+	NTSTATUS status;
+	TDB_DATA data = {0};
+
+	status = dbwrap_merge_dbufs(&data, rec, dbufs, num_dbufs);
+	if (!NT_STATUS_IS_OK(status)) {
+		return status;
+	}
+
+	status = db_ctdb_transaction_store(h, rec->key, data);
+
+	TALLOC_FREE(data.dptr);
+
+	return status;
+}
+
+/*
+  a record delete inside a transaction
+
+  Implemented as buffering a store of the empty value (tdb_null); the
+  tombstone reaches the cluster at commit time.
+ */
+static NTSTATUS db_ctdb_delete_transaction(struct db_record *rec)
+{
+	struct db_ctdb_transaction_handle *h = talloc_get_type_abort(
+		rec->private_data, struct db_ctdb_transaction_handle);
+
+	return db_ctdb_transaction_store(h, rec->key, tdb_null);
+}
+
+/*
+ * Parser callback extracting the uint64 db sequence number from the
+ * CTDB_DB_SEQNUM_KEY record.  Any payload of unexpected size (e.g. a
+ * tombstone) yields seqnum 0.
+ */
+static void db_ctdb_fetch_db_seqnum_parser(
+	TDB_DATA key, struct ctdb_ltdb_header *header,
+	TDB_DATA data, void *private_data)
+{
+	uint64_t *seqnum = (uint64_t *)private_data;
+
+	if (data.dsize != sizeof(uint64_t)) {
+		*seqnum = 0;
+		return;
+	}
+	memcpy(seqnum, data.dptr, sizeof(*seqnum));
+}
+
+/**
+ * Fetch the db sequence number of a persistent db directly from the db.
+ *
+ * A missing seqnum record is not an error: it maps to seqnum 0.
+ */
+static NTSTATUS db_ctdb_fetch_db_seqnum_from_db(struct db_ctdb_ctx *db,
+						uint64_t *seqnum)
+{
+	NTSTATUS status;
+	TDB_DATA key;
+
+	if (seqnum == NULL) {
+		return NT_STATUS_INVALID_PARAMETER;
+	}
+
+	key = string_term_tdb_data(CTDB_DB_SEQNUM_KEY);
+
+	status = db_ctdb_ltdb_parse(
+		db, key, db_ctdb_fetch_db_seqnum_parser, seqnum);
+
+	if (NT_STATUS_IS_OK(status)) {
+		return NT_STATUS_OK;
+	}
+	if (NT_STATUS_EQUAL(status, NT_STATUS_NOT_FOUND)) {
+		/* No seqnum record yet: treat as zero. */
+		*seqnum = 0;
+		return NT_STATUS_OK;
+	}
+	return status;
+}
+
+/**
+ * Store the database sequence number inside a transaction.
+ *
+ * Buffers a write of the little blob holding "seqnum" under the
+ * well-known CTDB_DB_SEQNUM_KEY.
+ */
+static NTSTATUS db_ctdb_store_db_seqnum(struct db_ctdb_transaction_handle *h,
+					uint64_t seqnum)
+{
+	TDB_DATA key = string_term_tdb_data(CTDB_DB_SEQNUM_KEY);
+	TDB_DATA data = {
+		.dptr = (uint8_t *)&seqnum,
+		.dsize = sizeof(seqnum),
+	};
+
+	return db_ctdb_transaction_store(h, key, data);
+}
+
+/*
+  commit a transaction
+
+  As the last buffered write, the per-db sequence number is bumped;
+  then ctdbd is asked to distribute all buffered writes via
+  CTDB_CONTROL_TRANS3_COMMIT.  If that control fails because a
+  recovery ran concurrently, the locally visible seqnum tells us
+  whether the recovery rolled our changes back (retry) or propagated
+  them for us (success).  Returns 0 on success, -1 on error.
+ */
+static int db_ctdb_transaction_commit(struct db_context *db)
+{
+	struct db_ctdb_ctx *ctx = talloc_get_type_abort(db->private_data,
+							struct db_ctdb_ctx);
+	NTSTATUS rets;
+	int32_t status;
+	struct db_ctdb_transaction_handle *h = ctx->transaction;
+	uint64_t old_seqnum, new_seqnum;
+	int ret;
+
+	if (h == NULL) {
+		DEBUG(0,(__location__ " transaction commit with no open transaction on db 0x%08x\n", ctx->db_id));
+		return -1;
+	}
+
+	if (h->nested_cancel) {
+		db->transaction_cancel(db);
+		DEBUG(5,(__location__ " Failed transaction commit after nested cancel\n"));
+		return -1;
+	}
+
+	if (h->nesting != 0) {
+		/* Inner commit of a nested transaction: just unwind. */
+		h->nesting--;
+		DEBUG(5, (__location__ " transaction commit on db 0x%08x: nesting %d -> %d\n",
+			  ctx->db_id, ctx->transaction->nesting + 1, ctx->transaction->nesting));
+		return 0;
+	}
+
+	if (h->m_write == NULL) {
+		/*
+		 * No changes were made, so don't change the seqnum,
+		 * don't push to other node, just exit with success.
+		 */
+		ret = 0;
+		goto done;
+	}
+
+	DEBUG(5,(__location__ " transaction commit on db 0x%08x\n", ctx->db_id));
+
+	/*
+	 * As the last db action before committing, bump the database sequence
+	 * number. Note that this undoes all changes to the seqnum records
+	 * performed under the transaction. This record is not meant to be
+	 * modified by user interaction. It is for internal use only...
+	 */
+	rets = db_ctdb_fetch_db_seqnum_from_db(ctx, &old_seqnum);
+	if (!NT_STATUS_IS_OK(rets)) {
+		DEBUG(1, (__location__ " failed to fetch the db sequence number "
+			  "in transaction commit on db 0x%08x\n", ctx->db_id));
+		ret = -1;
+		goto done;
+	}
+
+	new_seqnum = old_seqnum + 1;
+
+	rets = db_ctdb_store_db_seqnum(h, new_seqnum);
+	if (!NT_STATUS_IS_OK(rets)) {
+		DEBUG(1, (__location__ " failed to store the db sequence number "
+			  "in transaction commit on db 0x%08x\n", ctx->db_id));
+		ret = -1;
+		goto done;
+	}
+
+again:
+	/* tell ctdbd to commit to the other nodes */
+	ret = ctdbd_control_local(messaging_ctdb_connection(),
+				  CTDB_CONTROL_TRANS3_COMMIT,
+				  h->ctx->db_id, 0,
+				  db_ctdb_marshall_finish(h->m_write),
+				  NULL, NULL, &status);
+	if ((ret != 0) || status != 0) {
+		/*
+		 * The TRANS3_COMMIT control should only possibly fail when a
+		 * recovery has been running concurrently. In any case, the db
+		 * will be the same on all nodes, either the new copy or the
+		 * old copy. This can be detected by comparing the old and new
+		 * local sequence numbers.
+		 */
+		rets = db_ctdb_fetch_db_seqnum_from_db(ctx, &new_seqnum);
+		if (!NT_STATUS_IS_OK(rets)) {
+			DEBUG(1, (__location__ " failed to refetch db sequence "
+				  "number after failed TRANS3_COMMIT\n"));
+			ret = -1;
+			goto done;
+		}
+
+		if (new_seqnum == old_seqnum) {
+			/* Recovery prevented all our changes: retry. */
+			goto again;
+		}
+		if (new_seqnum != (old_seqnum + 1)) {
+			DEBUG(0, (__location__ " ERROR: new_seqnum[%lu] != "
+				  "old_seqnum[%lu] + (0 or 1) after failed "
+				  "TRANS3_COMMIT - this should not happen!\n",
+				  (unsigned long)new_seqnum,
+				  (unsigned long)old_seqnum));
+			ret = -1;
+			goto done;
+		}
+		/*
+		 * Recovery propagated our changes to all nodes, completing
+		 * our commit for us - succeed.
+		 */
+	}
+
+	ret = 0;
+
+done:
+	h->ctx->transaction = NULL;
+	talloc_free(h);
+	return ret;
+}
+
+
+/*
+  cancel a transaction
+
+  A nested cancel only marks the outer transaction as poisoned
+  (nested_cancel), which makes the eventual outer commit fail.  The
+  outermost cancel drops the handle; its destructor releases the
+  g_lock and the buffered writes are simply discarded.
+ */
+static int db_ctdb_transaction_cancel(struct db_context *db)
+{
+	struct db_ctdb_ctx *ctx = talloc_get_type_abort(db->private_data,
+							struct db_ctdb_ctx);
+	struct db_ctdb_transaction_handle *h = ctx->transaction;
+
+	if (h == NULL) {
+		DEBUG(0,(__location__ " transaction cancel with no open transaction on db 0x%08x\n", ctx->db_id));
+		return -1;
+	}
+
+	if (h->nesting != 0) {
+		h->nesting--;
+		h->nested_cancel = true;
+		DEBUG(5, (__location__ " transaction cancel on db 0x%08x: nesting %d -> %d\n",
+			  ctx->db_id, ctx->transaction->nesting + 1, ctx->transaction->nesting));
+		return 0;
+	}
+
+	DEBUG(5,(__location__ " Cancel transaction on db 0x%08x\n", ctx->db_id));
+
+	ctx->transaction = NULL;
+	talloc_free(h);
+	return 0;
+}
+
+
+/*
+ * storev on a fetch_locked record of a non-persistent db: write the
+ * chunks together with the cached ctdb header into the local tdb
+ * copy.  NOTE(review): "flag" is ignored; the store always replaces.
+ */
+static NTSTATUS db_ctdb_storev(struct db_record *rec,
+			       const TDB_DATA *dbufs, int num_dbufs, int flag)
+{
+	struct db_ctdb_rec *crec = talloc_get_type_abort(
+		rec->private_data, struct db_ctdb_rec);
+
+	return db_ctdb_ltdb_store(crec->ctdb_ctx, rec->key, &(crec->header),
+				  dbufs, num_dbufs);
+}
+
+
+
+/*
+ * Ask ctdbd (fire-and-forget via CTDB_CTRL_FLAG_NOREPLY) to schedule
+ * the record for deletion by the vacuuming process.  Used after the
+ * record has been reduced to a header-only tombstone.
+ */
+static NTSTATUS db_ctdb_send_schedule_for_deletion(struct db_record *rec)
+{
+	NTSTATUS status = NT_STATUS_OK;
+	int ret;
+	struct ctdb_control_schedule_for_deletion *dd;
+	TDB_DATA indata;
+	int32_t cstatus;
+	struct db_ctdb_rec *crec = talloc_get_type_abort(
+		rec->private_data, struct db_ctdb_rec);
+	struct db_ctdb_ctx *ctx = crec->ctdb_ctx;
+
+	/* Control payload: fixed header plus the record key inline. */
+	indata.dsize = offsetof(struct ctdb_control_schedule_for_deletion, key) + rec->key.dsize;
+	indata.dptr = talloc_zero_array(crec, uint8_t, indata.dsize);
+	if (indata.dptr == NULL) {
+		DEBUG(0, (__location__ " talloc failed!\n"));
+		return NT_STATUS_NO_MEMORY;
+	}
+
+	dd = (struct ctdb_control_schedule_for_deletion *)(void *)indata.dptr;
+	dd->db_id = ctx->db_id;
+	dd->hdr = crec->header;
+	dd->keylen = rec->key.dsize;
+	memcpy(dd->key, rec->key.dptr, rec->key.dsize);
+
+	ret = ctdbd_control_local(messaging_ctdb_connection(),
+				  CTDB_CONTROL_SCHEDULE_FOR_DELETION,
+				  crec->ctdb_ctx->db_id,
+				  CTDB_CTRL_FLAG_NOREPLY, /* flags */
+				  indata,
+				  NULL, /* mem_ctx */
+				  NULL, /* outdata */
+				  &cstatus);
+	talloc_free(indata.dptr);
+
+	if ((ret != 0) || cstatus != 0) {
+		DEBUG(1, (__location__ " Error sending local control "
+			  "SCHEDULE_FOR_DELETION: %s, cstatus = %"PRIi32"\n",
+			  strerror(ret), cstatus));
+		if (ret != 0) {
+			status = map_nt_error_from_unix(ret);
+		} else {
+			status = NT_STATUS_UNSUCCESSFUL;
+		}
+	}
+
+	return status;
+}
+
+/*
+ * Delete on a non-persistent record: write a header-only tombstone
+ * locally, then ask ctdbd to schedule the actual removal (vacuuming).
+ */
+static NTSTATUS db_ctdb_delete(struct db_record *rec)
+{
+	NTSTATUS status;
+
+	/*
+	 * We have to store the header with empty data. TODO: Fix the
+	 * tdb-level cleanup
+	 */
+
+	status = db_ctdb_storev(rec, &tdb_null, 1, 0);
+	if (!NT_STATUS_IS_OK(status)) {
+		return status;
+	}
+
+	status = db_ctdb_send_schedule_for_deletion(rec);
+	return status;
+}
+
+/*
+ * Destructor of a fetch_locked record on a non-persistent db:
+ * releases the tdb chainlock and emits warnings when unlocking took
+ * too long or the lock was held longer than the configured
+ * thresholds.
+ */
+static int db_ctdb_record_destr(struct db_record* data)
+{
+	struct db_ctdb_rec *crec = talloc_get_type_abort(
+		data->private_data, struct db_ctdb_rec);
+	int threshold;
+	int ret;
+	struct timeval before;
+	double timediff;
+
+	DEBUG(10, (DEBUGLEVEL > 10
+		   ? "Unlocking db %u key %s\n"
+		   : "Unlocking db %u key %.20s\n",
+		   (int)crec->ctdb_ctx->db_id,
+		   hex_encode_talloc(data, (unsigned char *)data->key.dptr,
+				     data->key.dsize)));
+
+	before = timeval_current();
+
+	ret = tdb_chainunlock(crec->ctdb_ctx->wtdb->tdb, data->key);
+
+	timediff = timeval_elapsed(&before);
+	timediff *= 1000;	/* get us milliseconds */
+
+	if (timediff > crec->ctdb_ctx->warn_unlock_msecs) {
+		char *key;
+		key = hex_encode_talloc(talloc_tos(),
+					(unsigned char *)data->key.dptr,
+					data->key.dsize);
+		DEBUG(0, ("tdb_chainunlock on db %s, key %s took %f milliseconds\n",
+			  tdb_name(crec->ctdb_ctx->wtdb->tdb), key,
+			  timediff));
+		TALLOC_FREE(key);
+	}
+
+	if (ret != 0) {
+		DEBUG(0, ("tdb_chainunlock failed\n"));
+		return -1;
+	}
+
+	/* Warn if the lock was held longer than warn_locktime_msecs. */
+	threshold = crec->ctdb_ctx->warn_locktime_msecs;
+	if (threshold != 0) {
+		timediff = timeval_elapsed(&crec->lock_time) * 1000;
+		if (timediff > threshold) {
+			const char *key;
+
+			key = hex_encode_talloc(data,
+						(unsigned char *)data->key.dptr,
+						data->key.dsize);
+			DEBUG(0, ("Held tdb lock on db %s, key %s "
+				  "%f milliseconds\n",
+				  tdb_name(crec->ctdb_ctx->wtdb->tdb),
+				  key, timediff));
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * Check whether we have a valid local copy of the given record,
+ * either for reading or for writing, judging by its ctdb header.
+ */
+static bool db_ctdb_can_use_local_hdr(const struct ctdb_ltdb_header *hdr,
+				      uint32_t my_vnn, bool read_only)
+{
+	bool have_ro_copy =
+		(hdr->flags & CTDB_REC_RO_HAVE_READONLY) != 0;
+	bool have_delegations =
+		(hdr->flags & CTDB_REC_RO_HAVE_DELEGATIONS) != 0;
+
+	if (hdr->dmaster != my_vnn) {
+		/* Not dmaster: only usable via a registered r/o copy. */
+		return read_only && have_ro_copy;
+	}
+
+	/* We are dmaster: writing requires no outstanding r/o copies. */
+	return read_only || !have_delegations;
+}
+
+/*
+ * Raw-record variant of db_ctdb_can_use_local_hdr(): rejects missing
+ * or too-short records, then defers to the header check.
+ */
+static bool db_ctdb_can_use_local_copy(TDB_DATA ctdb_data, uint32_t my_vnn,
+				       bool read_only)
+{
+	if ((ctdb_data.dptr == NULL) ||
+	    (ctdb_data.dsize < sizeof(struct ctdb_ltdb_header))) {
+		/* No record, or no room for a ctdb header. */
+		return false;
+	}
+
+	return db_ctdb_can_use_local_hdr(
+		(struct ctdb_ltdb_header *)ctdb_data.dptr, my_vnn, read_only);
+}
+
+/*
+ * fetch_locked on a non-persistent db: chainlock the key in the local
+ * tdb and, if we are not dmaster of a usable copy, migrate the record
+ * here via ctdbd and retry until the locked local copy is
+ * authoritative.  Warnings are emitted when migration takes too many
+ * attempts or too long.  The returned record's value has the ctdb
+ * header stripped; the header is cached in the crec for later stores.
+ */
+static struct db_record *fetch_locked_internal(struct db_ctdb_ctx *ctx,
+					       TALLOC_CTX *mem_ctx,
+					       TDB_DATA key)
+{
+	struct db_record *result;
+	struct db_ctdb_rec *crec;
+	TDB_DATA ctdb_data;
+	int migrate_attempts;
+	struct timeval migrate_start;
+	struct timeval chainlock_start;
+	struct timeval ctdb_start_time;
+	double chainlock_time = 0;
+	double ctdb_time = 0;
+	int duration_msecs;
+	int lockret;
+	int ret;
+
+	if (!(result = talloc(mem_ctx, struct db_record))) {
+		DEBUG(0, ("talloc failed\n"));
+		return NULL;
+	}
+
+	if (!(crec = talloc_zero(result, struct db_ctdb_rec))) {
+		DEBUG(0, ("talloc failed\n"));
+		TALLOC_FREE(result);
+		return NULL;
+	}
+
+	result->db = ctx->db;
+	result->private_data = (void *)crec;
+	crec->ctdb_ctx = ctx;
+
+	result->key.dsize = key.dsize;
+	result->key.dptr = (uint8_t *)talloc_memdup(result, key.dptr,
+						    key.dsize);
+	if (result->key.dptr == NULL) {
+		DEBUG(0, ("talloc failed\n"));
+		TALLOC_FREE(result);
+		return NULL;
+	}
+
+	migrate_attempts = 0;
+	GetTimeOfDay(&migrate_start);
+
+	/*
+	 * Do a blocking lock on the record
+	 */
+again:
+
+	if (DEBUGLEVEL >= 10) {
+		char *keystr = hex_encode_talloc(result, key.dptr, key.dsize);
+		DEBUG(10, (DEBUGLEVEL > 10
+			   ? "Locking db %u key %s\n"
+			   : "Locking db %u key %.20s\n",
+			   (int)crec->ctdb_ctx->db_id, keystr));
+		TALLOC_FREE(keystr);
+	}
+
+	GetTimeOfDay(&chainlock_start);
+	lockret = tdb_chainlock(ctx->wtdb->tdb, key);
+	chainlock_time += timeval_elapsed(&chainlock_start);
+
+	if (lockret != 0) {
+		DEBUG(3, ("tdb_chainlock failed\n"));
+		TALLOC_FREE(result);
+		return NULL;
+	}
+
+	result->storev = db_ctdb_storev;
+	result->delete_rec = db_ctdb_delete;
+	talloc_set_destructor(result, db_ctdb_record_destr);
+
+	ctdb_data = tdb_fetch(ctx->wtdb->tdb, key);
+
+	/*
+	 * See if we have a valid record and we are the dmaster. If so, we can
+	 * take the shortcut and just return it.
+	 */
+
+	if (!db_ctdb_can_use_local_copy(ctdb_data, get_my_vnn(), false)) {
+		/* Drop the chainlock before asking ctdbd to migrate. */
+		SAFE_FREE(ctdb_data.dptr);
+		tdb_chainunlock(ctx->wtdb->tdb, key);
+		talloc_set_destructor(result, NULL);
+
+		migrate_attempts += 1;
+
+		DEBUG(10, ("ctdb_data.dptr = %p, dmaster = %"PRIu32" "
+			   "(%"PRIu32") %"PRIu32"\n",
+			   ctdb_data.dptr, ctdb_data.dptr ?
+			   ((struct ctdb_ltdb_header *)ctdb_data.dptr)->dmaster :
+			   UINT32_MAX,
+			   get_my_vnn(),
+			   ctdb_data.dptr ?
+			   ((struct ctdb_ltdb_header *)ctdb_data.dptr)->flags : 0));
+
+		GetTimeOfDay(&ctdb_start_time);
+		ret = ctdbd_migrate(messaging_ctdb_connection(), ctx->db_id,
+				    key);
+		ctdb_time += timeval_elapsed(&ctdb_start_time);
+
+		if (ret != 0) {
+			DEBUG(5, ("ctdbd_migrate failed: %s\n",
+				  strerror(ret)));
+			TALLOC_FREE(result);
+			return NULL;
+		}
+		/* now its migrated, try again */
+		goto again;
+	}
+
+	{
+		double duration;
+		duration = timeval_elapsed(&migrate_start);
+
+		/*
+		 * Convert the duration to milliseconds to avoid a
+		 * floating-point division of
+		 * lp_parm_int("migrate_duration") by 1000.
+		 */
+		duration_msecs = duration * 1000;
+	}
+
+	if ((migrate_attempts > ctx->warn_migrate_attempts) ||
+	    (duration_msecs > ctx->warn_migrate_msecs)) {
+		int chain = 0;
+
+		if (tdb_get_flags(ctx->wtdb->tdb) & TDB_INCOMPATIBLE_HASH) {
+			chain = tdb_jenkins_hash(&key) %
+				tdb_hash_size(ctx->wtdb->tdb);
+		}
+
+		DEBUG(0, ("db_ctdb_fetch_locked for %s key %s, chain %d "
+			  "needed %d attempts, %d milliseconds, "
+			  "chainlock: %f ms, CTDB %f ms\n",
+			  tdb_name(ctx->wtdb->tdb),
+			  hex_encode_talloc(talloc_tos(),
+					    (unsigned char *)key.dptr,
+					    key.dsize),
+			  chain,
+			  migrate_attempts, duration_msecs,
+			  chainlock_time * 1000.0,
+			  ctdb_time * 1000.0));
+	}
+
+	GetTimeOfDay(&crec->lock_time);
+
+	/* Cache the header; strip it from the returned value. */
+	memcpy(&crec->header, ctdb_data.dptr, sizeof(crec->header));
+
+	result->value.dsize = ctdb_data.dsize - sizeof(crec->header);
+	result->value.dptr = NULL;
+
+	if (result->value.dsize != 0) {
+		result->value.dptr = talloc_memdup(
+			result, ctdb_data.dptr + sizeof(crec->header),
+			result->value.dsize);
+		if (result->value.dptr == NULL) {
+			DBG_ERR("talloc failed\n");
+			TALLOC_FREE(result);
+			return NULL;
+		}
+	}
+	result->value_valid = true;
+
+	SAFE_FREE(ctdb_data.dptr);
+
+	return result;
+}
+
+static struct db_record *db_ctdb_fetch_locked(struct db_context *db,
+ TALLOC_CTX *mem_ctx,
+ TDB_DATA key)
+{
+ struct db_ctdb_ctx *ctx = talloc_get_type_abort(db->private_data,
+ struct db_ctdb_ctx);
+
+ if (ctx->transaction != NULL) {
+ return db_ctdb_fetch_locked_transaction(ctx, mem_ctx, key);
+ }
+
+ if (db->persistent) {
+ return db_ctdb_fetch_locked_persistent(ctx, mem_ctx, key);
+ }
+
+ return fetch_locked_internal(ctx, mem_ctx, key);
+}
+
/* State shared between the parse-record entry points and the
 * low-level ltdb parser callbacks. */
struct db_ctdb_parse_record_state {
	/* User-supplied callback, invoked with the record's key/value */
	void (*parser)(TDB_DATA key, TDB_DATA data, void *private_data);
	/* Opaque pointer passed through to "parser" */
	void *private_data;
	/* Our own ctdb node number, used to check local usability */
	uint32_t my_vnn;
	/* Out: record looks popular; request a read-only copy from ctdb */
	bool ask_for_readonly_copy;
	/* Out: record was handled locally, no ctdbd round-trip needed */
	bool done;
	/* Out: record exists but has no payload beyond the ctdb header */
	bool empty_record;
};
+
+static void db_ctdb_parse_record_parser(
+ TDB_DATA key, struct ctdb_ltdb_header *header,
+ TDB_DATA data, void *private_data)
+{
+ struct db_ctdb_parse_record_state *state =
+ (struct db_ctdb_parse_record_state *)private_data;
+ state->parser(key, data, state->private_data);
+}
+
+static void db_ctdb_parse_record_parser_nonpersistent(
+ TDB_DATA key, struct ctdb_ltdb_header *header,
+ TDB_DATA data, void *private_data)
+{
+ struct db_ctdb_parse_record_state *state =
+ (struct db_ctdb_parse_record_state *)private_data;
+
+ if (db_ctdb_can_use_local_hdr(header, state->my_vnn, true)) {
+ /*
+ * A record consisting only of the ctdb header can be
+ * a validly created empty record or a tombstone
+ * record of a deleted record (not vacuumed yet). Mark
+ * it accordingly.
+ */
+ state->empty_record = (data.dsize == 0);
+ if (!state->empty_record) {
+ state->parser(key, data, state->private_data);
+ }
+ state->done = true;
+ } else {
+ /*
+ * We found something in the db, so it seems that this record,
+ * while not usable locally right now, is popular. Ask for a
+ * R/O copy.
+ */
+ state->ask_for_readonly_copy = true;
+ }
+}
+
/*
 * Try to satisfy a parse request from purely local state:
 * - an open transaction's write buffer (persistent dbs only),
 * - the local tdb copy of a persistent database,
 * - a non-persistent record that is locally usable.
 *
 * Returns NT_STATUS_MORE_PROCESSING_REQUIRED if the request could not
 * be answered locally and the caller must go through ctdbd.
 */
static NTSTATUS db_ctdb_try_parse_local_record(struct db_ctdb_ctx *ctx,
					       TDB_DATA key,
					       struct db_ctdb_parse_record_state *state)
{
	NTSTATUS status;

	if (ctx->transaction != NULL) {
		struct db_ctdb_transaction_handle *h = ctx->transaction;
		bool found;

		/*
		 * Transactions only happen for persistent db's.
		 */

		found = parse_newest_in_marshall_buffer(
			h->m_write, key, db_ctdb_parse_record_parser, state);

		if (found) {
			/* Answered from the uncommitted write buffer */
			return NT_STATUS_OK;
		}
	}

	if (ctx->db->persistent) {
		/*
		 * Persistent db, but not found in the transaction buffer
		 */
		return db_ctdb_ltdb_parse(
			ctx, key, db_ctdb_parse_record_parser, state);
	}

	/* Non-persistent: the parser callback fills in these two flags */
	state->done = false;
	state->ask_for_readonly_copy = false;

	status = db_ctdb_ltdb_parse(
		ctx, key, db_ctdb_parse_record_parser_nonpersistent, state);
	if (NT_STATUS_IS_OK(status) && state->done) {
		if (state->empty_record) {
			/*
			 * We know authoritatively, that this is an empty
			 * record. Since ctdb does not distinguish between empty
			 * and deleted records, this can be a record stored as
			 * empty or a not-yet-vacuumed tombstone record of a
			 * deleted record. Now Samba right now can live without
			 * empty records, so we can safely report this record
			 * as non-existing.
			 *
			 * See bugs 10008 and 12005.
			 */
			return NT_STATUS_NOT_FOUND;
		}
		return NT_STATUS_OK;
	}

	return NT_STATUS_MORE_PROCESSING_REQUIRED;
}
+
/*
 * Parse a record without copying it out. First try the local copies
 * (transaction buffer, local tdb); only if that fails, ask ctdbd,
 * possibly requesting a read-only delegation for hot records.
 */
static NTSTATUS db_ctdb_parse_record(struct db_context *db, TDB_DATA key,
				     void (*parser)(TDB_DATA key,
						    TDB_DATA data,
						    void *private_data),
				     void *private_data)
{
	struct db_ctdb_ctx *ctx = talloc_get_type_abort(
		db->private_data, struct db_ctdb_ctx);
	struct db_ctdb_parse_record_state state;
	NTSTATUS status;
	int ret;

	state.parser = parser;
	state.private_data = private_data;
	state.my_vnn = get_my_vnn();
	state.empty_record = false;

	status = db_ctdb_try_parse_local_record(ctx, key, &state);
	if (!NT_STATUS_EQUAL(status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
		/* Answered locally, either success or a real error */
		return status;
	}

	/* Not available locally: have ctdbd migrate or copy the record */
	ret = ctdbd_parse(messaging_ctdb_connection(), ctx->db_id, key,
			  state.ask_for_readonly_copy, parser, private_data);
	if (ret != 0) {
		if (ret == ENOENT) {
			/*
			 * This maps to
			 * NT_STATUS_OBJECT_NAME_NOT_FOUND. Our upper
			 * layers expect NT_STATUS_NOT_FOUND for "no
			 * record around". We need to convert dbwrap
			 * to 0/errno away from NTSTATUS ... :-)
			 */
			return NT_STATUS_NOT_FOUND;
		}
		return map_nt_error_from_unix(ret);
	}
	return NT_STATUS_OK;
}
+
+static void db_ctdb_parse_record_done(struct tevent_req *subreq);
+
/*
 * Async variant of db_ctdb_parse_record(): try the local copies
 * first; only if that fails, issue an async parse request over the
 * dedicated ctdb connection (ctdb_async_ctx).
 *
 * "*req_state" is kept up to date so callers can distinguish
 * immediately-finished requests from ones that actually went async.
 */
static struct tevent_req *db_ctdb_parse_record_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct db_context *db,
	TDB_DATA key,
	void (*parser)(TDB_DATA key,
		       TDB_DATA data,
		       void *private_data),
	void *private_data,
	enum dbwrap_req_state *req_state)
{
	struct db_ctdb_ctx *ctx = talloc_get_type_abort(
		db->private_data, struct db_ctdb_ctx);
	struct tevent_req *req = NULL;
	struct tevent_req *subreq = NULL;
	struct db_ctdb_parse_record_state *state = NULL;
	NTSTATUS status;

	req = tevent_req_create(mem_ctx, &state,
				struct db_ctdb_parse_record_state);
	if (req == NULL) {
		*req_state = DBWRAP_REQ_ERROR;
		return NULL;

	}

	*state = (struct db_ctdb_parse_record_state) {
		.parser = parser,
		.private_data = private_data,
		.my_vnn = get_my_vnn(),
		.empty_record = false,
	};

	status = db_ctdb_try_parse_local_record(ctx, key, state);
	if (!NT_STATUS_EQUAL(status, NT_STATUS_MORE_PROCESSING_REQUIRED)) {
		/* Handled locally: finish the request immediately */
		if (tevent_req_nterror(req, status)) {
			*req_state = DBWRAP_REQ_ERROR;
			return tevent_req_post(req, ev);
		}
		*req_state = DBWRAP_REQ_DONE;
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}

	/* Not available locally: go through ctdbd asynchronously */
	subreq = ctdbd_parse_send(state,
				  ev,
				  ctdb_async_ctx.async_conn,
				  ctx->db_id,
				  key,
				  state->ask_for_readonly_copy,
				  parser,
				  private_data,
				  req_state);
	if (tevent_req_nomem(subreq, req)) {
		*req_state = DBWRAP_REQ_ERROR;
		return tevent_req_post(req, ev);
	}
	tevent_req_set_callback(subreq, db_ctdb_parse_record_done, req);

	return req;
}
+
+static void db_ctdb_parse_record_done(struct tevent_req *subreq)
+{
+ struct tevent_req *req = tevent_req_callback_data(
+ subreq, struct tevent_req);
+ int ret;
+
+ ret = ctdbd_parse_recv(subreq);
+ TALLOC_FREE(subreq);
+ if (ret != 0) {
+ if (ret == ENOENT) {
+ /*
+ * This maps to NT_STATUS_OBJECT_NAME_NOT_FOUND. Our
+ * upper layers expect NT_STATUS_NOT_FOUND for "no
+ * record around". We need to convert dbwrap to 0/errno
+ * away from NTSTATUS ... :-)
+ */
+ tevent_req_nterror(req, NT_STATUS_NOT_FOUND);
+ return;
+ }
+ tevent_req_nterror(req, map_nt_error_from_unix(ret));
+ return;
+ }
+
+ tevent_req_done(req);
+ return;
+}
+
+static NTSTATUS db_ctdb_parse_record_recv(struct tevent_req *req)
+{
+ return tevent_req_simple_recv_ntstatus(req);
+}
+
/* Shared state for the traverse callbacks below. */
struct traverse_state {
	/* Database being traversed */
	struct db_context *db;
	/* User callback invoked per record */
	int (*fn)(struct db_record *rec, void *private_data);
	/* Opaque pointer passed through to "fn" */
	void *private_data;
	/* Number of records handed to "fn" so far */
	int count;
};
+
+static void traverse_callback(TDB_DATA key, TDB_DATA data, void *private_data)
+{
+ struct traverse_state *state = (struct traverse_state *)private_data;
+ struct db_record *rec = NULL;
+ TALLOC_CTX *tmp_ctx = NULL;
+
+ tmp_ctx = talloc_new(state->db);
+ if (tmp_ctx == NULL) {
+ DBG_ERR("talloc_new failed\n");
+ return;
+ }
+
+ /* we have to give them a locked record to prevent races */
+ rec = db_ctdb_fetch_locked(state->db, tmp_ctx, key);
+ if (rec != NULL && rec->value.dsize > 0) {
+ state->fn(rec, state->private_data);
+ state->count++;
+ }
+ talloc_free(tmp_ctx);
+}
+
+static int traverse_persistent_callback(TDB_CONTEXT *tdb, TDB_DATA kbuf, TDB_DATA dbuf,
+ void *private_data)
+{
+ struct traverse_state *state = (struct traverse_state *)private_data;
+ struct db_record *rec;
+ TALLOC_CTX *tmp_ctx = talloc_new(state->db);
+ int ret = 0;
+
+ /*
+ * Skip the __db_sequence_number__ key:
+ * This is used for persistent transactions internally.
+ */
+ if (kbuf.dsize == strlen(CTDB_DB_SEQNUM_KEY) + 1 &&
+ strcmp((const char*)kbuf.dptr, CTDB_DB_SEQNUM_KEY) == 0)
+ {
+ goto done;
+ }
+
+ /* we have to give them a locked record to prevent races */
+ rec = db_ctdb_fetch_locked(state->db, tmp_ctx, kbuf);
+ if (rec && rec->value.dsize > 0) {
+ ret = state->fn(rec, state->private_data);
+ }
+
+done:
+ talloc_free(tmp_ctx);
+ return ret;
+}
+
+/* wrapper to use traverse_persistent_callback with dbwrap */
+static int traverse_persistent_callback_dbwrap(struct db_record *rec, void* data)
+{
+ return traverse_persistent_callback(NULL, rec->key, rec->value, data);
+}
+
+static int db_ctdbd_traverse(uint32_t db_id,
+ void (*fn)(TDB_DATA key, TDB_DATA data,
+ void *private_data),
+ void *private_data)
+{
+ struct ctdbd_connection *conn;
+ int ret;
+
+ become_root();
+ ret = ctdbd_init_connection(talloc_tos(), lp_ctdbd_socket(),
+ lp_ctdb_timeout(), &conn);
+ unbecome_root();
+ if (ret != 0) {
+ DBG_WARNING("ctdbd_init_connection failed: %s\n",
+ strerror(ret));
+ return ret;
+ }
+
+ ret = ctdbd_traverse(conn, db_id, fn, private_data);
+ TALLOC_FREE(conn);
+
+ if (ret != 0) {
+ DBG_WARNING("ctdbd_traverse failed: %s\n",
+ strerror(ret));
+ return ret;
+ }
+
+ return 0;
+}
+
+
/*
 * Traverse with writable (locked) records.
 *
 * Persistent databases are traversed via the local tdb, which is much
 * faster than a cluster-wide traverse; if a transaction is open, keys
 * that only exist in the transaction's write buffer are traversed
 * afterwards. Non-persistent databases go through ctdbd.
 *
 * Returns the number of records visited, or -1 on error.
 */
static int db_ctdb_traverse(struct db_context *db,
			    int (*fn)(struct db_record *rec,
				      void *private_data),
			    void *private_data)
{
	int ret;
	struct db_ctdb_ctx *ctx = talloc_get_type_abort(db->private_data,
							struct db_ctdb_ctx);
	struct traverse_state state;

	state = (struct traverse_state) {
		.db = db,
		.fn = fn,
		.private_data = private_data,
	};

	if (db->persistent) {
		struct tdb_context *ltdb = ctx->wtdb->tdb;

		/* for persistent databases we don't need to do a ctdb traverse,
		   we can do a faster local traverse */
		ret = tdb_traverse(ltdb, traverse_persistent_callback, &state);
		if (ret < 0) {
			return ret;
		}
		if (ctx->transaction && ctx->transaction->m_write) {
			/*
			 * we now have to handle keys not yet
			 * present at transaction start
			 */
			struct db_context *newkeys = db_open_rbt(talloc_tos());
			struct ctdb_marshall_buffer *mbuf = ctx->transaction->m_write;
			struct ctdb_rec_data_old *rec=NULL;
			uint32_t i;
			int count = 0;
			NTSTATUS status;

			if (newkeys == NULL) {
				return -1;
			}

			/*
			 * Collect the keys from the write buffer that the
			 * tdb traverse above has not already seen.
			 */
			for (i=0; i<mbuf->count; i++) {
				TDB_DATA key;
				rec = db_ctdb_marshall_loop_next_key(
					mbuf, rec, &key);
				SMB_ASSERT(rec != NULL);

				if (!tdb_exists(ltdb, key)) {
					dbwrap_store(newkeys, key, tdb_null, 0);
				}
			}
			/* ... and traverse those extra keys as well */
			status = dbwrap_traverse(newkeys,
						 traverse_persistent_callback_dbwrap,
						 &state,
						 &count);
			talloc_free(newkeys);
			if (!NT_STATUS_IS_OK(status)) {
				return -1;
			}
			ret += count;
		}
		return ret;
	}

	ret = db_ctdbd_traverse(ctx->db_id, traverse_callback, &state);
	if (ret != 0) {
		return -1;
	}
	return state.count;
}
+
+static NTSTATUS db_ctdb_storev_deny(struct db_record *rec,
+ const TDB_DATA *dbufs, int num_dbufs, int flag)
+{
+ return NT_STATUS_MEDIA_WRITE_PROTECTED;
+}
+
+static NTSTATUS db_ctdb_delete_deny(struct db_record *rec)
+{
+ return NT_STATUS_MEDIA_WRITE_PROTECTED;
+}
+
+static void traverse_read_callback(TDB_DATA key, TDB_DATA data, void *private_data)
+{
+ struct traverse_state *state = (struct traverse_state *)private_data;
+ struct db_record rec;
+
+ ZERO_STRUCT(rec);
+ rec.db = state->db;
+ rec.key = key;
+ rec.value = data;
+ rec.storev = db_ctdb_storev_deny;
+ rec.delete_rec = db_ctdb_delete_deny;
+ rec.private_data = NULL;
+ rec.value_valid = true;
+ state->fn(&rec, state->private_data);
+ state->count++;
+}
+
+static int traverse_persistent_callback_read(TDB_CONTEXT *tdb, TDB_DATA kbuf, TDB_DATA dbuf,
+ void *private_data)
+{
+ struct traverse_state *state = (struct traverse_state *)private_data;
+ struct db_record rec;
+
+ /*
+ * Skip the __db_sequence_number__ key:
+ * This is used for persistent transactions internally.
+ */
+ if (kbuf.dsize == strlen(CTDB_DB_SEQNUM_KEY) + 1 &&
+ strcmp((const char*)kbuf.dptr, CTDB_DB_SEQNUM_KEY) == 0)
+ {
+ return 0;
+ }
+
+ ZERO_STRUCT(rec);
+ rec.db = state->db;
+ rec.key = kbuf;
+ rec.value = dbuf;
+ rec.value_valid = true;
+ rec.storev = db_ctdb_storev_deny;
+ rec.delete_rec = db_ctdb_delete_deny;
+ rec.private_data = NULL;
+
+ if (rec.value.dsize <= sizeof(struct ctdb_ltdb_header)) {
+ /* a deleted record */
+ return 0;
+ }
+ rec.value.dsize -= sizeof(struct ctdb_ltdb_header);
+ rec.value.dptr += sizeof(struct ctdb_ltdb_header);
+
+ state->count++;
+ return state->fn(&rec, state->private_data);
+}
+
+static int db_ctdb_traverse_read(struct db_context *db,
+ int (*fn)(struct db_record *rec,
+ void *private_data),
+ void *private_data)
+{
+ int ret;
+ struct db_ctdb_ctx *ctx = talloc_get_type_abort(db->private_data,
+ struct db_ctdb_ctx);
+ struct traverse_state state;
+
+ state = (struct traverse_state) {
+ .db = db,
+ .fn = fn,
+ .private_data = private_data,
+ };
+
+ if (db->persistent) {
+ /* for persistent databases we don't need to do a ctdb traverse,
+ we can do a faster local traverse */
+ int nrecs;
+
+ nrecs = tdb_traverse_read(ctx->wtdb->tdb,
+ traverse_persistent_callback_read,
+ &state);
+ if (nrecs == -1) {
+ return -1;
+ }
+ return state.count;
+ }
+
+ ret = db_ctdbd_traverse(ctx->db_id, traverse_read_callback, &state);
+ if (ret != 0) {
+ return -1;
+ }
+ return state.count;
+}
+
+static int db_ctdb_get_seqnum(struct db_context *db)
+{
+ struct db_ctdb_ctx *ctx = talloc_get_type_abort(db->private_data,
+ struct db_ctdb_ctx);
+ return tdb_get_seqnum(ctx->wtdb->tdb);
+}
+
+static size_t db_ctdb_id(struct db_context *db, uint8_t *id, size_t idlen)
+{
+ struct db_ctdb_ctx *ctx = talloc_get_type_abort(
+ db->private_data, struct db_ctdb_ctx);
+
+ if (idlen >= sizeof(ctx->db_id)) {
+ memcpy(id, &ctx->db_id, sizeof(ctx->db_id));
+ }
+
+ return sizeof(ctx->db_id);
+}
+
/*
 * Open a database via the ctdb backend.
 *
 * Attaches to the database in the ctdb daemon, applies per-database
 * ctdb controls (seqnum updates, read-only delegations), then opens
 * the local tdb copy that ctdb maintains on this node. Returns NULL
 * on any failure; errno may be set by lower layers.
 */
struct db_context *db_open_ctdb(TALLOC_CTX *mem_ctx,
				struct messaging_context *msg_ctx,
				const char *name,
				int hash_size, int tdb_flags,
				int open_flags, mode_t mode,
				enum dbwrap_lock_order lock_order,
				uint64_t dbwrap_flags)
{
	struct db_context *result;
	struct db_ctdb_ctx *db_ctdb;
	char *db_path;
	struct loadparm_context *lp_ctx;
	TDB_DATA data;
	TDB_DATA outdata = {0};
	/* ctdb convention: databases without TDB_CLEAR_IF_FIRST persist */
	bool persistent = (tdb_flags & TDB_CLEAR_IF_FIRST) == 0;
	int32_t cstatus;
	int ret;

	if (!lp_clustering()) {
		DEBUG(10, ("Clustering disabled -- no ctdb\n"));
		return NULL;
	}

	if (!(result = talloc_zero(mem_ctx, struct db_context))) {
		DEBUG(0, ("talloc failed\n"));
		TALLOC_FREE(result);
		return NULL;
	}

	if (!(db_ctdb = talloc(result, struct db_ctdb_ctx))) {
		DEBUG(0, ("talloc failed\n"));
		TALLOC_FREE(result);
		return NULL;
	}

	result->name = talloc_strdup(result, name);
	if (result->name == NULL) {
		DEBUG(0, ("talloc failed\n"));
		TALLOC_FREE(result);
		return NULL;
	}

	db_ctdb->transaction = NULL;
	db_ctdb->db = result;

	/* Attach to (or create) the database in the ctdb daemon */
	ret = ctdbd_db_attach(messaging_ctdb_connection(), name,
			      &db_ctdb->db_id, persistent);
	if (ret != 0) {
		DEBUG(0, ("ctdbd_db_attach failed for %s: %s\n", name,
			  strerror(ret)));
		TALLOC_FREE(result);
		return NULL;
	}

	if (tdb_flags & TDB_SEQNUM) {
		/* Have ctdbd maintain the tdb sequence number for us */
		data.dptr = (uint8_t *)&db_ctdb->db_id;
		data.dsize = sizeof(db_ctdb->db_id);

		ret = ctdbd_control_local(messaging_ctdb_connection(),
					  CTDB_CONTROL_ENABLE_SEQNUM,
					  0, 0, data,
					  NULL, NULL, &cstatus);
		if ((ret != 0) || cstatus != 0) {
			DBG_ERR("ctdb_control for enable seqnum "
				"failed: %s\n", strerror(ret));
			TALLOC_FREE(result);
			return NULL;
		}
	}

	/* Ask ctdbd where it keeps the local tdb file for this db */
	db_path = ctdbd_dbpath(messaging_ctdb_connection(), db_ctdb,
			       db_ctdb->db_id);
	if (db_path == NULL) {
		DBG_ERR("ctdbd_dbpath failed\n");
		TALLOC_FREE(result);
		return NULL;
	}

	result->persistent = persistent;
	result->lock_order = lock_order;

	/*
	 * Get the tdb open flags ctdbd used, so our local open below
	 * matches the daemon's view of the database.
	 */
	data.dptr = (uint8_t *)&db_ctdb->db_id;
	data.dsize = sizeof(db_ctdb->db_id);

	ret = ctdbd_control_local(messaging_ctdb_connection(),
				  CTDB_CONTROL_DB_OPEN_FLAGS,
				  0, 0, data, NULL, &outdata, &cstatus);
	if (ret != 0) {
		DBG_ERR(" ctdb control for db_open_flags "
			"failed: %s\n", strerror(ret));
		TALLOC_FREE(result);
		return NULL;
	}

	if (cstatus != 0 || outdata.dsize != sizeof(int)) {
		DBG_ERR("ctdb_control for db_open_flags failed\n");
		TALLOC_FREE(outdata.dptr);
		TALLOC_FREE(result);
		return NULL;
	}

	tdb_flags = *(int *)outdata.dptr;
	TALLOC_FREE(outdata.dptr);

	if (!result->persistent) {
		/* The async connection is needed for parse_record_send */
		ret = ctdb_async_ctx_init(NULL, messaging_tevent_context(msg_ctx));
		if (ret != 0) {
			DBG_ERR("ctdb_async_ctx_init failed: %s\n", strerror(ret));
			TALLOC_FREE(result);
			return NULL;
		}
	}

	if (!result->persistent &&
	    (dbwrap_flags & DBWRAP_FLAG_OPTIMIZE_READONLY_ACCESS))
	{
		/* Enable read-only record delegations for this db */
		TDB_DATA indata;

		indata = make_tdb_data((uint8_t *)&db_ctdb->db_id,
				       sizeof(db_ctdb->db_id));

		ret = ctdbd_control_local(
			messaging_ctdb_connection(),
			CTDB_CONTROL_SET_DB_READONLY, 0, 0,
			indata, NULL, NULL, &cstatus);
		if ((ret != 0) || (cstatus != 0)) {
			DEBUG(1, ("CTDB_CONTROL_SET_DB_READONLY failed: "
				  "%s, %"PRIi32"\n", strerror(ret), cstatus));
			TALLOC_FREE(result);
			return NULL;
		}
	}

	lp_ctx = loadparm_init_s3(db_path, loadparm_s3_helpers());

	if (hash_size == 0) {
		hash_size = lpcfg_tdb_hash_size(lp_ctx, db_path);
	}

	/* Open the local tdb copy that ctdbd maintains for this node */
	db_ctdb->wtdb = tdb_wrap_open(db_ctdb, db_path, hash_size,
				      lpcfg_tdb_flags(lp_ctx, tdb_flags),
				      O_RDWR, 0);
	talloc_unlink(db_path, lp_ctx);
	if (db_ctdb->wtdb == NULL) {
		DEBUG(0, ("Could not open tdb %s: %s\n", db_path, strerror(errno)));
		TALLOC_FREE(result);
		return NULL;
	}
	talloc_free(db_path);

	/* honor permissions if user has specified O_CREAT */
	if (open_flags & O_CREAT) {
		int fd;
		fd = tdb_fd(db_ctdb->wtdb->tdb);
		ret = fchmod(fd, mode);
		if (ret == -1) {
			DBG_WARNING("fchmod failed: %s\n",
				    strerror(errno));
			TALLOC_FREE(result);
			return NULL;
		}
	}

	if (result->persistent) {
		/* g_lock is used to serialize persistent transactions */
		db_ctdb->lock_ctx = g_lock_ctx_init(db_ctdb, msg_ctx);
		if (db_ctdb->lock_ctx == NULL) {
			DEBUG(0, ("g_lock_ctx_init failed\n"));
			TALLOC_FREE(result);
			return NULL;
		}
	}

	/* Thresholds for logging slow lock/migrate operations */
	db_ctdb->warn_unlock_msecs = lp_parm_int(-1, "ctdb",
						 "unlock_warn_threshold", 5);
	db_ctdb->warn_migrate_attempts = lp_parm_int(-1, "ctdb",
						     "migrate_attempts", 10);
	db_ctdb->warn_migrate_msecs = lp_parm_int(-1, "ctdb",
						  "migrate_duration", 5000);
	db_ctdb->warn_locktime_msecs = lp_ctdb_locktime_warn_threshold();

	result->private_data = (void *)db_ctdb;
	result->fetch_locked = db_ctdb_fetch_locked;
	result->parse_record = db_ctdb_parse_record;
	result->parse_record_send = db_ctdb_parse_record_send;
	result->parse_record_recv = db_ctdb_parse_record_recv;
	result->traverse = db_ctdb_traverse;
	result->traverse_read = db_ctdb_traverse_read;
	result->get_seqnum = db_ctdb_get_seqnum;
	result->transaction_start = db_ctdb_transaction_start;
	result->transaction_commit = db_ctdb_transaction_commit;
	result->transaction_cancel = db_ctdb_transaction_cancel;
	result->id = db_ctdb_id;

	DEBUG(3,("db_open_ctdb: opened database '%s' with dbid 0x%x\n",
		 name, db_ctdb->db_id));

	return result;
}
diff --git a/source3/lib/dbwrap/dbwrap_ctdb.h b/source3/lib/dbwrap/dbwrap_ctdb.h
new file mode 100644
index 0000000..0b82479
--- /dev/null
+++ b/source3/lib/dbwrap/dbwrap_ctdb.h
@@ -0,0 +1,40 @@
+/*
+ Unix SMB/CIFS implementation.
+ Database interface wrapper around ctdbd
+ Copyright (C) Volker Lendecke 2007-2009
+ Copyright (C) Michael Adam 2009
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __DBWRAP_CTDB_H__
+#define __DBWRAP_CTDB_H__
+
+#include <talloc.h>
+
+#include "dbwrap/dbwrap_private.h"
+
+struct db_context;
+struct ctdbd_connection;
+
+struct db_context *db_open_ctdb(TALLOC_CTX *mem_ctx,
+ struct messaging_context *msg_ctx,
+ const char *name,
+ int hash_size, int tdb_flags,
+ int open_flags, mode_t mode,
+ enum dbwrap_lock_order lock_order,
+ uint64_t dbwrap_flags);
+int ctdb_async_ctx_reinit(TALLOC_CTX *mem_ctx, struct tevent_context *ev);
+
+#endif /* __DBWRAP_CTDB_H__ */
diff --git a/source3/lib/dbwrap/dbwrap_open.c b/source3/lib/dbwrap/dbwrap_open.c
new file mode 100644
index 0000000..52c8a94
--- /dev/null
+++ b/source3/lib/dbwrap/dbwrap_open.c
@@ -0,0 +1,197 @@
+/*
+ Unix SMB/CIFS implementation.
+ Database interface wrapper
+
+ Copyright (C) Volker Lendecke 2005-2007
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "includes.h"
+#include "dbwrap/dbwrap.h"
+#include "dbwrap/dbwrap_private.h"
+#include "dbwrap/dbwrap_open.h"
+#include "dbwrap/dbwrap_tdb.h"
+#include "dbwrap/dbwrap_ctdb.h"
+#include "lib/param/param.h"
+#include "lib/cluster_support.h"
+#include "lib/messages_ctdb.h"
+#include "util_tdb.h"
+#include "ctdbd_conn.h"
+#include "global_contexts.h"
+
/*
 * Check whether a database is handled by a local tdb rather than by
 * ctdb. Returns true unless clustering is active, the ctdbd socket is
 * reachable, and ctdb has not been disabled for this database via the
 * "ctdb:<dbname>" parameter.
 */
bool db_is_local(const char *name)
{
	const char *sockname = lp_ctdbd_socket();
	const char *partname = NULL;

	if (!lp_clustering() || !socket_exist(sockname)) {
		return true;
	}

	/* ctdb only wants the file part of the name */
	partname = strrchr(name, '/');
	partname = (partname != NULL) ? partname + 1 : name;

	/* allow ctdb for individual databases to be disabled */
	if (lp_parm_bool(-1, "ctdb", partname, true)) {
		return false;
	}

	return true;
}
+
+/**
+ * open a database
+ */
+struct db_context *db_open(TALLOC_CTX *mem_ctx,
+ const char *name,
+ int hash_size, int tdb_flags,
+ int open_flags, mode_t mode,
+ enum dbwrap_lock_order lock_order,
+ uint64_t dbwrap_flags)
+{
+ struct db_context *result = NULL;
+ const char *base;
+ struct loadparm_context *lp_ctx = NULL;
+
+ if ((lock_order != DBWRAP_LOCK_ORDER_NONE) &&
+ !DBWRAP_LOCK_ORDER_VALID(lock_order)) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ base = strrchr_m(name, '/');
+ if (base != NULL) {
+ base++;
+ } else {
+ base = name;
+ }
+
+ if (tdb_flags & TDB_CLEAR_IF_FIRST) {
+ bool try_readonly = false;
+
+ if (dbwrap_flags & DBWRAP_FLAG_OPTIMIZE_READONLY_ACCESS) {
+ try_readonly = true;
+ }
+
+ try_readonly = lp_parm_bool(-1, "dbwrap_optimize_readonly", "*", try_readonly);
+ try_readonly = lp_parm_bool(-1, "dbwrap_optimize_readonly", base, try_readonly);
+
+ if (try_readonly) {
+ dbwrap_flags |= DBWRAP_FLAG_OPTIMIZE_READONLY_ACCESS;
+ } else {
+ dbwrap_flags &= ~DBWRAP_FLAG_OPTIMIZE_READONLY_ACCESS;
+ }
+ }
+
+ if (tdb_flags & TDB_CLEAR_IF_FIRST) {
+ bool try_mutex = true;
+ bool require_mutex = false;
+
+ try_mutex = lp_parm_bool(-1, "dbwrap_tdb_mutexes", "*", try_mutex);
+ try_mutex = lp_parm_bool(-1, "dbwrap_tdb_mutexes", base, try_mutex);
+
+ if (!lp_use_mmap()) {
+ /*
+ * Mutexes require mmap. "use mmap = no" can
+ * be a debugging tool, so let it override the
+ * mutex parameters
+ */
+ try_mutex = false;
+ }
+
+ if (try_mutex && tdb_runtime_check_for_robust_mutexes()) {
+ tdb_flags |= TDB_MUTEX_LOCKING;
+ }
+
+ require_mutex = lp_parm_bool(-1, "dbwrap_tdb_require_mutexes",
+ "*", require_mutex);
+ require_mutex = lp_parm_bool(-1, "dbwrap_tdb_require_mutexes",
+ base, require_mutex);
+
+ if (require_mutex) {
+ tdb_flags |= TDB_MUTEX_LOCKING;
+ }
+ }
+
+ if (lp_clustering()) {
+ const char *sockname;
+
+ sockname = lp_ctdbd_socket();
+ if (!socket_exist(sockname)) {
+ DBG_WARNING("ctdb socket does %s not exist - "
+ "is ctdb not running?\n",
+ sockname);
+ return NULL;
+ }
+
+ /* allow ctdb for individual databases to be disabled */
+ if (lp_parm_bool(-1, "ctdb", base, true)) {
+ struct messaging_context *msg_ctx;
+ struct ctdbd_connection *conn;
+
+ /*
+ * Initialize messaging before getting the ctdb
+ * connection, as the ctdb connection requires messaging
+ * to be initialized.
+ */
+ msg_ctx = global_messaging_context();
+ if (msg_ctx == NULL) {
+ DBG_ERR("Failed to initialize messaging\n");
+ return NULL;
+ }
+
+ conn = messaging_ctdb_connection();
+ if (conn == NULL) {
+ DBG_WARNING("No ctdb connection\n");
+ errno = EIO;
+ return NULL;
+ }
+
+ result = db_open_ctdb(mem_ctx, msg_ctx, base,
+ hash_size,
+ tdb_flags, open_flags, mode,
+ lock_order, dbwrap_flags);
+ if (result == NULL) {
+ DBG_ERR("failed to attach to ctdb %s\n", base);
+ if (errno == 0) {
+ errno = EIO;
+ }
+ return NULL;
+ }
+
+ return result;
+ }
+ }
+
+ lp_ctx = loadparm_init_s3(mem_ctx, loadparm_s3_helpers());
+
+ if (hash_size == 0) {
+ hash_size = lpcfg_tdb_hash_size(lp_ctx, name);
+ }
+ tdb_flags = lpcfg_tdb_flags(lp_ctx, tdb_flags);
+
+ result = dbwrap_local_open(mem_ctx,
+ name,
+ hash_size,
+ tdb_flags,
+ open_flags,
+ mode,
+ lock_order,
+ dbwrap_flags);
+ talloc_unlink(mem_ctx, lp_ctx);
+ return result;
+}
diff --git a/source3/lib/dbwrap/dbwrap_open.h b/source3/lib/dbwrap/dbwrap_open.h
new file mode 100644
index 0000000..d14794e
--- /dev/null
+++ b/source3/lib/dbwrap/dbwrap_open.h
@@ -0,0 +1,45 @@
+/*
+ Unix SMB/CIFS implementation.
+ Database interface wrapper around tdb
+
+ Copyright (C) Volker Lendecke 2005-2007
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __DBWRAP_OPEN_H__
+#define __DBWRAP_OPEN_H__
+
+struct db_context;
+
+/**
+ * Convenience function to check whether a tdb database
+ * is local or clustered (ctdb) in a clustered environment.
+ */
+bool db_is_local(const char *name);
+
+/**
+ * Convenience function that will determine whether to
+ * open a tdb database via the tdb backend or via the ctdb
+ * backend, based on lp_clustering() and a db-specific
+ * settings.
+ */
+struct db_context *db_open(TALLOC_CTX *mem_ctx,
+ const char *name,
+ int hash_size, int tdb_flags,
+ int open_flags, mode_t mode,
+ enum dbwrap_lock_order lock_order,
+ uint64_t dbwrap_flags);
+
+#endif /* __DBWRAP_OPEN_H__ */
diff --git a/source3/lib/dbwrap/dbwrap_watch.c b/source3/lib/dbwrap/dbwrap_watch.c
new file mode 100644
index 0000000..df93119
--- /dev/null
+++ b/source3/lib/dbwrap/dbwrap_watch.c
@@ -0,0 +1,1285 @@
+/*
+ Unix SMB/CIFS implementation.
+ Watch dbwrap record changes
+ Copyright (C) Volker Lendecke 2012
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "includes.h"
+#include "system/filesys.h"
+#include "lib/util/server_id.h"
+#include "dbwrap/dbwrap.h"
+#include "dbwrap_watch.h"
+#include "dbwrap_open.h"
+#include "lib/util/util_tdb.h"
+#include "lib/util/tevent_ntstatus.h"
+#include "serverid.h"
+#include "server_id_watch.h"
+#include "lib/dbwrap/dbwrap_private.h"
+
+/*
+ * One entry in a record's on-disk watcher array: the watching
+ * process plus a per-watch instance number to tell multiple
+ * watches by the same process apart.
+ */
+struct dbwrap_watcher {
+	/*
+	 * Process watching this record
+	 */
+	struct server_id pid;
+	/*
+	 * Individual instance inside the waiter, incremented each
+	 * time a watcher is created
+	 */
+	uint64_t instance;
+};
+
+/* Serialized size of one struct dbwrap_watcher on disk */
+#define DBWRAP_WATCHER_BUF_LENGTH (SERVER_ID_BUF_LENGTH + sizeof(uint64_t))
+/* Cap so the watcher array length always fits into the uint32 header */
+#define DBWRAP_MAX_WATCHERS (INT32_MAX/DBWRAP_WATCHER_BUF_LENGTH)
+
+/*
+ * Watched records contain a header of:
+ *
+ * [uint32] num_records
+ * 0 [DBWRAP_WATCHER_BUF_LENGTH] \
+ * 1 [DBWRAP_WATCHER_BUF_LENGTH] |
+ * .. |- Array of watchers
+ * (num_records-1)[DBWRAP_WATCHER_BUF_LENGTH] /
+ *
+ * [Remainder of record....]
+ *
+ * If this header is absent then this is a
+ * fresh record of length zero (no watchers).
+ */
+
+/*
+ * Split a raw backend value into its watcher array and the user data
+ * that follows it (see the record layout comment above).
+ *
+ * Returns false on a malformed record: shorter than the uint32 header,
+ * or advertising more watchers than the remaining bytes can hold.
+ * Each output pointer may be NULL if the caller does not need it.
+ * The returned pointers alias into the caller's data buffer.
+ */
+static bool dbwrap_watch_rec_parse(
+	TDB_DATA data,
+	uint8_t **pwatchers,
+	size_t *pnum_watchers,
+	TDB_DATA *pdata)
+{
+	size_t num_watchers;
+
+	if (data.dsize == 0) {
+		/* Fresh record */
+		if (pwatchers != NULL) {
+			*pwatchers = NULL;
+		}
+		if (pnum_watchers != NULL) {
+			*pnum_watchers = 0;
+		}
+		if (pdata != NULL) {
+			*pdata = (TDB_DATA) { .dptr = NULL };
+		}
+		return true;
+	}
+
+	if (data.dsize < sizeof(uint32_t)) {
+		/* Invalid record */
+		return false;
+	}
+
+	num_watchers = IVAL(data.dptr, 0);
+
+	data.dptr += sizeof(uint32_t);
+	data.dsize -= sizeof(uint32_t);
+
+	/* Overflow-safe bound check via division, not multiplication */
+	if (num_watchers > data.dsize/DBWRAP_WATCHER_BUF_LENGTH) {
+		/* Invalid record */
+		return false;
+	}
+
+	if (pwatchers != NULL) {
+		*pwatchers = data.dptr;
+	}
+	if (pnum_watchers != NULL) {
+		*pnum_watchers = num_watchers;
+	}
+	if (pdata != NULL) {
+		size_t watchers_len = num_watchers * DBWRAP_WATCHER_BUF_LENGTH;
+		*pdata = (TDB_DATA) {
+			.dptr = data.dptr + watchers_len,
+			.dsize = data.dsize - watchers_len
+		};
+	}
+
+	return true;
+}
+
+/* Deserialize one watcher entry from its on-disk representation. */
+static void dbwrap_watcher_get(struct dbwrap_watcher *w,
+			       const uint8_t buf[DBWRAP_WATCHER_BUF_LENGTH])
+{
+	server_id_get(&w->pid, buf);
+	w->instance = BVAL(buf, SERVER_ID_BUF_LENGTH);
+}
+
+/* Serialize one watcher entry into its on-disk representation. */
+static void dbwrap_watcher_put(uint8_t buf[DBWRAP_WATCHER_BUF_LENGTH],
+			       const struct dbwrap_watcher *w)
+{
+	server_id_put(buf, w->pid);
+	SBVAL(buf, SERVER_ID_BUF_LENGTH, w->instance);
+}
+
+/* Log (and hexdump at debug level 1) a record that failed to parse. */
+static void dbwrap_watch_log_invalid_record(
+	struct db_context *db, TDB_DATA key, TDB_DATA value)
+{
+	DBG_ERR("Found invalid record in %s\n", dbwrap_name(db));
+	dump_data(1, key.dptr, key.dsize);
+	dump_data(1, value.dptr, value.dsize);
+}
+
+/* Per-database state of the watched wrapper: the wrapped backend db
+ * plus the messaging context used to alert watchers. */
+struct db_watched_ctx {
+	struct db_context *backend;
+	struct messaging_context *msg;
+};
+
+/*
+ * Per-locked-record state. Lives either on the heap (fetch_locked)
+ * or on the stack (do_locked), cross-linked with its struct db_record.
+ */
+struct db_watched_record {
+	struct db_record *rec;
+	/* Our own server_id, used when adding/removing our watcher entry */
+	struct server_id self;
+	struct {
+		struct db_record *rec;
+		TDB_DATA initial_value;
+		/* false once the backend record was rewritten/deleted */
+		bool initial_valid;
+	} backend;
+	/* Force a write-back in db_watched_record_fini() even if the
+	 * caller never stored anything */
+	bool force_fini_store;
+	/* Watcher added during this lock cycle (instance == 0: none) */
+	struct dbwrap_watcher added;
+	/* We removed ourselves from the first slot, so the next
+	 * watcher must be alerted */
+	bool removed_first;
+	struct {
+		/*
+		 * This is the number of watcher records
+		 * parsed from backend.initial_value
+		 */
+		size_t count;
+		/*
+		 * This is the pointer to
+		 * the potentially first watcher record
+		 * parsed from backend.initial_value
+		 *
+		 * The pointer actually points to memory
+		 * in backend.initial_value.
+		 *
+		 * Note it might be NULL, if count is 0.
+		 */
+		uint8_t *first;
+		/*
+		 * This remembers if we already
+		 * notified the watchers.
+		 *
+		 * As we only need to do that once during:
+		 * do_locked
+		 * or:
+		 * between rec = fetch_locked
+		 * and
+		 * TALLOC_FREE(rec)
+		 */
+		bool alerted;
+	} watchers;
+	struct {
+		/* The single watcher we will message on wakeup */
+		struct dbwrap_watcher watcher;
+	} wakeup;
+};
+
+/*
+ * Map a struct db_record back to its companion db_watched_record.
+ * Verifies the mutual rec<->wrec pointers instead of using talloc
+ * type checking, because wrec may live on a stack frame.
+ */
+static struct db_watched_record *db_record_get_watched_record(struct db_record *rec)
+{
+	/*
+	 * we can't use wrec = talloc_get_type_abort() here!
+	 * because wrec is likely a stack variable in
+	 * dbwrap_watched_do_locked_fn()
+	 *
+	 * In order to have at least some protection
+	 * we verify the cross reference pointers
+	 * between rec and wrec
+	 */
+	struct db_watched_record *wrec =
+		(struct db_watched_record *)rec->private_data;
+	SMB_ASSERT(wrec->rec == rec);
+	return wrec;
+}
+
+static NTSTATUS dbwrap_watched_record_storev(
+ struct db_watched_record *wrec,
+ const TDB_DATA *dbufs, int num_dbufs, int flags);
+static NTSTATUS dbwrap_watched_storev(struct db_record *rec,
+ const TDB_DATA *dbufs, int num_dbufs,
+ int flags);
+static NTSTATUS dbwrap_watched_delete(struct db_record *rec);
+static void dbwrap_watched_trigger_wakeup(struct messaging_context *msg_ctx,
+ struct dbwrap_watcher *watcher);
+static int db_watched_record_destructor(struct db_watched_record *wrec);
+
+/*
+ * Initialize the rec/wrec pair around a freshly locked backend record
+ * and parse its value into the watcher list (wrec->watchers) and the
+ * user-visible payload (rec->value). An unparseable value is logged
+ * and presented to the caller as an empty record.
+ */
+static void db_watched_record_init(struct db_context *db,
+				   struct messaging_context *msg_ctx,
+				   struct db_record *rec,
+				   struct db_watched_record *wrec,
+				   struct db_record *backend_rec,
+				   TDB_DATA backend_value)
+{
+	bool ok;
+
+	*rec = (struct db_record) {
+		.db = db,
+		.key = dbwrap_record_get_key(backend_rec),
+		.storev = dbwrap_watched_storev,
+		.delete_rec = dbwrap_watched_delete,
+		.private_data = wrec,
+	};
+
+	*wrec = (struct db_watched_record) {
+		.rec = rec,
+		.self = messaging_server_id(msg_ctx),
+		.backend = {
+			.rec = backend_rec,
+			.initial_value = backend_value,
+			.initial_valid = true,
+		},
+	};
+
+	ok = dbwrap_watch_rec_parse(backend_value,
+				    &wrec->watchers.first,
+				    &wrec->watchers.count,
+				    &rec->value);
+	if (!ok) {
+		dbwrap_watch_log_invalid_record(rec->db, rec->key, backend_value);
+		/* wipe invalid data */
+		rec->value = (TDB_DATA) { .dptr = NULL, .dsize = 0 };
+	}
+}
+
+/*
+ * fetch_locked for the watched wrapper: lock the backend record,
+ * then hand out a db_record whose value excludes the watcher header.
+ * The wrec destructor flushes watcher changes and triggers wakeups
+ * when the caller frees the returned record.
+ */
+static struct db_record *dbwrap_watched_fetch_locked(
+	struct db_context *db, TALLOC_CTX *mem_ctx, TDB_DATA key)
+{
+	struct db_watched_ctx *ctx = talloc_get_type_abort(
+		db->private_data, struct db_watched_ctx);
+	struct db_record *rec = NULL;
+	struct db_watched_record *wrec = NULL;
+	struct db_record *backend_rec = NULL;
+	TDB_DATA backend_value = { .dptr = NULL, };
+
+	rec = talloc_zero(mem_ctx, struct db_record);
+	if (rec == NULL) {
+		return NULL;
+	}
+	wrec = talloc_zero(rec, struct db_watched_record);
+	if (wrec == NULL) {
+		TALLOC_FREE(rec);
+		return NULL;
+	}
+
+	/* Child of wrec: freed by the destructor before wakeup is sent */
+	backend_rec = dbwrap_fetch_locked(ctx->backend, wrec, key);
+	if (backend_rec == NULL) {
+		TALLOC_FREE(rec);
+		return NULL;
+	}
+	backend_value = dbwrap_record_get_value(backend_rec);
+
+	db_watched_record_init(db, ctx->msg,
+			       rec, wrec,
+			       backend_rec, backend_value);
+	rec->value_valid = true;
+	talloc_set_destructor(wrec, db_watched_record_destructor);
+
+	return rec;
+}
+
+/* State for re-fetching the current user data during fini */
+struct db_watched_record_fini_state {
+	struct db_watched_record *wrec;
+	TALLOC_CTX *frame;
+	TDB_DATA dbufs[2];
+	int num_dbufs;
+	bool ok;
+};
+
+/*
+ * dbwrap_parse_record() callback used by db_watched_record_fini()
+ * when the cached initial value is no longer valid: rebuild the
+ * user-data dbufs from the current backend value, reusing the old
+ * rec->value buffer where possible and copying any excess onto
+ * state->frame (backend_value points into the mmap'ed tdb).
+ */
+static void db_watched_record_fini_fetcher(TDB_DATA key,
+					   TDB_DATA backend_value,
+					   void *private_data)
+{
+	struct db_watched_record_fini_state *state =
+		(struct db_watched_record_fini_state *)private_data;
+	struct db_watched_record *wrec = state->wrec;
+	struct db_record *rec = wrec->rec;
+	TDB_DATA value = {};
+	bool ok;
+	size_t copy_size;
+
+	/*
+	 * We're within dbwrap_parse_record()
+	 * and backend_value directly points into
+	 * the mmap'ed tdb, so we need to copy the
+	 * parts we require.
+	 */
+
+	ok = dbwrap_watch_rec_parse(backend_value, NULL, NULL, &value);
+	if (!ok) {
+		struct db_context *db = dbwrap_record_get_db(rec);
+
+		dbwrap_watch_log_invalid_record(db, key, backend_value);
+
+		/* wipe invalid data */
+		value = (TDB_DATA) { .dptr = NULL, .dsize = 0 };
+	}
+
+	copy_size = MIN(rec->value.dsize, value.dsize);
+	if (copy_size != 0) {
+		/*
+		 * First reuse the buffer we already had
+		 * as much as we can.
+		 */
+		memcpy(rec->value.dptr, value.dptr, copy_size);
+		state->dbufs[state->num_dbufs++] = rec->value;
+		value.dsize -= copy_size;
+		value.dptr += copy_size;
+	}
+
+	if (value.dsize != 0) {
+		uint8_t *p = NULL;
+
+		/*
+		 * There's still new data left
+		 * allocate it on callers stackframe
+		 */
+		p = talloc_memdup(state->frame, value.dptr, value.dsize);
+		if (p == NULL) {
+			DBG_WARNING("failed to allocate %zu bytes\n",
+				    value.dsize);
+			/* state->ok stays false: fini gives up */
+			return;
+		}
+
+		state->dbufs[state->num_dbufs++] = (TDB_DATA) {
+			.dptr = p, .dsize = value.dsize,
+		};
+	}
+
+	state->ok = true;
+}
+
+/*
+ * Flush pending watcher-list changes back to the backend record.
+ * Only runs if force_fini_store was set (watcher added or removed
+ * without an explicit store by the caller).
+ */
+static void db_watched_record_fini(struct db_watched_record *wrec)
+{
+	struct db_watched_record_fini_state state = { .wrec = wrec, };
+	struct db_context *backend = dbwrap_record_get_db(wrec->backend.rec);
+	struct db_record *rec = wrec->rec;
+	TDB_DATA key = dbwrap_record_get_key(wrec->backend.rec);
+	NTSTATUS status;
+
+	if (!wrec->force_fini_store) {
+		return;
+	}
+
+	if (wrec->backend.initial_valid) {
+		/* The cached user data is still current; reuse it */
+		if (rec->value.dsize != 0) {
+			state.dbufs[state.num_dbufs++] = rec->value;
+		}
+	} else {
+		/*
+		 * We need to fetch the current
+		 * value from the backend again,
+		 * which may need to allocate memory
+		 * on the provided stackframe.
+		 */
+
+		state.frame = talloc_stackframe();
+
+		status = dbwrap_parse_record(backend, key,
+					     db_watched_record_fini_fetcher, &state);
+		if (!NT_STATUS_IS_OK(status)) {
+			DBG_WARNING("dbwrap_parse_record failed: %s\n",
+				    nt_errstr(status));
+			TALLOC_FREE(state.frame);
+			return;
+		}
+		if (!state.ok) {
+			TALLOC_FREE(state.frame);
+			return;
+		}
+	}
+
+	/*
+	 * We don't want to wake up others just because
+	 * we added ourselves as new watcher. But if we
+	 * removed ourselves from the first position
+	 * we need to alert the next one.
+	 */
+	if (!wrec->removed_first) {
+		dbwrap_watched_watch_skip_alerting(rec);
+	}
+
+	status = dbwrap_watched_record_storev(wrec, state.dbufs, state.num_dbufs, 0);
+	TALLOC_FREE(state.frame);
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_WARNING("dbwrap_watched_record_storev failed: %s\n",
+			    nt_errstr(status));
+		return;
+	}
+
+	return;
+}
+
+/*
+ * Destructor for fetch_locked records: flush watcher changes, release
+ * the backend lock, and only then send the wakeup message, so the
+ * woken process can immediately grab the lock.
+ */
+static int db_watched_record_destructor(struct db_watched_record *wrec)
+{
+	struct db_record *rec = wrec->rec;
+	struct db_watched_ctx *ctx = talloc_get_type_abort(
+		rec->db->private_data, struct db_watched_ctx);
+
+	db_watched_record_fini(wrec);
+	TALLOC_FREE(wrec->backend.rec);
+	dbwrap_watched_trigger_wakeup(ctx->msg, &wrec->wakeup.watcher);
+	return 0;
+}
+
+/* Plumbing state for dbwrap_watched_do_locked() */
+struct dbwrap_watched_do_locked_state {
+	struct db_context *db;
+	struct messaging_context *msg_ctx;
+	struct db_watched_record *wrec;
+	struct db_record *rec;
+	void (*fn)(struct db_record *rec,
+		   TDB_DATA value,
+		   void *private_data);
+	void *private_data;
+};
+
+/*
+ * Runs under the backend lock: wrap the backend record, invoke the
+ * caller's fn with the user-data view, then flush watcher changes
+ * while still holding the lock.
+ */
+static void dbwrap_watched_do_locked_fn(
+	struct db_record *backend_rec,
+	TDB_DATA backend_value,
+	void *private_data)
+{
+	struct dbwrap_watched_do_locked_state *state =
+		(struct dbwrap_watched_do_locked_state *)private_data;
+
+	db_watched_record_init(state->db, state->msg_ctx,
+			       state->rec, state->wrec,
+			       backend_rec, backend_value);
+
+	state->fn(state->rec, state->rec->value, state->private_data);
+
+	db_watched_record_fini(state->wrec);
+}
+
+/*
+ * do_locked for the watched wrapper. rec/wrec live on this stack
+ * frame; the wakeup message is sent after dbwrap_do_locked() has
+ * dropped the backend lock.
+ */
+static NTSTATUS dbwrap_watched_do_locked(struct db_context *db, TDB_DATA key,
+					 void (*fn)(struct db_record *rec,
+						    TDB_DATA value,
+						    void *private_data),
+					 void *private_data)
+{
+	struct db_watched_ctx *ctx = talloc_get_type_abort(
+		db->private_data, struct db_watched_ctx);
+	struct db_watched_record wrec;
+	struct db_record rec;
+	struct dbwrap_watched_do_locked_state state = {
+		.db = db, .msg_ctx = ctx->msg,
+		.rec = &rec, .wrec = &wrec,
+		.fn = fn, .private_data = private_data,
+	};
+	NTSTATUS status;
+
+	status = dbwrap_do_locked(
+		ctx->backend, key, dbwrap_watched_do_locked_fn, &state);
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_DEBUG("dbwrap_do_locked returned %s\n", nt_errstr(status));
+		return status;
+	}
+
+	DBG_DEBUG("dbwrap_watched_do_locked_fn returned\n");
+
+	dbwrap_watched_trigger_wakeup(state.msg_ctx, &wrec.wakeup.watcher);
+
+	return NT_STATUS_OK;
+}
+
+/*
+ * Pick the watcher to alert: skim dead processes off the front of the
+ * watcher list (so they are not written back) and remember the first
+ * live one in wrec->wakeup.watcher. Idempotent per lock cycle via
+ * watchers.alerted.
+ */
+static void dbwrap_watched_record_prepare_wakeup(
+	struct db_watched_record *wrec)
+{
+	/*
+	 * Wakeup only needs to happen once (if at all)
+	 */
+	if (wrec->watchers.alerted) {
+		/* already done */
+		return;
+	}
+	wrec->watchers.alerted = true;
+
+	if (wrec->watchers.count == 0) {
+		DBG_DEBUG("No watchers\n");
+		return;
+	}
+
+	while (wrec->watchers.count != 0) {
+		struct server_id_buf tmp;
+		bool exists;
+
+		dbwrap_watcher_get(&wrec->wakeup.watcher, wrec->watchers.first);
+		exists = serverid_exists(&wrec->wakeup.watcher.pid);
+		if (!exists) {
+			DBG_DEBUG("Discard non-existing waiter %s:%"PRIu64"\n",
+				  server_id_str_buf(wrec->wakeup.watcher.pid, &tmp),
+				  wrec->wakeup.watcher.instance);
+			wrec->watchers.first += DBWRAP_WATCHER_BUF_LENGTH;
+			wrec->watchers.count -= 1;
+			continue;
+		}
+
+		/*
+		 * We will only wakeup the first waiter, via
+		 * dbwrap_watched_trigger_wakeup(), but keep
+		 * all (including the first one) in the list that
+		 * will be flushed back to the backend record
+		 * again. Waiters are removing their entries
+		 * via dbwrap_watched_watch_remove_instance()
+		 * when they no longer want to monitor the record.
+		 */
+		DBG_DEBUG("Will alert first waiter %s:%"PRIu64"\n",
+			  server_id_str_buf(wrec->wakeup.watcher.pid, &tmp),
+			  wrec->wakeup.watcher.instance);
+		break;
+	}
+}
+
+/*
+ * Send MSG_DBWRAP_MODIFIED (carrying the 8-byte instance number) to
+ * the watcher selected by prepare_wakeup. instance == 0 means nobody
+ * was selected. Send failures are logged and ignored: the record is
+ * already stored, the waiter will recover via its own mechanisms.
+ */
+static void dbwrap_watched_trigger_wakeup(struct messaging_context *msg_ctx,
+					  struct dbwrap_watcher *watcher)
+{
+	struct server_id_buf tmp;
+	uint8_t instance_buf[8];
+	NTSTATUS status;
+
+	if (watcher->instance == 0) {
+		DBG_DEBUG("No one to wakeup\n");
+		return;
+	}
+
+	DBG_DEBUG("Alerting %s:%"PRIu64"\n",
+		  server_id_str_buf(watcher->pid, &tmp),
+		  watcher->instance);
+
+	SBVAL(instance_buf, 0, watcher->instance);
+
+	status = messaging_send_buf(
+		msg_ctx,
+		watcher->pid,
+		MSG_DBWRAP_MODIFIED,
+		instance_buf,
+		sizeof(instance_buf));
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_WARNING("messaging_send_buf to %s failed: %s - ignoring...\n",
+			    server_id_str_buf(watcher->pid, &tmp),
+			    nt_errstr(status));
+	}
+}
+
+/*
+ * Store the record: prepend the watcher header ([count][watchers...],
+ * including a watcher added during this cycle) to the caller's dbufs
+ * and write it to the backend. Deletes the backend record instead if
+ * both watchers and user data are empty. Also selects the watcher to
+ * wake up (prepare_wakeup) before the value changes.
+ */
+static NTSTATUS dbwrap_watched_record_storev(
+	struct db_watched_record *wrec,
+	const TDB_DATA *dbufs, int num_dbufs, int flags)
+{
+	uint8_t num_watchers_buf[4] = { 0 };
+	uint8_t add_buf[DBWRAP_WATCHER_BUF_LENGTH];
+	size_t num_store_watchers;
+	/* +3: count header, existing watchers, added watcher */
+	TDB_DATA my_dbufs[num_dbufs+3];
+	int num_my_dbufs = 0;
+	NTSTATUS status;
+	size_t add_count = 0;
+
+	dbwrap_watched_record_prepare_wakeup(wrec);
+
+	/* After this store the cached initial value is stale */
+	wrec->backend.initial_valid = false;
+	wrec->force_fini_store = false;
+
+	if (wrec->added.pid.pid != 0) {
+		dbwrap_watcher_put(add_buf, &wrec->added);
+		add_count = 1;
+	}
+
+	num_store_watchers = wrec->watchers.count + add_count;
+	if (num_store_watchers == 0 && num_dbufs == 0) {
+		/* Nothing left at all: delete instead of storing */
+		status = dbwrap_record_delete(wrec->backend.rec);
+		return status;
+	}
+	if (num_store_watchers >= DBWRAP_MAX_WATCHERS) {
+		DBG_WARNING("Can't handle %zu watchers\n",
+			    num_store_watchers);
+		return NT_STATUS_INSUFFICIENT_RESOURCES;
+	}
+
+	SIVAL(num_watchers_buf, 0, num_store_watchers);
+
+	my_dbufs[num_my_dbufs++] = (TDB_DATA) {
+		.dptr = num_watchers_buf, .dsize = sizeof(num_watchers_buf),
+	};
+	if (wrec->watchers.count != 0) {
+		my_dbufs[num_my_dbufs++] = (TDB_DATA) {
+			.dptr = wrec->watchers.first, .dsize = wrec->watchers.count * DBWRAP_WATCHER_BUF_LENGTH,
+		};
+	}
+	if (add_count != 0) {
+		/* New watchers are appended at the end of the array */
+		my_dbufs[num_my_dbufs++] = (TDB_DATA) {
+			.dptr = add_buf,
+			.dsize = sizeof(add_buf),
+		};
+	}
+	if (num_dbufs != 0) {
+		memcpy(my_dbufs+num_my_dbufs, dbufs, num_dbufs * sizeof(*dbufs));
+		num_my_dbufs += num_dbufs;
+	}
+
+	SMB_ASSERT(num_my_dbufs <= ARRAY_SIZE(my_dbufs));
+
+	status = dbwrap_record_storev(
+		wrec->backend.rec, my_dbufs, num_my_dbufs, flags);
+	return status;
+}
+
+/* db_record->storev hook: delegate to the watcher-aware store. */
+static NTSTATUS dbwrap_watched_storev(struct db_record *rec,
+				      const TDB_DATA *dbufs, int num_dbufs,
+				      int flags)
+{
+	struct db_watched_record *wrec = db_record_get_watched_record(rec);
+
+	return dbwrap_watched_record_storev(wrec, dbufs, num_dbufs, flags);
+}
+
+/* db_record->delete_rec hook: store with zero user-data buffers. */
+static NTSTATUS dbwrap_watched_delete(struct db_record *rec)
+{
+	struct db_watched_record *wrec = db_record_get_watched_record(rec);
+
+	/*
+	 * dbwrap_watched_record_storev() will figure out
+	 * if the record should be deleted or if there are still
+	 * watchers to be stored.
+	 */
+	return dbwrap_watched_record_storev(wrec, NULL, 0, 0);
+}
+
+/* Wraps the caller's traverse callback so it sees user data only */
+struct dbwrap_watched_traverse_state {
+	int (*fn)(struct db_record *rec, void *private_data);
+	void *private_data;
+};
+
+/*
+ * Per-record traverse shim: strip the watcher header and skip records
+ * that are invalid or contain no user data (watchers only).
+ */
+static int dbwrap_watched_traverse_fn(struct db_record *rec,
+				      void *private_data)
+{
+	struct dbwrap_watched_traverse_state *state = private_data;
+	struct db_record prec = *rec;
+	bool ok;
+
+	ok = dbwrap_watch_rec_parse(rec->value, NULL, NULL, &prec.value);
+	if (!ok) {
+		return 0;
+	}
+	if (prec.value.dsize == 0) {
+		return 0;
+	}
+	prec.value_valid = true;
+
+	return state->fn(&prec, state->private_data);
+}
+
+/* db->traverse hook: read-write traverse over the backend. */
+static int dbwrap_watched_traverse(struct db_context *db,
+				   int (*fn)(struct db_record *rec,
+					     void *private_data),
+				   void *private_data)
+{
+	struct db_watched_ctx *ctx = talloc_get_type_abort(
+		db->private_data, struct db_watched_ctx);
+	struct dbwrap_watched_traverse_state state = {
+		.fn = fn, .private_data = private_data };
+	NTSTATUS status;
+	int ret;
+
+	status = dbwrap_traverse(
+		ctx->backend, dbwrap_watched_traverse_fn, &state, &ret);
+	if (!NT_STATUS_IS_OK(status)) {
+		return -1;
+	}
+	return ret;
+}
+
+/* db->traverse_read hook: read-only traverse over the backend. */
+static int dbwrap_watched_traverse_read(struct db_context *db,
+					int (*fn)(struct db_record *rec,
+						  void *private_data),
+					void *private_data)
+{
+	struct db_watched_ctx *ctx = talloc_get_type_abort(
+		db->private_data, struct db_watched_ctx);
+	struct dbwrap_watched_traverse_state state = {
+		.fn = fn, .private_data = private_data };
+	NTSTATUS status;
+	int ret;
+
+	status = dbwrap_traverse_read(
+		ctx->backend, dbwrap_watched_traverse_fn, &state, &ret);
+	if (!NT_STATUS_IS_OK(status)) {
+		return -1;
+	}
+	return ret;
+}
+
+/* The following hooks delegate straight to the wrapped backend db. */
+
+static int dbwrap_watched_get_seqnum(struct db_context *db)
+{
+	struct db_watched_ctx *ctx = talloc_get_type_abort(
+		db->private_data, struct db_watched_ctx);
+	return dbwrap_get_seqnum(ctx->backend);
+}
+
+static int dbwrap_watched_transaction_start(struct db_context *db)
+{
+	struct db_watched_ctx *ctx = talloc_get_type_abort(
+		db->private_data, struct db_watched_ctx);
+	return dbwrap_transaction_start(ctx->backend);
+}
+
+static int dbwrap_watched_transaction_commit(struct db_context *db)
+{
+	struct db_watched_ctx *ctx = talloc_get_type_abort(
+		db->private_data, struct db_watched_ctx);
+	return dbwrap_transaction_commit(ctx->backend);
+}
+
+static int dbwrap_watched_transaction_cancel(struct db_context *db)
+{
+	struct db_watched_ctx *ctx = talloc_get_type_abort(
+		db->private_data, struct db_watched_ctx);
+	return dbwrap_transaction_cancel(ctx->backend);
+}
+
+/* Shared state between the sync and async parse_record paths */
+struct dbwrap_watched_parse_record_state {
+	struct db_context *db;
+	void (*parser)(TDB_DATA key, TDB_DATA data, void *private_data);
+	void *private_data;
+	bool ok;
+};
+
+/*
+ * Backend parser shim: strip the watcher header and hand only the
+ * user data to the caller's parser. Sets state->ok = false on an
+ * unparseable record.
+ */
+static void dbwrap_watched_parse_record_parser(TDB_DATA key, TDB_DATA data,
+					       void *private_data)
+{
+	struct dbwrap_watched_parse_record_state *state = private_data;
+	TDB_DATA userdata;
+
+	state->ok = dbwrap_watch_rec_parse(data, NULL, NULL, &userdata);
+	if (!state->ok) {
+		dbwrap_watch_log_invalid_record(state->db, key, data);
+		return;
+	}
+
+	state->parser(key, userdata, state->private_data);
+}
+
+/* db->parse_record hook: invalid records report NT_STATUS_NOT_FOUND. */
+static NTSTATUS dbwrap_watched_parse_record(
+	struct db_context *db, TDB_DATA key,
+	void (*parser)(TDB_DATA key, TDB_DATA data, void *private_data),
+	void *private_data)
+{
+	struct db_watched_ctx *ctx = talloc_get_type_abort(
+		db->private_data, struct db_watched_ctx);
+	struct dbwrap_watched_parse_record_state state = {
+		.db = db,
+		.parser = parser,
+		.private_data = private_data,
+	};
+	NTSTATUS status;
+
+	status = dbwrap_parse_record(
+		ctx->backend, key, dbwrap_watched_parse_record_parser, &state);
+	if (!NT_STATUS_IS_OK(status)) {
+		return status;
+	}
+	if (!state.ok) {
+		return NT_STATUS_NOT_FOUND;
+	}
+	return NT_STATUS_OK;
+}
+
+static void dbwrap_watched_parse_record_done(struct tevent_req *subreq);
+
+/*
+ * Async variant of dbwrap_watched_parse_record(): parse the backend
+ * record and present only the user-data portion to the caller's
+ * parser. Completion is signalled via dbwrap_watched_parse_record_done.
+ */
+static struct tevent_req *dbwrap_watched_parse_record_send(
+	TALLOC_CTX *mem_ctx,
+	struct tevent_context *ev,
+	struct db_context *db,
+	TDB_DATA key,
+	void (*parser)(TDB_DATA key, TDB_DATA data, void *private_data),
+	void *private_data,
+	enum dbwrap_req_state *req_state)
+{
+	struct db_watched_ctx *ctx = talloc_get_type_abort(
+		db->private_data, struct db_watched_ctx);
+	struct tevent_req *req = NULL;
+	struct tevent_req *subreq = NULL;
+	struct dbwrap_watched_parse_record_state *state = NULL;
+
+	req = tevent_req_create(mem_ctx, &state,
+				struct dbwrap_watched_parse_record_state);
+	if (req == NULL) {
+		*req_state = DBWRAP_REQ_ERROR;
+		return NULL;
+	}
+
+	/*
+	 * .db must be initialized here: the parser shim passes
+	 * state->db to dbwrap_watch_log_invalid_record() on a
+	 * malformed record, which would dereference a NULL
+	 * db_context otherwise (the sync path already sets it).
+	 */
+	*state = (struct dbwrap_watched_parse_record_state) {
+		.db = db,
+		.parser = parser,
+		.private_data = private_data,
+		.ok = true,
+	};
+
+	subreq = dbwrap_parse_record_send(state,
+					  ev,
+					  ctx->backend,
+					  key,
+					  dbwrap_watched_parse_record_parser,
+					  state,
+					  req_state);
+	if (tevent_req_nomem(subreq, req)) {
+		*req_state = DBWRAP_REQ_ERROR;
+		return tevent_req_post(req, ev);
+	}
+
+	tevent_req_set_callback(subreq, dbwrap_watched_parse_record_done, req);
+	return req;
+}
+
+/*
+ * Completion of the backend parse: propagate errors, and map an
+ * invalid (unparseable) record to NT_STATUS_NOT_FOUND, matching the
+ * sync path.
+ */
+static void dbwrap_watched_parse_record_done(struct tevent_req *subreq)
+{
+	struct tevent_req *req = tevent_req_callback_data(
+		subreq, struct tevent_req);
+	struct dbwrap_watched_parse_record_state *state = tevent_req_data(
+		req, struct dbwrap_watched_parse_record_state);
+	NTSTATUS status;
+
+	status = dbwrap_parse_record_recv(subreq);
+	TALLOC_FREE(subreq);
+	if (tevent_req_nterror(req, status)) {
+		return;
+	}
+
+	if (!state->ok) {
+		tevent_req_nterror(req, NT_STATUS_NOT_FOUND);
+		return;
+	}
+
+	tevent_req_done(req);
+	return;
+}
+
+/* Standard tevent recv: collect the final status of the request. */
+static NTSTATUS dbwrap_watched_parse_record_recv(struct tevent_req *req)
+{
+	NTSTATUS status;
+
+	if (tevent_req_is_nterror(req, &status)) {
+		tevent_req_received(req);
+		return status;
+	}
+
+	tevent_req_received(req);
+	return NT_STATUS_OK;
+}
+
+/* db->exists hook: delegate to the backend. */
+static int dbwrap_watched_exists(struct db_context *db, TDB_DATA key)
+{
+	struct db_watched_ctx *ctx = talloc_get_type_abort(
+		db->private_data, struct db_watched_ctx);
+
+	return dbwrap_exists(ctx->backend, key);
+}
+
+/* db->id hook: delegate to the backend. */
+static size_t dbwrap_watched_id(struct db_context *db, uint8_t *id,
+				size_t idlen)
+{
+	struct db_watched_ctx *ctx = talloc_get_type_abort(
+		db->private_data, struct db_watched_ctx);
+
+	return dbwrap_db_id(ctx->backend, id, idlen);
+}
+
+/*
+ * Wrap an already-opened backend db in the watch layer. Takes
+ * ownership of *backend (talloc_move); the backend's lock order is
+ * transferred to the wrapper so only the wrapper participates in
+ * lock-order checking. Returns NULL on allocation failure.
+ */
+struct db_context *db_open_watched(TALLOC_CTX *mem_ctx,
+				   struct db_context **backend,
+				   struct messaging_context *msg)
+{
+	struct db_context *db;
+	struct db_watched_ctx *ctx;
+
+	db = talloc_zero(mem_ctx, struct db_context);
+	if (db == NULL) {
+		return NULL;
+	}
+	ctx = talloc_zero(db, struct db_watched_ctx);
+	if (ctx == NULL) {
+		TALLOC_FREE(db);
+		return NULL;
+	}
+	db->private_data = ctx;
+
+	ctx->msg = msg;
+
+	ctx->backend = talloc_move(ctx, backend);
+	db->lock_order = ctx->backend->lock_order;
+	ctx->backend->lock_order = DBWRAP_LOCK_ORDER_NONE;
+
+	db->fetch_locked = dbwrap_watched_fetch_locked;
+	db->do_locked = dbwrap_watched_do_locked;
+	db->traverse = dbwrap_watched_traverse;
+	db->traverse_read = dbwrap_watched_traverse_read;
+	db->get_seqnum = dbwrap_watched_get_seqnum;
+	db->transaction_start = dbwrap_watched_transaction_start;
+	db->transaction_commit = dbwrap_watched_transaction_commit;
+	db->transaction_cancel = dbwrap_watched_transaction_cancel;
+	db->parse_record = dbwrap_watched_parse_record;
+	db->parse_record_send = dbwrap_watched_parse_record_send;
+	db->parse_record_recv = dbwrap_watched_parse_record_recv;
+	db->exists = dbwrap_watched_exists;
+	db->id = dbwrap_watched_id;
+	db->name = dbwrap_name(ctx->backend);
+
+	return db;
+}
+
+/*
+ * Register the calling process as a watcher of this record and return
+ * the new per-process instance number. Only one add per lock cycle is
+ * allowed (asserted). The entry is flushed to the backend by the next
+ * store or by db_watched_record_fini().
+ */
+uint64_t dbwrap_watched_watch_add_instance(struct db_record *rec)
+{
+	struct db_watched_record *wrec = db_record_get_watched_record(rec);
+	/* Process-wide counter; 0 is reserved for "no watcher" */
+	static uint64_t global_instance = 1;
+
+	SMB_ASSERT(wrec->added.instance == 0);
+
+	wrec->added = (struct dbwrap_watcher) {
+		.pid = wrec->self,
+		.instance = global_instance++,
+	};
+
+	wrec->force_fini_store = true;
+
+	return wrec->added.instance;
+}
+
+/*
+ * Remove our watcher entry with the given instance number, either
+ * reverting a just-added entry or splicing it out of the in-memory
+ * watcher array (first slot: advance the pointer; last slot: shrink
+ * the count; middle: memmove). instance == 0 is a no-op. Removing
+ * the first entry sets removed_first so fini alerts the next waiter.
+ */
+void dbwrap_watched_watch_remove_instance(struct db_record *rec, uint64_t instance)
+{
+	struct db_watched_record *wrec = db_record_get_watched_record(rec);
+	struct dbwrap_watcher clear_watcher = {
+		.pid = wrec->self,
+		.instance = instance,
+	};
+	size_t i;
+	struct server_id_buf buf;
+
+	if (instance == 0) {
+		return;
+	}
+
+	if (wrec->added.instance == instance) {
+		SMB_ASSERT(server_id_equal(&wrec->added.pid, &wrec->self));
+		DBG_DEBUG("Watcher %s:%"PRIu64" reverted from adding\n",
+			  server_id_str_buf(clear_watcher.pid, &buf),
+			  clear_watcher.instance);
+		ZERO_STRUCT(wrec->added);
+	}
+
+	for (i=0; i < wrec->watchers.count; i++) {
+		struct dbwrap_watcher watcher;
+		size_t off = i*DBWRAP_WATCHER_BUF_LENGTH;
+		size_t next_off;
+		size_t full_len;
+		size_t move_len;
+
+		dbwrap_watcher_get(&watcher, wrec->watchers.first + off);
+
+		if (clear_watcher.instance != watcher.instance) {
+			continue;
+		}
+		if (!server_id_equal(&clear_watcher.pid, &watcher.pid)) {
+			continue;
+		}
+
+		wrec->force_fini_store = true;
+
+		if (i == 0) {
+			DBG_DEBUG("Watcher %s:%"PRIu64" removed from first position of %zu\n",
+				  server_id_str_buf(clear_watcher.pid, &buf),
+				  clear_watcher.instance,
+				  wrec->watchers.count);
+			wrec->watchers.first += DBWRAP_WATCHER_BUF_LENGTH;
+			wrec->watchers.count -= 1;
+			wrec->removed_first = true;
+			return;
+		}
+		if (i == (wrec->watchers.count-1)) {
+			DBG_DEBUG("Watcher %s:%"PRIu64" removed from last position of %zu\n",
+				  server_id_str_buf(clear_watcher.pid, &buf),
+				  clear_watcher.instance,
+				  wrec->watchers.count);
+			wrec->watchers.count -= 1;
+			return;
+		}
+
+		DBG_DEBUG("Watcher %s:%"PRIu64" cleared at position %zu from %zu\n",
+			  server_id_str_buf(clear_watcher.pid, &buf),
+			  clear_watcher.instance, i+1,
+			  wrec->watchers.count);
+
+		next_off = off + DBWRAP_WATCHER_BUF_LENGTH;
+		full_len = wrec->watchers.count * DBWRAP_WATCHER_BUF_LENGTH;
+		move_len = full_len - next_off;
+		memmove(wrec->watchers.first + off,
+			wrec->watchers.first + next_off,
+			move_len);
+		wrec->watchers.count -= 1;
+		return;
+	}
+
+	DBG_DEBUG("Watcher %s:%"PRIu64" not found in %zu watchers\n",
+		  server_id_str_buf(clear_watcher.pid, &buf),
+		  clear_watcher.instance,
+		  wrec->watchers.count);
+	return;
+}
+
+/* Suppress waking anyone up during this lock cycle. */
+void dbwrap_watched_watch_skip_alerting(struct db_record *rec)
+{
+	struct db_watched_record *wrec = db_record_get_watched_record(rec);
+
+	wrec->wakeup.watcher = (struct dbwrap_watcher) { .instance = 0, };
+	wrec->watchers.alerted = true;
+}
+
+/* Undo skip/prepare so the next store selects a watcher again. */
+void dbwrap_watched_watch_reset_alerting(struct db_record *rec)
+{
+	struct db_watched_record *wrec = db_record_get_watched_record(rec);
+
+	wrec->wakeup.watcher = (struct dbwrap_watcher) { .instance = 0, };
+	wrec->watchers.alerted = false;
+}
+
+/* Select the watcher to alert now, even before any store happens. */
+void dbwrap_watched_watch_force_alerting(struct db_record *rec)
+{
+	struct db_watched_record *wrec = db_record_get_watched_record(rec);
+
+	dbwrap_watched_record_prepare_wakeup(wrec);
+}
+
+/* Async state for one dbwrap_watched_watch_send() request */
+struct dbwrap_watched_watch_state {
+	struct db_context *db;
+	/* Copy of the watched record's key, for destructor cleanup */
+	TDB_DATA key;
+	struct dbwrap_watcher watcher;
+	struct server_id blocker;
+	/* true if we completed because the blocker process died */
+	bool blockerdead;
+};
+
+static bool dbwrap_watched_msg_filter(struct messaging_rec *rec,
+ void *private_data);
+static void dbwrap_watched_watch_done(struct tevent_req *subreq);
+static void dbwrap_watched_watch_blocker_died(struct tevent_req *subreq);
+static int dbwrap_watched_watch_state_destructor(
+ struct dbwrap_watched_watch_state *state);
+
+/*
+ * Wait for a change to the (locked) record: completes when we receive
+ * a MSG_DBWRAP_MODIFIED for our watcher instance, or when the
+ * optional blocker process dies. resumed_instance == 0 registers a
+ * new watcher; a non-zero value resumes a previously kept instance.
+ */
+struct tevent_req *dbwrap_watched_watch_send(TALLOC_CTX *mem_ctx,
+					     struct tevent_context *ev,
+					     struct db_record *rec,
+					     uint64_t resumed_instance,
+					     struct server_id blocker)
+{
+	struct db_context *db = dbwrap_record_get_db(rec);
+	struct db_watched_ctx *ctx = talloc_get_type_abort(
+		db->private_data, struct db_watched_ctx);
+	struct db_watched_record *wrec = db_record_get_watched_record(rec);
+	struct tevent_req *req, *subreq;
+	struct dbwrap_watched_watch_state *state;
+	uint64_t instance;
+
+	req = tevent_req_create(mem_ctx, &state,
+				struct dbwrap_watched_watch_state);
+	if (req == NULL) {
+		return NULL;
+	}
+	state->db = db;
+	state->blocker = blocker;
+
+	if (ctx->msg == NULL) {
+		/* Watching needs messaging to deliver the wakeup */
+		tevent_req_nterror(req, NT_STATUS_NOT_SUPPORTED);
+		return tevent_req_post(req, ev);
+	}
+
+	if (resumed_instance == 0 && wrec->added.instance == 0) {
+		/*
+		 * Adding a new instance
+		 */
+		instance = dbwrap_watched_watch_add_instance(rec);
+	} else if (resumed_instance != 0 && wrec->added.instance == 0) {
+		/*
+		 * Resuming an existing instance that was
+		 * already present before do_locked started
+		 */
+		instance = resumed_instance;
+	} else if (resumed_instance == wrec->added.instance) {
+		/*
+		 * The caller used dbwrap_watched_watch_add_instance()
+		 * already during this do_locked() invocation.
+		 */
+		instance = resumed_instance;
+	} else {
+		/* Conflicting resume/add combination */
+		tevent_req_nterror(req, NT_STATUS_REQUEST_NOT_ACCEPTED);
+		return tevent_req_post(req, ev);
+	}
+
+	state->watcher = (struct dbwrap_watcher) {
+		.pid = messaging_server_id(ctx->msg),
+		.instance = instance,
+	};
+
+	state->key = tdb_data_talloc_copy(state, rec->key);
+	if (tevent_req_nomem(state->key.dptr, req)) {
+		return tevent_req_post(req, ev);
+	}
+
+	subreq = messaging_filtered_read_send(
+		state, ev, ctx->msg, dbwrap_watched_msg_filter, state);
+	if (tevent_req_nomem(subreq, req)) {
+		return tevent_req_post(req, ev);
+	}
+	tevent_req_set_callback(subreq, dbwrap_watched_watch_done, req);
+
+	/* Deregister the watcher if the request is abandoned */
+	talloc_set_destructor(state, dbwrap_watched_watch_state_destructor);
+
+	if (blocker.pid != 0) {
+		subreq = server_id_watch_send(state, ev, blocker);
+		if (tevent_req_nomem(subreq, req)) {
+			return tevent_req_post(req, ev);
+		}
+		tevent_req_set_callback(
+			subreq, dbwrap_watched_watch_blocker_died, req);
+	}
+
+	return req;
+}
+
+/* The blocker process exited: complete the watch with blockerdead set. */
+static void dbwrap_watched_watch_blocker_died(struct tevent_req *subreq)
+{
+	struct tevent_req *req = tevent_req_callback_data(
+		subreq, struct tevent_req);
+	struct dbwrap_watched_watch_state *state = tevent_req_data(
+		req, struct dbwrap_watched_watch_state);
+	int ret;
+
+	ret = server_id_watch_recv(subreq, NULL);
+	TALLOC_FREE(subreq);
+	if (ret != 0) {
+		tevent_req_nterror(req, map_nt_error_from_unix(ret));
+		return;
+	}
+	state->blockerdead = true;
+	tevent_req_done(req);
+}
+
+/*
+ * do_locked callback run when an abandoned watch request is freed:
+ * drop our watcher entry from the record.
+ */
+static void dbwrap_watched_watch_state_destructor_fn(
+	struct db_record *rec,
+	TDB_DATA value,
+	void *private_data)
+{
+	struct dbwrap_watched_watch_state *state = talloc_get_type_abort(
+		private_data, struct dbwrap_watched_watch_state);
+
+	/*
+	 * Here we just remove ourself from the in memory
+	 * watchers array and let db_watched_record_fini()
+	 * call dbwrap_watched_record_storev() to do the magic
+	 * of writing back the modified in memory copy.
+	 */
+	dbwrap_watched_watch_remove_instance(rec, state->watcher.instance);
+	return;
+}
+
+/* talloc destructor: best-effort removal of our on-disk watcher entry. */
+static int dbwrap_watched_watch_state_destructor(
+	struct dbwrap_watched_watch_state *state)
+{
+	NTSTATUS status;
+
+	status = dbwrap_do_locked(
+		state->db,
+		state->key,
+		dbwrap_watched_watch_state_destructor_fn,
+		state);
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_WARNING("dbwrap_do_locked failed: %s\n",
+			    nt_errstr(status));
+	}
+	return 0;
+}
+
+/*
+ * messaging filter: accept only MSG_DBWRAP_MODIFIED messages that
+ * carry exactly our 8-byte instance number and no file descriptors.
+ */
+static bool dbwrap_watched_msg_filter(struct messaging_rec *rec,
+				      void *private_data)
+{
+	struct dbwrap_watched_watch_state *state = talloc_get_type_abort(
+		private_data, struct dbwrap_watched_watch_state);
+	uint64_t instance;
+
+	if (rec->msg_type != MSG_DBWRAP_MODIFIED) {
+		return false;
+	}
+	if (rec->num_fds != 0) {
+		return false;
+	}
+
+	if (rec->buf.length != sizeof(instance)) {
+		DBG_DEBUG("Got size %zu, expected %zu\n",
+			  rec->buf.length,
+			  sizeof(instance));
+		return false;
+	}
+
+	instance = BVAL(rec->buf.data, 0);
+
+	if (instance != state->watcher.instance) {
+		DBG_DEBUG("Got instance %"PRIu64", expected %"PRIu64"\n",
+			  instance,
+			  state->watcher.instance);
+		return false;
+	}
+
+	return true;
+}
+
+/* A matching MSG_DBWRAP_MODIFIED arrived: complete the watch request. */
+static void dbwrap_watched_watch_done(struct tevent_req *subreq)
+{
+	struct tevent_req *req = tevent_req_callback_data(
+		subreq, struct tevent_req);
+	struct dbwrap_watched_watch_state *state = tevent_req_data(
+		req, struct dbwrap_watched_watch_state);
+	struct messaging_rec *rec;
+	int ret;
+
+	ret = messaging_filtered_read_recv(subreq, state, &rec);
+	TALLOC_FREE(subreq);
+	if (ret != 0) {
+		tevent_req_nterror(req, map_nt_error_from_unix(ret));
+		return;
+	}
+	tevent_req_done(req);
+}
+
+/*
+ * Collect the watch result. If pkeep_instance is non-NULL the caller
+ * takes over the watcher instance (and responsibility for removing
+ * it); otherwise the state destructor deregisters it.
+ */
+NTSTATUS dbwrap_watched_watch_recv(struct tevent_req *req,
+				   uint64_t *pkeep_instance,
+				   bool *blockerdead,
+				   struct server_id *blocker)
+{
+	struct dbwrap_watched_watch_state *state = tevent_req_data(
+		req, struct dbwrap_watched_watch_state);
+	NTSTATUS status;
+
+	if (tevent_req_is_nterror(req, &status)) {
+		tevent_req_received(req);
+		return status;
+	}
+	if (pkeep_instance != NULL) {
+		*pkeep_instance = state->watcher.instance;
+		/*
+		 * No need to remove ourselves anymore,
+		 * the caller will take care of removing itself.
+		 */
+		talloc_set_destructor(state, NULL);
+	}
+	if (blockerdead != NULL) {
+		*blockerdead = state->blockerdead;
+	}
+	if (blocker != NULL) {
+		*blocker = state->blocker;
+	}
+	tevent_req_received(req);
+	return NT_STATUS_OK;
+}
+
diff --git a/source3/lib/dbwrap/dbwrap_watch.h b/source3/lib/dbwrap/dbwrap_watch.h
new file mode 100644
index 0000000..1b93e13
--- /dev/null
+++ b/source3/lib/dbwrap/dbwrap_watch.h
@@ -0,0 +1,45 @@
+/*
+ Unix SMB/CIFS implementation.
+ Watch dbwrap record changes
+ Copyright (C) Volker Lendecke 2012
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __DBWRAP_WATCH_H__
+#define __DBWRAP_WATCH_H__
+
+#include <tevent.h>
+#include "dbwrap/dbwrap.h"
+#include "messages.h"
+
+/*
+ * Wrap an opened backend db in the watch layer; takes ownership of
+ * *backend. msg is used to alert watchers of record changes.
+ */
+struct db_context *db_open_watched(TALLOC_CTX *mem_ctx,
+				   struct db_context **backend,
+				   struct messaging_context *msg);
+/* Register the caller as watcher of the locked record; returns the
+ * instance number to wait for / remove later. */
+uint64_t dbwrap_watched_watch_add_instance(struct db_record *rec);
+/* Remove the caller's watcher entry; instance == 0 is a no-op. */
+void dbwrap_watched_watch_remove_instance(struct db_record *rec, uint64_t instance);
+void dbwrap_watched_watch_skip_alerting(struct db_record *rec);
+void dbwrap_watched_watch_reset_alerting(struct db_record *rec);
+void dbwrap_watched_watch_force_alerting(struct db_record *rec);
+/*
+ * Wait for a change to the locked record, or for the optional blocker
+ * process to die. Parameter name matches the definition in
+ * dbwrap_watch.c: 0 adds a new instance, non-zero resumes one.
+ */
+struct tevent_req *dbwrap_watched_watch_send(TALLOC_CTX *mem_ctx,
+					     struct tevent_context *ev,
+					     struct db_record *rec,
+					     uint64_t resumed_instance,
+					     struct server_id blocker);
+NTSTATUS dbwrap_watched_watch_recv(struct tevent_req *req,
+				   uint64_t *pkeep_instance,
+				   bool *blockerdead,
+				   struct server_id *blocker);
+
+#endif /* __DBWRAP_WATCH_H__ */