author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 17:20:00 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 17:20:00 +0000
commit    8daa83a594a2e98f39d764422bfbdbc62c9efd44 (patch)
tree      4099e8021376c7d8c05bdf8503093d80e9c7bad0 /lib/tdb/common
parent    Initial commit. (diff)
Adding upstream version 2:4.20.0+dfsg. (tag: upstream/2%4.20.0+dfsg)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'lib/tdb/common')
-rw-r--r--  lib/tdb/common/check.c           489
-rw-r--r--  lib/tdb/common/dump.c            149
-rw-r--r--  lib/tdb/common/error.c            74
-rw-r--r--  lib/tdb/common/freelist.c        747
-rw-r--r--  lib/tdb/common/freelistcheck.c   107
-rw-r--r--  lib/tdb/common/hash.c            345
-rw-r--r--  lib/tdb/common/io.c              806
-rw-r--r--  lib/tdb/common/lock.c           1033
-rw-r--r--  lib/tdb/common/mutex.c          1078
-rw-r--r--  lib/tdb/common/open.c            968
-rw-r--r--  lib/tdb/common/rescue.c          351
-rw-r--r--  lib/tdb/common/summary.c         219
-rw-r--r--  lib/tdb/common/tdb.c            1348
-rw-r--r--  lib/tdb/common/tdb_private.h     370
-rw-r--r--  lib/tdb/common/transaction.c    1388
-rw-r--r--  lib/tdb/common/traverse.c        510
16 files changed, 9982 insertions, 0 deletions
diff --git a/lib/tdb/common/check.c b/lib/tdb/common/check.c
new file mode 100644
index 0000000..d7741f6
--- /dev/null
+++ b/lib/tdb/common/check.c
@@ -0,0 +1,489 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Rusty Russell 2009
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+#include "tdb_private.h"
+
+/* Since we opened it, these shouldn't fail unless it's recent corruption. */
+static bool tdb_check_header(struct tdb_context *tdb, tdb_off_t *recovery)
+{
+ struct tdb_header hdr;
+ uint32_t h1, h2;
+
+ if (tdb->methods->tdb_read(tdb, 0, &hdr, sizeof(hdr), 0) == -1)
+ return false;
+ if (strcmp(hdr.magic_food, TDB_MAGIC_FOOD) != 0)
+ goto corrupt;
+
+ CONVERT(hdr);
+ if (hdr.version != TDB_VERSION)
+ goto corrupt;
+
+ if (hdr.rwlocks != 0 &&
+ hdr.rwlocks != TDB_FEATURE_FLAG_MAGIC &&
+ hdr.rwlocks != TDB_HASH_RWLOCK_MAGIC)
+ goto corrupt;
+
+ tdb_header_hash(tdb, &h1, &h2);
+ if (hdr.magic1_hash && hdr.magic2_hash &&
+ (hdr.magic1_hash != h1 || hdr.magic2_hash != h2))
+ goto corrupt;
+
+ if (hdr.hash_size == 0)
+ goto corrupt;
+
+ if (hdr.hash_size != tdb->hash_size)
+ goto corrupt;
+
+ if (hdr.recovery_start != 0 &&
+ hdr.recovery_start < TDB_DATA_START(tdb->hash_size))
+ goto corrupt;
+
+ *recovery = hdr.recovery_start;
+ return true;
+
+corrupt:
+ tdb->ecode = TDB_ERR_CORRUPT;
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "Header is corrupt\n"));
+ return false;
+}
+
+/* Generic record header check. */
+static bool tdb_check_record(struct tdb_context *tdb,
+ tdb_off_t off,
+ const struct tdb_record *rec)
+{
+ tdb_off_t tailer;
+
+ /* Check rec->next: 0 or points to record offset, aligned. */
+ if (rec->next > 0 && rec->next < TDB_DATA_START(tdb->hash_size)){
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Record offset %u too small next %u\n",
+ off, rec->next));
+ goto corrupt;
+ }
+ if (rec->next + sizeof(*rec) < rec->next) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Record offset %u too large next %u\n",
+ off, rec->next));
+ goto corrupt;
+ }
+ if ((rec->next % TDB_ALIGNMENT) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Record offset %u misaligned next %u\n",
+ off, rec->next));
+ goto corrupt;
+ }
+ if (tdb_oob(tdb, rec->next, sizeof(*rec), 0))
+ goto corrupt;
+
+ /* Check rec_len: similar to rec->next, implies next record. */
+ if ((rec->rec_len % TDB_ALIGNMENT) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Record offset %u misaligned length %u\n",
+ off, rec->rec_len));
+ goto corrupt;
+ }
+ /* Must fit tailer. */
+ if (rec->rec_len < sizeof(tailer)) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Record offset %u too short length %u\n",
+ off, rec->rec_len));
+ goto corrupt;
+ }
+ /* OOB allows "right at the end" access, so this works for last rec. */
+ if (tdb_oob(tdb, off, sizeof(*rec)+rec->rec_len, 0))
+ goto corrupt;
+
+ /* Check tailer. */
+ if (tdb_ofs_read(tdb, off+sizeof(*rec)+rec->rec_len-sizeof(tailer),
+ &tailer) == -1)
+ goto corrupt;
+ if (tailer != sizeof(*rec) + rec->rec_len) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Record offset %u invalid tailer\n", off));
+ goto corrupt;
+ }
+
+ return true;
+
+corrupt:
+ tdb->ecode = TDB_ERR_CORRUPT;
+ return false;
+}
+
+/* Grab some bytes: may copy if can't use mmap.
+ Caller has already done bounds check. */
+static TDB_DATA get_bytes(struct tdb_context *tdb,
+ tdb_off_t off, tdb_len_t len)
+{
+ TDB_DATA d;
+
+ d.dsize = len;
+
+ if (tdb->transaction == NULL && tdb->map_ptr != NULL)
+ d.dptr = (unsigned char *)tdb->map_ptr + off;
+ else
+ d.dptr = tdb_alloc_read(tdb, off, d.dsize);
+ return d;
+}
+
+/* Frees data if we're not able to simply use mmap. */
+static void put_bytes(struct tdb_context *tdb, TDB_DATA d)
+{
+ if (tdb->transaction == NULL && tdb->map_ptr != NULL)
+ return;
+ free(d.dptr);
+}
+
+/* We use the excellent Jenkins lookup3 hash; this is based on hash_word2.
+ * See: http://burtleburtle.net/bob/c/lookup3.c
+ */
+#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
+static void hash(uint32_t key, uint32_t *pc, uint32_t *pb)
+{
+ uint32_t a,b,c;
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + *pc;
+ c += *pb;
+ a += key;
+ c ^= b; c -= rot(b,14);
+ a ^= c; a -= rot(c,11);
+ b ^= a; b -= rot(a,25);
+ c ^= b; c -= rot(b,16);
+ a ^= c; a -= rot(c,4);
+ b ^= a; b -= rot(a,14);
+ c ^= b; c -= rot(b,24);
+ *pc=c; *pb=b;
+}
+
+/*
+ We want to check that all free records are in the free list
+ (only once), and all free list entries are free records. Similarly
+ for each hash chain of used records.
+
+ Doing that naively (without walking hash chains, since we want to be
+ linear) means keeping a list of records which have been seen in each
+ hash chain, and another of records pointed to (ie. next pointers
+ from records and the initial hash chain heads). These two lists
+ should be equal. This will take 8 bytes per record, and require
+ sorting at the end.
+
+  So instead, we record each offset in a bitmap in such a way that
+ recording it twice will cancel out. Since each offset should appear
+ exactly twice, the bitmap should be zero at the end.
+
+ The approach was inspired by Bloom Filters (see Wikipedia). For
+ each value, we flip K bits in a bitmap of size N. The number of
+ distinct arrangements is:
+
+ N! / (K! * (N-K)!)
+
+ Of course, not all arrangements are actually distinct, but testing
+ shows this formula to be close enough.
+
+ So, if K == 8 and N == 256, the probability of two things flipping the same
+ bits is 1 in 409,663,695,276,000.
+
+ Given that ldb uses a hash size of 10000, using 32 bytes per hash chain
+ (320k) seems reasonable.
+*/
+#define NUM_HASHES 8
+#define BITMAP_BITS 256
+
+static void bit_flip(unsigned char bits[], unsigned int idx)
+{
+ bits[idx / CHAR_BIT] ^= (1 << (idx % CHAR_BIT));
+}
+
+/* We record offsets in a bitmap for the particular chain it should be in. */
+static void record_offset(unsigned char bits[], tdb_off_t off)
+{
+ uint32_t h1 = off, h2 = 0;
+ unsigned int i;
+
+ /* We get two good hash values out of jhash2, so we use both. Then
+ * we keep going to produce further hash values. */
+ for (i = 0; i < NUM_HASHES / 2; i++) {
+ hash(off, &h1, &h2);
+ bit_flip(bits, h1 % BITMAP_BITS);
+ bit_flip(bits, h2 % BITMAP_BITS);
+ h2++;
+ }
+}
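
/*
 * Editor's note, not part of the upstream diff: a minimal sketch of the
 * cancellation property record_offset() relies on. Since bit_flip() is an
 * XOR, recording the same offset twice restores the bitmap, so a bitmap
 * that is all-zero at the end means every offset was seen exactly twice
 * (once as a record on disk, once as a pointer to that record):
 *
 *	unsigned char bits[BITMAP_BITS / CHAR_BIT] = { 0 };
 *
 *	record_offset(bits, 0x1234);	// flips NUM_HASHES bit positions
 *	record_offset(bits, 0x1234);	// flips the same positions again
 *	// bits[] is all-zero once more
 */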
+
+/* Check that an in-use record is valid. */
+static bool tdb_check_used_record(struct tdb_context *tdb,
+ tdb_off_t off,
+ const struct tdb_record *rec,
+ unsigned char **hashes,
+ int (*check)(TDB_DATA, TDB_DATA, void *),
+ void *private_data)
+{
+ TDB_DATA key, data;
+ tdb_len_t len;
+
+ if (!tdb_check_record(tdb, off, rec))
+ return false;
+
+ /* key + data + tailer must fit in record */
+ len = rec->key_len;
+ len += rec->data_len;
+ if (len < rec->data_len) {
+ /* overflow */
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "Record lengths overflow\n"));
+ return false;
+ }
+ len += sizeof(tdb_off_t);
+ if (len < sizeof(tdb_off_t)) {
+ /* overflow */
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "Record lengths overflow\n"));
+ return false;
+ }
+
+ if (len > rec->rec_len) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Record offset %u too short for contents\n", off));
+ return false;
+ }
+
+ key = get_bytes(tdb, off + sizeof(*rec), rec->key_len);
+ if (!key.dptr)
+ return false;
+
+ if (tdb->hash_fn(&key) != rec->full_hash) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Record offset %u has incorrect hash\n", off));
+ goto fail_put_key;
+ }
+
+ /* Mark this offset as a known value for this hash bucket. */
+ record_offset(hashes[BUCKET(rec->full_hash)+1], off);
+ /* And similarly if the next pointer is valid. */
+ if (rec->next)
+ record_offset(hashes[BUCKET(rec->full_hash)+1], rec->next);
+
+ /* If they supply a check function and this record isn't dead,
+ get data and feed it. */
+ if (check && rec->magic != TDB_DEAD_MAGIC) {
+ data = get_bytes(tdb, off + sizeof(*rec) + rec->key_len,
+ rec->data_len);
+ if (!data.dptr)
+ goto fail_put_key;
+
+ if (check(key, data, private_data) == -1)
+ goto fail_put_data;
+ put_bytes(tdb, data);
+ }
+
+ put_bytes(tdb, key);
+ return true;
+
+fail_put_data:
+ put_bytes(tdb, data);
+fail_put_key:
+ put_bytes(tdb, key);
+ return false;
+}
+
+/* Check that an unused record is valid. */
+static bool tdb_check_free_record(struct tdb_context *tdb,
+ tdb_off_t off,
+ const struct tdb_record *rec,
+ unsigned char **hashes)
+{
+ if (!tdb_check_record(tdb, off, rec))
+ return false;
+
+ /* Mark this offset as a known value for the free list. */
+ record_offset(hashes[0], off);
+ /* And similarly if the next pointer is valid. */
+ if (rec->next)
+ record_offset(hashes[0], rec->next);
+ return true;
+}
+
+/* Slow, but should be very rare. */
+size_t tdb_dead_space(struct tdb_context *tdb, tdb_off_t off)
+{
+ size_t len;
+
+ for (len = 0; off + len < tdb->map_size; len++) {
+ char c;
+ if (tdb->methods->tdb_read(tdb, off, &c, 1, 0))
+ return 0;
+ if (c != 0 && c != 0x42)
+ break;
+ }
+ return len;
+}
+
+_PUBLIC_ int tdb_check(struct tdb_context *tdb,
+ int (*check)(TDB_DATA key, TDB_DATA data, void *private_data),
+ void *private_data)
+{
+ unsigned int h;
+ unsigned char **hashes;
+ tdb_off_t off, recovery_start;
+ struct tdb_record rec;
+ bool found_recovery = false;
+ tdb_len_t dead;
+ bool locked;
+
+ /* Read-only databases use no locking at all: it's best-effort.
+ * We may have a write lock already, so skip that case too. */
+ if (tdb->read_only || tdb->allrecord_lock.count != 0) {
+ locked = false;
+ } else {
+ if (tdb_lockall_read(tdb) == -1)
+ return -1;
+ locked = true;
+ }
+
+ /* Make sure we know true size of the underlying file. */
+ tdb_oob(tdb, tdb->map_size, 1, 1);
+
+ /* Header must be OK: also gets us the recovery ptr, if any. */
+ if (!tdb_check_header(tdb, &recovery_start))
+ goto unlock;
+
+ /* We should have the whole header, too. */
+ if (tdb->map_size < TDB_DATA_START(tdb->hash_size)) {
+ tdb->ecode = TDB_ERR_CORRUPT;
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "File too short for hashes\n"));
+ goto unlock;
+ }
+
+ /* One big malloc: pointers then bit arrays. */
+ hashes = (unsigned char **)calloc(
+ 1, sizeof(hashes[0]) * (1+tdb->hash_size)
+ + BITMAP_BITS / CHAR_BIT * (1+tdb->hash_size));
+ if (!hashes) {
+ tdb->ecode = TDB_ERR_OOM;
+ goto unlock;
+ }
+
+ /* Initialize pointers */
+ hashes[0] = (unsigned char *)(&hashes[1+tdb->hash_size]);
+ for (h = 1; h < 1+tdb->hash_size; h++)
+ hashes[h] = hashes[h-1] + BITMAP_BITS / CHAR_BIT;
+
+ /* Freelist and hash headers are all in a row: read them. */
+ for (h = 0; h < 1+tdb->hash_size; h++) {
+ if (tdb_ofs_read(tdb, FREELIST_TOP + h*sizeof(tdb_off_t),
+ &off) == -1)
+ goto free;
+ if (off)
+ record_offset(hashes[h], off);
+ }
+
+ /* For each record, read it in and check it's ok. */
+ for (off = TDB_DATA_START(tdb->hash_size);
+ off < tdb->map_size;
+ off += sizeof(rec) + rec.rec_len) {
+ if (tdb->methods->tdb_read(tdb, off, &rec, sizeof(rec),
+ DOCONV()) == -1)
+ goto free;
+ switch (rec.magic) {
+ case TDB_MAGIC:
+ case TDB_DEAD_MAGIC:
+ if (!tdb_check_used_record(tdb, off, &rec, hashes,
+ check, private_data))
+ goto free;
+ break;
+ case TDB_FREE_MAGIC:
+ if (!tdb_check_free_record(tdb, off, &rec, hashes))
+ goto free;
+ break;
+ /* If we crash after ftruncate, we can get zeroes or fill. */
+ case TDB_RECOVERY_INVALID_MAGIC:
+ case 0x42424242:
+ if (recovery_start == off) {
+ found_recovery = true;
+ break;
+ }
+ dead = tdb_dead_space(tdb, off);
+ if (dead < sizeof(rec))
+ goto corrupt;
+
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Dead space at %u-%u (of %u)\n",
+ off, off + dead, tdb->map_size));
+ rec.rec_len = dead - sizeof(rec);
+ break;
+ case TDB_RECOVERY_MAGIC:
+ if (recovery_start != off) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Unexpected recovery record at offset %u\n",
+ off));
+ goto free;
+ }
+ found_recovery = true;
+ break;
+ default: ;
+ corrupt:
+ tdb->ecode = TDB_ERR_CORRUPT;
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Bad magic 0x%x at offset %u\n",
+ rec.magic, off));
+ goto free;
+ }
+ }
+
+ /* Now, hashes should all be empty: each record exists and is referred
+ * to by one other. */
+ for (h = 0; h < 1+tdb->hash_size; h++) {
+ unsigned int i;
+ for (i = 0; i < BITMAP_BITS / CHAR_BIT; i++) {
+ if (hashes[h][i] != 0) {
+ tdb->ecode = TDB_ERR_CORRUPT;
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Hashes do not match records\n"));
+ goto free;
+ }
+ }
+ }
+
+ /* We must have found recovery area if there was one. */
+ if (recovery_start != 0 && !found_recovery) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Expected a recovery area at %u\n",
+ recovery_start));
+ goto free;
+ }
+
+ free(hashes);
+ if (locked) {
+ tdb_unlockall_read(tdb);
+ }
+ return 0;
+
+free:
+ free(hashes);
+unlock:
+ if (locked) {
+ tdb_unlockall_read(tdb);
+ }
+ return -1;
+}
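
[Editor's note: the following sketch is not part of the commit; it shows one
way a caller might drive the public tdb_check() entry point above with a
per-record callback. The database name and the size limit are hypothetical.]

#include <fcntl.h>
#include <stdio.h>
#include <tdb.h>

/* Callback for tdb_check(): returning -1 aborts the check. */
static int check_record(TDB_DATA key, TDB_DATA data, void *private_data)
{
	size_t *max_dsize = private_data;
	(void)key;
	return (data.dsize <= *max_dsize) ? 0 : -1;
}

int main(void)
{
	size_t max_dsize = 64 * 1024;	/* hypothetical sanity limit */
	struct tdb_context *tdb;

	tdb = tdb_open("example.tdb", 0 /* default hash size */,
		       TDB_DEFAULT, O_RDWR, 0600);
	if (tdb == NULL)
		return 1;

	if (tdb_check(tdb, check_record, &max_dsize) != 0)
		fprintf(stderr, "tdb_check: %s\n", tdb_errorstr(tdb));

	tdb_close(tdb);
	return 0;
}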
diff --git a/lib/tdb/common/dump.c b/lib/tdb/common/dump.c
new file mode 100644
index 0000000..adcf591
--- /dev/null
+++ b/lib/tdb/common/dump.c
@@ -0,0 +1,149 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Andrew Tridgell 1999-2005
+ Copyright (C) Paul `Rusty' Russell 2000
+ Copyright (C) Jeremy Allison 2000-2003
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "tdb_private.h"
+
+static tdb_off_t tdb_dump_record(struct tdb_context *tdb, int hash,
+ tdb_off_t offset)
+{
+ struct tdb_record rec;
+ tdb_off_t tailer_ofs, tailer;
+
+ if (tdb->methods->tdb_read(tdb, offset, (char *)&rec,
+ sizeof(rec), DOCONV()) == -1) {
+ printf("ERROR: failed to read record at %u\n", offset);
+ return 0;
+ }
+
+ printf(" rec: hash=%d offset=0x%08x next=0x%08x rec_len=%u "
+ "key_len=%u data_len=%u full_hash=0x%08x magic=0x%08x\n",
+ hash, offset, rec.next, rec.rec_len, rec.key_len, rec.data_len,
+ rec.full_hash, rec.magic);
+
+ tailer_ofs = offset + sizeof(rec) + rec.rec_len - sizeof(tdb_off_t);
+
+ if (tdb_ofs_read(tdb, tailer_ofs, &tailer) == -1) {
+ printf("ERROR: failed to read tailer at %u\n", tailer_ofs);
+ return rec.next;
+ }
+
+ if (tailer != rec.rec_len + sizeof(rec)) {
+ printf("ERROR: tailer does not match record! tailer=%u totalsize=%u\n",
+ (unsigned int)tailer, (unsigned int)(rec.rec_len + sizeof(rec)));
+ }
+ return rec.next;
+}
+
+static int tdb_dump_chain(struct tdb_context *tdb, int i)
+{
+ struct tdb_chainwalk_ctx chainwalk;
+ tdb_off_t rec_ptr, top;
+
+ if (i == -1) {
+ top = FREELIST_TOP;
+ } else {
+ top = TDB_HASH_TOP(i);
+ }
+
+ if (tdb_lock(tdb, i, F_WRLCK) != 0)
+ return -1;
+
+ if (tdb_ofs_read(tdb, top, &rec_ptr) == -1)
+ return tdb_unlock(tdb, i, F_WRLCK);
+
+ tdb_chainwalk_init(&chainwalk, rec_ptr);
+
+ if (rec_ptr)
+ printf("hash=%d\n", i);
+
+ while (rec_ptr) {
+ bool ok;
+ rec_ptr = tdb_dump_record(tdb, i, rec_ptr);
+ ok = tdb_chainwalk_check(tdb, &chainwalk, rec_ptr);
+ if (!ok) {
+ printf("circular hash chain %d\n", i);
+ break;
+ }
+ }
+
+ return tdb_unlock(tdb, i, F_WRLCK);
+}
+
+_PUBLIC_ void tdb_dump_all(struct tdb_context *tdb)
+{
+ uint32_t i;
+ for (i=0;i<tdb->hash_size;i++) {
+ tdb_dump_chain(tdb, i);
+ }
+ printf("freelist:\n");
+ tdb_dump_chain(tdb, -1);
+}
+
+_PUBLIC_ int tdb_printfreelist(struct tdb_context *tdb)
+{
+ int ret;
+ long total_free = 0;
+ tdb_off_t offset, rec_ptr;
+ struct tdb_record rec;
+
+ if ((ret = tdb_lock(tdb, -1, F_WRLCK)) != 0)
+ return ret;
+
+ offset = FREELIST_TOP;
+
+ /* read in the freelist top */
+ if (tdb_ofs_read(tdb, offset, &rec_ptr) == -1) {
+ tdb_unlock(tdb, -1, F_WRLCK);
+ return 0;
+ }
+
+ printf("freelist top=[0x%08x]\n", rec_ptr );
+ while (rec_ptr) {
+ if (tdb->methods->tdb_read(tdb, rec_ptr, (char *)&rec,
+ sizeof(rec), DOCONV()) == -1) {
+ tdb_unlock(tdb, -1, F_WRLCK);
+ return -1;
+ }
+
+ if (rec.magic != TDB_FREE_MAGIC) {
+ printf("bad magic 0x%08x in free list\n", rec.magic);
+ tdb_unlock(tdb, -1, F_WRLCK);
+ return -1;
+ }
+
+ printf("entry offset=[0x%08x], rec.rec_len = [0x%08x (%u)] (end = 0x%08x)\n",
+ rec_ptr, rec.rec_len, rec.rec_len, rec_ptr + rec.rec_len);
+ total_free += rec.rec_len;
+
+ /* move to the next record */
+ rec_ptr = rec.next;
+ }
+ printf("total rec_len = [0x%08lx (%lu)]\n", total_free, total_free);
+
+ return tdb_unlock(tdb, -1, F_WRLCK);
+}
+
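[Editor's note: a minimal usage sketch, not part of the commit. Both helpers
above take per-chain locks with F_WRLCK, so the database must be opened
read-write; the file name is hypothetical.]

#include <fcntl.h>
#include <tdb.h>

int main(void)
{
	struct tdb_context *tdb = tdb_open("example.tdb", 0, TDB_DEFAULT,
					   O_RDWR, 0600);
	if (tdb == NULL)
		return 1;

	tdb_dump_all(tdb);	/* every hash chain, then the freelist */
	tdb_printfreelist(tdb);	/* each free record plus a running total */

	tdb_close(tdb);
	return 0;
}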
diff --git a/lib/tdb/common/error.c b/lib/tdb/common/error.c
new file mode 100644
index 0000000..c3ef8bd
--- /dev/null
+++ b/lib/tdb/common/error.c
@@ -0,0 +1,74 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Andrew Tridgell 1999-2005
+ Copyright (C) Paul `Rusty' Russell 2000
+ Copyright (C) Jeremy Allison 2000-2003
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "tdb_private.h"
+
+_PUBLIC_ enum TDB_ERROR tdb_error(struct tdb_context *tdb)
+{
+ return tdb->ecode;
+}
+
+_PUBLIC_ const char *tdb_errorstr(struct tdb_context *tdb)
+{
+ switch (tdb->ecode) {
+ case TDB_SUCCESS:
+ return "Success";
+ break;
+ case TDB_ERR_CORRUPT:
+ return "Corrupt database";
+ break;
+ case TDB_ERR_IO:
+ return "IO Error";
+ break;
+ case TDB_ERR_LOCK:
+ return "Locking error";
+ break;
+ case TDB_ERR_OOM:
+ return "Out of memory";
+ break;
+ case TDB_ERR_EXISTS:
+ return "Record exists";
+ break;
+ case TDB_ERR_NOLOCK:
+ return "Lock exists on other keys";
+ break;
+ case TDB_ERR_EINVAL:
+ return "Invalid parameter";
+ break;
+ case TDB_ERR_NOEXIST:
+ return "Record does not exist";
+ break;
+ case TDB_ERR_RDONLY:
+ return "write not permitted";
+ break;
+ default:
+ break;
+ }
+
+ return "Invalid error code";
+}
+
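[Editor's note: a short sketch, not part of the commit, of the usual
error-handling pattern with the two functions above: on failure, fetch the
context's ecode via tdb_error() and a message via tdb_errorstr(). The file
and key names are hypothetical.]

#include <fcntl.h>
#include <stdio.h>
#include <tdb.h>

int main(void)
{
	struct tdb_context *tdb = tdb_open("example.tdb", 0, TDB_DEFAULT,
					   O_RDWR | O_CREAT, 0600);
	TDB_DATA key = { .dptr = (unsigned char *)"key", .dsize = 3 };
	TDB_DATA val = { .dptr = (unsigned char *)"val", .dsize = 3 };

	if (tdb == NULL)
		return 1;

	if (tdb_store(tdb, key, val, TDB_INSERT) != 0) {
		if (tdb_error(tdb) == TDB_ERR_EXISTS)
			printf("key already present\n");
		else
			fprintf(stderr, "store: %s\n", tdb_errorstr(tdb));
	}

	tdb_close(tdb);
	return 0;
}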
diff --git a/lib/tdb/common/freelist.c b/lib/tdb/common/freelist.c
new file mode 100644
index 0000000..046c747
--- /dev/null
+++ b/lib/tdb/common/freelist.c
@@ -0,0 +1,747 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Andrew Tridgell 1999-2005
+ Copyright (C) Paul `Rusty' Russell 2000
+ Copyright (C) Jeremy Allison 2000-2003
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "tdb_private.h"
+
+/* read a freelist record and check for simple errors */
+int tdb_rec_free_read(struct tdb_context *tdb, tdb_off_t off, struct tdb_record *rec)
+{
+ if (tdb->methods->tdb_read(tdb, off, rec, sizeof(*rec),DOCONV()) == -1)
+ return -1;
+
+ if (rec->magic == TDB_MAGIC) {
+		/* this happens when an app is shut down while deleting a
+		   record - we should not completely fail when this happens */
+ TDB_LOG((tdb, TDB_DEBUG_WARNING, "tdb_rec_free_read non-free magic 0x%x at offset=%u - fixing\n",
+ rec->magic, off));
+ rec->magic = TDB_FREE_MAGIC;
+ if (tdb_rec_write(tdb, off, rec) == -1)
+ return -1;
+ }
+
+ if (rec->magic != TDB_FREE_MAGIC) {
+ /* Ensure ecode is set for log fn. */
+ tdb->ecode = TDB_ERR_CORRUPT;
+ TDB_LOG((tdb, TDB_DEBUG_WARNING, "tdb_rec_free_read bad magic 0x%x at offset=%u\n",
+ rec->magic, off));
+ return -1;
+ }
+ if (tdb_oob(tdb, rec->next, sizeof(*rec), 0) != 0)
+ return -1;
+ return 0;
+}
+
+/* update a record tailer (must hold allocation lock) */
+static int update_tailer(struct tdb_context *tdb, tdb_off_t offset,
+ const struct tdb_record *rec)
+{
+ tdb_off_t totalsize;
+
+ /* Offset of tailer from record header */
+ totalsize = sizeof(*rec) + rec->rec_len;
+ return tdb_ofs_write(tdb, offset + totalsize - sizeof(tdb_off_t),
+ &totalsize);
+}
+
+/**
+ * Read the record directly on the left.
+ * Fail if there is no record on the left.
+ */
+static int read_record_on_left(struct tdb_context *tdb, tdb_off_t rec_ptr,
+ tdb_off_t *left_p,
+ struct tdb_record *left_r)
+{
+ tdb_off_t left_ptr;
+ tdb_off_t left_size;
+ struct tdb_record left_rec;
+ int ret;
+
+ left_ptr = rec_ptr - sizeof(tdb_off_t);
+
+ if (left_ptr <= TDB_DATA_START(tdb->hash_size)) {
+ /* no record on the left */
+ return -1;
+ }
+
+ /* Read in tailer and jump back to header */
+ ret = tdb_ofs_read(tdb, left_ptr, &left_size);
+ if (ret == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,
+ "tdb_free: left offset read failed at %u\n", left_ptr));
+ return -1;
+ }
+
+ /* it could be uninitialised data */
+ if (left_size == 0 || left_size == TDB_PAD_U32) {
+ return -1;
+ }
+
+ if (left_size > rec_ptr) {
+ return -1;
+ }
+
+ left_ptr = rec_ptr - left_size;
+
+ if (left_ptr < TDB_DATA_START(tdb->hash_size)) {
+ return -1;
+ }
+
+ /* Now read in the left record */
+ ret = tdb->methods->tdb_read(tdb, left_ptr, &left_rec,
+ sizeof(left_rec), DOCONV());
+ if (ret == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,
+ "tdb_free: left read failed at %u (%u)\n",
+ left_ptr, left_size));
+ return -1;
+ }
+
+ *left_p = left_ptr;
+ *left_r = left_rec;
+
+ return 0;
+}
+
+/**
+ * Merge new freelist record with the direct left neighbour.
+ * This assumes that left_rec represents the record
+ * directly to the left of right_rec and that this is
+ * a freelist record.
+ */
+static int merge_with_left_record(struct tdb_context *tdb,
+ tdb_off_t left_ptr,
+ struct tdb_record *left_rec,
+ struct tdb_record *right_rec)
+{
+ int ret;
+
+ left_rec->rec_len += sizeof(*right_rec) + right_rec->rec_len;
+
+ ret = tdb_rec_write(tdb, left_ptr, left_rec);
+ if (ret == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,
+ "merge_with_left_record: update_left failed at %u\n",
+ left_ptr));
+ return -1;
+ }
+
+ ret = update_tailer(tdb, left_ptr, left_rec);
+ if (ret == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,
+ "merge_with_left_record: update_tailer failed at %u\n",
+ left_ptr));
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Check whether the record left of a given freelist record is
+ * also a freelist record, and if so, merge the two records.
+ *
+ * Return code:
+ * -1 upon error
+ * 0 if left was not a free record
+ * 1 if left was free and successfully merged.
+ *
+ * The current record is handed in with pointer and fully read record.
+ *
+ * The left record pointer and struct can be retrieved as results
+ * in lp and lr.
+ */
+static int check_merge_with_left_record(struct tdb_context *tdb,
+ tdb_off_t rec_ptr,
+ struct tdb_record *rec,
+ tdb_off_t *lp,
+ struct tdb_record *lr)
+{
+ tdb_off_t left_ptr;
+ struct tdb_record left_rec;
+ int ret;
+
+ ret = read_record_on_left(tdb, rec_ptr, &left_ptr, &left_rec);
+ if (ret != 0) {
+ return 0;
+ }
+
+ if (left_rec.magic != TDB_FREE_MAGIC) {
+ return 0;
+ }
+
+ /* It's free - expand to include it. */
+ ret = merge_with_left_record(tdb, left_ptr, &left_rec, rec);
+ if (ret != 0) {
+ return -1;
+ }
+
+ if (lp != NULL) {
+ *lp = left_ptr;
+ }
+
+ if (lr != NULL) {
+ *lr = left_rec;
+ }
+
+ return 1;
+}
+
+/**
+ * Check whether the record left of a given freelist record is
+ * also a freelist record, and if so, merge the two records.
+ *
+ * Return code:
+ * -1 upon error
+ * 0 if left was not a free record
+ * 1 if left was free and successfully merged.
+ *
+ * In this variant, the input record is specified just as the pointer
+ * and is read from the database if needed.
+ *
+ * next_ptr will contain the original record's next pointer after
+ * successful merging (which will be lost after merging), so that
+ * the caller can update the last pointer.
+ */
+static int check_merge_ptr_with_left_record(struct tdb_context *tdb,
+ tdb_off_t rec_ptr,
+ tdb_off_t *next_ptr)
+{
+ tdb_off_t left_ptr;
+ struct tdb_record rec, left_rec;
+ int ret;
+
+ ret = read_record_on_left(tdb, rec_ptr, &left_ptr, &left_rec);
+ if (ret != 0) {
+ return 0;
+ }
+
+ if (left_rec.magic != TDB_FREE_MAGIC) {
+ return 0;
+ }
+
+ /* It's free - expand to include it. */
+
+ ret = tdb->methods->tdb_read(tdb, rec_ptr, &rec,
+ sizeof(rec), DOCONV());
+ if (ret != 0) {
+ return -1;
+ }
+
+ ret = merge_with_left_record(tdb, left_ptr, &left_rec, &rec);
+ if (ret != 0) {
+ return -1;
+ }
+
+ if (next_ptr != NULL) {
+ *next_ptr = rec.next;
+ }
+
+ return 1;
+}
+
+/**
+ * Add an element into the freelist.
+ *
+ * We merge the new record into the left record if it is also a
+ * free record, but not with the right one. This makes the
+ * operation O(1) instead of O(n): merging with the right record
+ * requires a traverse of the freelist to find the previous
+ * record in the free list.
+ *
+ * This prevents db traverses from being O(n^2) after a lot of deletes.
+ */
+int tdb_free(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec)
+{
+ int ret;
+
+ /* Allocation and tailer lock */
+ if (tdb_lock(tdb, -1, F_WRLCK) != 0)
+ return -1;
+
+ /* set an initial tailer, so if we fail we don't leave a bogus record */
+ if (update_tailer(tdb, offset, rec) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: update_tailer failed!\n"));
+ goto fail;
+ }
+
+ ret = check_merge_with_left_record(tdb, offset, rec, NULL, NULL);
+ if (ret == -1) {
+ goto fail;
+ }
+ if (ret == 1) {
+ /* merged */
+ goto done;
+ }
+
+ /* Nothing to merge, prepend to free list */
+
+ rec->magic = TDB_FREE_MAGIC;
+
+ if (tdb_ofs_read(tdb, FREELIST_TOP, &rec->next) == -1 ||
+ tdb_rec_write(tdb, offset, rec) == -1 ||
+ tdb_ofs_write(tdb, FREELIST_TOP, &offset) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free record write failed at offset=%u\n", offset));
+ goto fail;
+ }
+
+done:
+ /* And we're done. */
+ tdb_unlock(tdb, -1, F_WRLCK);
+ return 0;
+
+ fail:
+ tdb_unlock(tdb, -1, F_WRLCK);
+ return -1;
+}
+
+
+
+/*
+ the core of tdb_allocate - called when we have decided which
+ free list entry to use
+
+ Note that we try to allocate by grabbing data from the end of an existing record,
+  not the beginning. This is so that the left merge in a free is more likely
+  to be able to free up the record without fragmentation.
+ */
+static tdb_off_t tdb_allocate_ofs(struct tdb_context *tdb,
+ tdb_len_t length, tdb_off_t rec_ptr,
+ struct tdb_record *rec, tdb_off_t last_ptr)
+{
+#define MIN_REC_SIZE (sizeof(struct tdb_record) + sizeof(tdb_off_t) + 8)
+
+ if (rec->rec_len < length + MIN_REC_SIZE) {
+ /* we have to grab the whole record */
+
+ /* unlink it from the previous record */
+ if (tdb_ofs_write(tdb, last_ptr, &rec->next) == -1) {
+ return 0;
+ }
+
+ /* mark it not free */
+ rec->magic = TDB_MAGIC;
+ if (tdb_rec_write(tdb, rec_ptr, rec) == -1) {
+ return 0;
+ }
+ return rec_ptr;
+ }
+
+ /* we're going to just shorten the existing record */
+ rec->rec_len -= (length + sizeof(*rec));
+ if (tdb_rec_write(tdb, rec_ptr, rec) == -1) {
+ return 0;
+ }
+ if (update_tailer(tdb, rec_ptr, rec) == -1) {
+ return 0;
+ }
+
+ /* and setup the new record */
+ rec_ptr += sizeof(*rec) + rec->rec_len;
+
+ memset(rec, '\0', sizeof(*rec));
+ rec->rec_len = length;
+ rec->magic = TDB_MAGIC;
+
+ if (tdb_rec_write(tdb, rec_ptr, rec) == -1) {
+ return 0;
+ }
+
+ if (update_tailer(tdb, rec_ptr, rec) == -1) {
+ return 0;
+ }
+
+ return rec_ptr;
+}
+
+/* allocate some space from the free list. The offset returned points
+   to an unconnected tdb_record within the database with room for at
+ least length bytes of total data
+
+ 0 is returned if the space could not be allocated
+ */
+static tdb_off_t tdb_allocate_from_freelist(
+ struct tdb_context *tdb, tdb_len_t length, struct tdb_record *rec)
+{
+ tdb_off_t rec_ptr, last_ptr, newrec_ptr;
+ struct tdb_chainwalk_ctx chainwalk;
+ bool modified;
+ struct {
+ tdb_off_t rec_ptr, last_ptr;
+ tdb_len_t rec_len;
+ } bestfit;
+ float multiplier = 1.0;
+ bool merge_created_candidate;
+
+ /* over-allocate to reduce fragmentation */
+ length *= 1.25;
+
+ /* Extra bytes required for tailer */
+ length += sizeof(tdb_off_t);
+ length = TDB_ALIGN(length, TDB_ALIGNMENT);
+
+ again:
+ merge_created_candidate = false;
+ last_ptr = FREELIST_TOP;
+
+ /* read in the freelist top */
+ if (tdb_ofs_read(tdb, FREELIST_TOP, &rec_ptr) == -1)
+ return 0;
+
+ modified = false;
+ tdb_chainwalk_init(&chainwalk, rec_ptr);
+
+ bestfit.rec_ptr = 0;
+ bestfit.last_ptr = 0;
+ bestfit.rec_len = 0;
+
+ /*
+ this is a best fit allocation strategy. Originally we used
+ a first fit strategy, but it suffered from massive fragmentation
+ issues when faced with a slowly increasing record size.
+ */
+ while (rec_ptr) {
+ int ret;
+ tdb_off_t left_ptr;
+ struct tdb_record left_rec;
+
+ if (tdb_rec_free_read(tdb, rec_ptr, rec) == -1) {
+ return 0;
+ }
+
+ ret = check_merge_with_left_record(tdb, rec_ptr, rec,
+ &left_ptr, &left_rec);
+ if (ret == -1) {
+ return 0;
+ }
+ if (ret == 1) {
+ /* merged */
+ rec_ptr = rec->next;
+ ret = tdb_ofs_write(tdb, last_ptr, &rec->next);
+ if (ret == -1) {
+ return 0;
+ }
+
+ /*
+ * We have merged the current record into the left
+ * neighbour. So our traverse of the freelist will
+ * skip it and consider the next record in the chain.
+ *
+ * But the enlarged left neighbour may be a candidate.
+			 * If it is, we cannot use it directly, though.
+			 * The only thing we can (and must) do here is to
+			 * update the current best fit size in the chain if the
+			 * current best fit is the left record. (By that we may
+			 * worsen the best fit we already had, but this is not a
+			 * problem.)
+ *
+ * If the current best fit is not the left record,
+ * all we can do is remember the fact that a merge
+ * created a new candidate so that we can trigger
+ * a second walk of the freelist if at the end of
+ * the first walk we have not found any fit.
+ * This way we can avoid expanding the database.
+ */
+
+ if (bestfit.rec_ptr == left_ptr) {
+ bestfit.rec_len = left_rec.rec_len;
+ }
+
+ if (left_rec.rec_len > length) {
+ merge_created_candidate = true;
+ }
+
+ modified = true;
+
+ continue;
+ }
+
+ if (rec->rec_len >= length) {
+ if (bestfit.rec_ptr == 0 ||
+ rec->rec_len < bestfit.rec_len) {
+ bestfit.rec_len = rec->rec_len;
+ bestfit.rec_ptr = rec_ptr;
+ bestfit.last_ptr = last_ptr;
+ }
+ }
+
+ /* move to the next record */
+ last_ptr = rec_ptr;
+ rec_ptr = rec->next;
+
+ if (!modified) {
+ bool ok;
+ ok = tdb_chainwalk_check(tdb, &chainwalk, rec_ptr);
+ if (!ok) {
+ return 0;
+ }
+ }
+
+		/* if we've found a record that is big enough, then
+		   stop searching if it's also not too big. The
+		   definition of 'too big' changes as we scan
+		   through. */
+ if (bestfit.rec_len > 0 &&
+ bestfit.rec_len < length * multiplier) {
+ break;
+ }
+
+ /* this multiplier means we only extremely rarely
+ search more than 50 or so records. At 50 records we
+ accept records up to 11 times larger than what we
+ want */
+ multiplier *= 1.05;
+ }
+
+ if (bestfit.rec_ptr != 0) {
+ if (tdb_rec_free_read(tdb, bestfit.rec_ptr, rec) == -1) {
+ return 0;
+ }
+
+ newrec_ptr = tdb_allocate_ofs(tdb, length, bestfit.rec_ptr,
+ rec, bestfit.last_ptr);
+ return newrec_ptr;
+ }
+
+ if (merge_created_candidate) {
+ goto again;
+ }
+
+ /* we didn't find enough space. See if we can expand the
+ database and if we can then try again */
+ if (tdb_expand(tdb, length + sizeof(*rec)) == 0)
+ goto again;
+
+ return 0;
+}
+
+static bool tdb_alloc_dead(
+ struct tdb_context *tdb, int hash, tdb_len_t length,
+ tdb_off_t *rec_ptr, struct tdb_record *rec)
+{
+ tdb_off_t last_ptr;
+
+ *rec_ptr = tdb_find_dead(tdb, hash, rec, length, &last_ptr);
+ if (*rec_ptr == 0) {
+ return false;
+ }
+ /*
+ * Unlink the record from the hash chain, it's about to be moved into
+ * another one.
+ */
+ return (tdb_ofs_write(tdb, last_ptr, &rec->next) == 0);
+}
+
+static void tdb_purge_dead(struct tdb_context *tdb, uint32_t hash)
+{
+ int max_dead_records = tdb->max_dead_records;
+
+ tdb->max_dead_records = 0;
+
+ tdb_trim_dead(tdb, hash);
+
+ tdb->max_dead_records = max_dead_records;
+}
+
+/*
+ * Chain "hash" is assumed to be locked
+ */
+
+tdb_off_t tdb_allocate(struct tdb_context *tdb, int hash, tdb_len_t length,
+ struct tdb_record *rec)
+{
+ tdb_off_t ret;
+ uint32_t i;
+
+ if (tdb->max_dead_records == 0) {
+ /*
+ * No dead records to expect anywhere. Do the blocking
+ * freelist lock without trying to steal from others
+ */
+ goto blocking_freelist_allocate;
+ }
+
+ /*
+ * The following loop tries to get the freelist lock nonblocking. If
+ * it gets the lock, allocate from there. If the freelist is busy,
+ * instead of waiting we try to steal dead records from other hash
+ * chains.
+ *
+ * Be aware that we do nonblocking locks on the other hash chains as
+ * well and fail gracefully. This way we avoid deadlocks (we block two
+ * hash chains, something which is pretty bad normally)
+ */
+
+ for (i=0; i<tdb->hash_size; i++) {
+
+ int list;
+
+ list = BUCKET(hash+i);
+
+ if (tdb_lock_nonblock(tdb, list, F_WRLCK) == 0) {
+ bool got_dead;
+
+ got_dead = tdb_alloc_dead(tdb, list, length, &ret, rec);
+ tdb_unlock(tdb, list, F_WRLCK);
+
+ if (got_dead) {
+ return ret;
+ }
+ }
+
+ if (tdb_lock_nonblock(tdb, -1, F_WRLCK) == 0) {
+ /*
+ * Under the freelist lock take the chance to give
+ * back our dead records.
+ */
+ tdb_purge_dead(tdb, hash);
+
+ ret = tdb_allocate_from_freelist(tdb, length, rec);
+ tdb_unlock(tdb, -1, F_WRLCK);
+ return ret;
+ }
+ }
+
+blocking_freelist_allocate:
+
+ if (tdb_lock(tdb, -1, F_WRLCK) == -1) {
+ return 0;
+ }
+ /*
+ * Dead records can happen even if max_dead_records==0, they
+ * are older than the max_dead_records concept: They happen if
+ * tdb_delete happens concurrently with a traverse.
+ */
+ tdb_purge_dead(tdb, hash);
+ ret = tdb_allocate_from_freelist(tdb, length, rec);
+ tdb_unlock(tdb, -1, F_WRLCK);
+ return ret;
+}
+
+/**
+ * Merge adjacent records in the freelist.
+ */
+static int tdb_freelist_merge_adjacent(struct tdb_context *tdb,
+ int *count_records, int *count_merged)
+{
+ tdb_off_t cur, next;
+ int count = 0;
+ int merged = 0;
+ int ret;
+
+ ret = tdb_lock(tdb, -1, F_RDLCK);
+ if (ret == -1) {
+ return -1;
+ }
+
+ cur = FREELIST_TOP;
+ while (tdb_ofs_read(tdb, cur, &next) == 0 && next != 0) {
+ tdb_off_t next2;
+
+ count++;
+
+ ret = check_merge_ptr_with_left_record(tdb, next, &next2);
+ if (ret == -1) {
+ goto done;
+ }
+ if (ret == 1) {
+ /*
+ * merged:
+ * now let cur->next point to next2 instead of next
+ */
+
+ ret = tdb_ofs_write(tdb, cur, &next2);
+ if (ret != 0) {
+ goto done;
+ }
+
+ next = next2;
+ merged++;
+ }
+
+ cur = next;
+ }
+
+ if (count_records != NULL) {
+ *count_records = count;
+ }
+
+ if (count_merged != NULL) {
+ *count_merged = merged;
+ }
+
+ ret = 0;
+
+done:
+ tdb_unlock(tdb, -1, F_RDLCK);
+ return ret;
+}
+
+/**
+ * return the size of the freelist - no merging done
+ */
+static int tdb_freelist_size_no_merge(struct tdb_context *tdb)
+{
+ tdb_off_t ptr;
+ int count=0;
+
+ if (tdb_lock(tdb, -1, F_RDLCK) == -1) {
+ return -1;
+ }
+
+ ptr = FREELIST_TOP;
+ while (tdb_ofs_read(tdb, ptr, &ptr) == 0 && ptr != 0) {
+ count++;
+ }
+
+ tdb_unlock(tdb, -1, F_RDLCK);
+ return count;
+}
+
+/**
+ * return the size of the freelist - used to decide if we should repack
+ *
+ * As a side effect, adjacent records are merged unless the
+ * database is read-only, in order to reduce the fragmentation
+ * without repacking.
+ */
+_PUBLIC_ int tdb_freelist_size(struct tdb_context *tdb)
+{
+
+ int count = 0;
+
+ if (tdb->read_only) {
+ count = tdb_freelist_size_no_merge(tdb);
+ } else {
+ int ret;
+ ret = tdb_freelist_merge_adjacent(tdb, &count, NULL);
+ if (ret != 0) {
+ return -1;
+ }
+ }
+
+ return count;
+}
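
[Editor's note: a sketch, not part of the commit, of the repack decision the
comment above alludes to. The threshold is hypothetical; tdb_repack() is
tdb's public repacking helper.]

#include <tdb.h>

#define FREELIST_REPACK_LIMIT 10000	/* hypothetical threshold */

int maybe_repack(struct tdb_context *tdb)
{
	/* As a side effect this also merges adjacent free records. */
	int size = tdb_freelist_size(tdb);

	if (size < 0)
		return -1;
	if (size > FREELIST_REPACK_LIMIT)
		return tdb_repack(tdb);
	return 0;
}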
diff --git a/lib/tdb/common/freelistcheck.c b/lib/tdb/common/freelistcheck.c
new file mode 100644
index 0000000..2f1e6eb
--- /dev/null
+++ b/lib/tdb/common/freelistcheck.c
@@ -0,0 +1,107 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Jeremy Allison 2006
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "tdb_private.h"
+
+/* Check the freelist is good and contains no loops.
+ Very memory intensive - only do this as a consistency
+   checker. Heh heh - uses an in-memory tdb as the storage
+ for the "seen" record list. For some reason this strikes
+ me as extremely clever as I don't have to write another tree
+ data structure implementation :-).
+ */
+
+static int seen_insert(struct tdb_context *mem_tdb, tdb_off_t rec_ptr)
+{
+ TDB_DATA key;
+
+ key.dptr = (unsigned char *)&rec_ptr;
+ key.dsize = sizeof(rec_ptr);
+ return tdb_store(mem_tdb, key, tdb_null, TDB_INSERT);
+}
+
+_PUBLIC_ int tdb_validate_freelist(struct tdb_context *tdb, int *pnum_entries)
+{
+ struct tdb_context *mem_tdb = NULL;
+ struct tdb_record rec;
+ tdb_off_t rec_ptr, last_ptr;
+ int ret = -1;
+
+ *pnum_entries = 0;
+
+ mem_tdb = tdb_open("flval", tdb->hash_size,
+ TDB_INTERNAL, O_RDWR, 0600);
+ if (!mem_tdb) {
+ return -1;
+ }
+
+ if (tdb_lock(tdb, -1, F_WRLCK) == -1) {
+ tdb_close(mem_tdb);
+ return 0;
+ }
+
+ last_ptr = FREELIST_TOP;
+
+ /* Store the FREELIST_TOP record. */
+ if (seen_insert(mem_tdb, last_ptr) == -1) {
+ tdb->ecode = TDB_ERR_CORRUPT;
+ ret = -1;
+ goto fail;
+ }
+
+ /* read in the freelist top */
+ if (tdb_ofs_read(tdb, FREELIST_TOP, &rec_ptr) == -1) {
+ goto fail;
+ }
+
+ while (rec_ptr) {
+
+ /* If we can't store this record (we've seen it
+ before) then the free list has a loop and must
+ be corrupt. */
+
+ if (seen_insert(mem_tdb, rec_ptr)) {
+ tdb->ecode = TDB_ERR_CORRUPT;
+ ret = -1;
+ goto fail;
+ }
+
+ if (tdb_rec_free_read(tdb, rec_ptr, &rec) == -1) {
+ goto fail;
+ }
+
+ /* move to the next record */
+ rec_ptr = rec.next;
+ *pnum_entries += 1;
+ }
+
+ ret = 0;
+
+ fail:
+
+ tdb_close(mem_tdb);
+ tdb_unlock(tdb, -1, F_WRLCK);
+ return ret;
+}
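
[Editor's note: a minimal caller sketch for tdb_validate_freelist(), not part
of the commit; the wrapper name is hypothetical.]

#include <stdio.h>
#include <tdb.h>

int report_freelist(struct tdb_context *tdb)
{
	int num_entries = 0;

	if (tdb_validate_freelist(tdb, &num_entries) == -1) {
		fprintf(stderr, "freelist invalid: %s\n", tdb_errorstr(tdb));
		return -1;
	}
	printf("freelist ok: %d entries\n", num_entries);
	return 0;
}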
diff --git a/lib/tdb/common/hash.c b/lib/tdb/common/hash.c
new file mode 100644
index 0000000..ca4cac3
--- /dev/null
+++ b/lib/tdb/common/hash.c
@@ -0,0 +1,345 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Rusty Russell 2010
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+#include "tdb_private.h"
+
+/* This is based on the hash algorithm from gdbm */
+unsigned int tdb_old_hash(TDB_DATA *key)
+{
+ uint32_t value; /* Used to compute the hash value. */
+ uint32_t i; /* Used to cycle through random values. */
+
+ /* Set the initial value from the key size. */
+ for (value = 0x238F13AF * key->dsize, i=0; i < key->dsize; i++)
+ value = (value + (key->dptr[i] << (i*5 % 24)));
+
+ return (1103515243 * value + 12345);
+}
+
+#ifndef WORDS_BIGENDIAN
+# define HASH_LITTLE_ENDIAN 1
+# define HASH_BIG_ENDIAN 0
+#else
+# define HASH_LITTLE_ENDIAN 0
+# define HASH_BIG_ENDIAN 1
+#endif
+
+/*
+-------------------------------------------------------------------------------
+lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+
+These are functions for producing 32-bit hashes for hash table lookup.
+hash_word(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
+are externally useful functions. Routines to test the hash are included
+if SELF_TEST is defined. You can use this free for any purpose. It's in
+the public domain. It has no warranty.
+
+You probably want to use hashlittle(). hashlittle() and hashbig()
+hash byte arrays. hashlittle() is faster than hashbig() on
+little-endian machines. Intel and AMD are little-endian machines.
+On second thought, you probably want hashlittle2(), which is identical to
+hashlittle() except it returns two 32-bit hashes for the price of one.
+You could implement hashbig2() if you wanted but I haven't bothered here.
+
+If you want to find a hash of, say, exactly 7 integers, do
+ a = i1; b = i2; c = i3;
+ mix(a,b,c);
+ a += i4; b += i5; c += i6;
+ mix(a,b,c);
+ a += i7;
+ final(a,b,c);
+then use c as the hash value. If you have a variable length array of
+4-byte integers to hash, use hash_word(). If you have a byte array (like
+a character string), use hashlittle(). If you have several byte arrays, or
+a mix of things, see the comments above hashlittle().
+
+Why is this so big? I read 12 bytes at a time into 3 4-byte integers,
+then mix those integers. This is fast (you can do a lot more thorough
+mixing with 12*3 instructions on 3 integers than you can with 3 instructions
+on 1 byte), but shoehorning those bytes into integers efficiently is messy.
+*/
+
+#define hashsize(n) ((uint32_t)1<<(n))
+#define hashmask(n) (hashsize(n)-1)
+#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
+
+/*
+-------------------------------------------------------------------------------
+mix -- mix 3 32-bit values reversibly.
+
+This is reversible, so any information in (a,b,c) before mix() is
+still in (a,b,c) after mix().
+
+If four pairs of (a,b,c) inputs are run through mix(), or through
+mix() in reverse, there are at least 32 bits of the output that
+are sometimes the same for one pair and different for another pair.
+This was tested for:
+* pairs that differed by one bit, by two bits, in any combination
+ of top bits of (a,b,c), or in any combination of bottom bits of
+ (a,b,c).
+* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
+ the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
+ is commonly produced by subtraction) look like a single 1-bit
+ difference.
+* the base values were pseudorandom, all zero but one bit set, or
+ all zero plus a counter that starts at zero.
+
+Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
+satisfy this are
+ 4 6 8 16 19 4
+ 9 15 3 18 27 15
+ 14 9 3 7 17 3
+Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
+for "differ" defined as + with a one-bit base and a two-bit delta. I
+used http://burtleburtle.net/bob/hash/avalanche.html to choose
+the operations, constants, and arrangements of the variables.
+
+This does not achieve avalanche. There are input bits of (a,b,c)
+that fail to affect some output bits of (a,b,c), especially of a. The
+most thoroughly mixed value is c, but it doesn't really even achieve
+avalanche in c.
+
+This allows some parallelism. Read-after-writes are good at doubling
+the number of bits affected, so the goal of mixing pulls in the opposite
+direction as the goal of parallelism. I did what I could. Rotates
+seem to cost as much as shifts on every machine I could lay my hands
+on, and rotates are much kinder to the top and bottom bits, so I used
+rotates.
+-------------------------------------------------------------------------------
+*/
+#define mix(a,b,c) \
+{ \
+ a -= c; a ^= rot(c, 4); c += b; \
+ b -= a; b ^= rot(a, 6); a += c; \
+ c -= b; c ^= rot(b, 8); b += a; \
+ a -= c; a ^= rot(c,16); c += b; \
+ b -= a; b ^= rot(a,19); a += c; \
+ c -= b; c ^= rot(b, 4); b += a; \
+}
+
+/*
+-------------------------------------------------------------------------------
+final -- final mixing of 3 32-bit values (a,b,c) into c
+
+Pairs of (a,b,c) values differing in only a few bits will usually
+produce values of c that look totally different. This was tested for
+* pairs that differed by one bit, by two bits, in any combination
+ of top bits of (a,b,c), or in any combination of bottom bits of
+ (a,b,c).
+* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
+ the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
+ is commonly produced by subtraction) look like a single 1-bit
+ difference.
+* the base values were pseudorandom, all zero but one bit set, or
+ all zero plus a counter that starts at zero.
+
+These constants passed:
+ 14 11 25 16 4 14 24
+ 12 14 25 16 4 14 24
+and these came close:
+ 4 8 15 26 3 22 24
+ 10 8 15 26 3 22 24
+ 11 8 15 26 3 22 24
+-------------------------------------------------------------------------------
+*/
+#define final(a,b,c) \
+{ \
+ c ^= b; c -= rot(b,14); \
+ a ^= c; a -= rot(c,11); \
+ b ^= a; b -= rot(a,25); \
+ c ^= b; c -= rot(b,16); \
+ a ^= c; a -= rot(c,4); \
+ b ^= a; b -= rot(a,14); \
+ c ^= b; c -= rot(b,24); \
+}
+
+
+/*
+-------------------------------------------------------------------------------
+hashlittle() -- hash a variable-length key into a 32-bit value
+ k : the key (the unaligned variable-length array of bytes)
+ length : the length of the key, counting by bytes
+ val2 : IN: can be any 4-byte value OUT: second 32 bit hash.
+Returns a 32-bit value. Every bit of the key affects every bit of
+the return value. Two keys differing by one or two bits will have
+totally different hash values. Note that the return value is better
+mixed than val2, so use that first.
+
+The best hash table sizes are powers of 2. There is no need to do
+mod a prime (mod is sooo slow!). If you need less than 32 bits,
+use a bitmask. For example, if you need only 10 bits, do
+ h = (h & hashmask(10));
+In which case, the hash table should have hashsize(10) elements.
+
+If you are hashing n strings (uint8_t **)k, do it like this:
+ for (i=0, h=0; i<n; ++i) h = hashlittle( k[i], len[i], h);
+
+By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
+code any way you wish, private, educational, or commercial. It's free.
+
+Use for hash table lookup, or anything where one collision in 2^^32 is
+acceptable. Do NOT use for cryptographic purposes.
+-------------------------------------------------------------------------------
+*/
+
+static uint32_t hashlittle( const void *key, size_t length )
+{
+ uint32_t a,b,c; /* internal state */
+ union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + ((uint32_t)length);
+
+ u.ptr = key;
+ if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
+ const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
+ const uint8_t *k8;
+
+ /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 12;
+ k += 3;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ k8 = (const uint8_t *)k;
+ switch(length)
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=((uint32_t)k8[10])<<16; FALL_THROUGH;
+ case 10: c+=((uint32_t)k8[9])<<8; FALL_THROUGH;
+ case 9 : c+=k8[8]; FALL_THROUGH;
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=((uint32_t)k8[6])<<16; FALL_THROUGH;
+ case 6 : b+=((uint32_t)k8[5])<<8; FALL_THROUGH;
+ case 5 : b+=k8[4]; FALL_THROUGH;
+ case 4 : a+=k[0]; break;
+ case 3 : a+=((uint32_t)k8[2])<<16; FALL_THROUGH;
+ case 2 : a+=((uint32_t)k8[1])<<8; FALL_THROUGH;
+ case 1 : a+=k8[0]; break;
+ case 0 : return c;
+ }
+ } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
+ const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */
+ const uint8_t *k8;
+
+ /*--------------- all but last block: aligned reads and different mixing */
+ while (length > 12)
+ {
+ a += k[0] + (((uint32_t)k[1])<<16);
+ b += k[2] + (((uint32_t)k[3])<<16);
+ c += k[4] + (((uint32_t)k[5])<<16);
+ mix(a,b,c);
+ length -= 12;
+ k += 6;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ k8 = (const uint8_t *)k;
+ switch(length)
+ {
+ case 12: c+=k[4]+(((uint32_t)k[5])<<16);
+ b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 11: c+=((uint32_t)k8[10])<<16; FALL_THROUGH;
+ case 10: c+=k[4];
+ b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 9 : c+=k8[8]; FALL_THROUGH;
+ case 8 : b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 7 : b+=((uint32_t)k8[6])<<16; FALL_THROUGH;
+ case 6 : b+=k[2];
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 5 : b+=k8[4]; FALL_THROUGH;
+ case 4 : a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 3 : a+=((uint32_t)k8[2])<<16; FALL_THROUGH;
+ case 2 : a+=k[0];
+ break;
+ case 1 : a+=k8[0];
+ break;
+ case 0 : return c; /* zero length requires no mixing */
+ }
+
+ } else { /* need to read the key one byte at a time */
+ const uint8_t *k = (const uint8_t *)key;
+
+ /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ a += ((uint32_t)k[1])<<8;
+ a += ((uint32_t)k[2])<<16;
+ a += ((uint32_t)k[3])<<24;
+ b += k[4];
+ b += ((uint32_t)k[5])<<8;
+ b += ((uint32_t)k[6])<<16;
+ b += ((uint32_t)k[7])<<24;
+ c += k[8];
+ c += ((uint32_t)k[9])<<8;
+ c += ((uint32_t)k[10])<<16;
+ c += ((uint32_t)k[11])<<24;
+ mix(a,b,c);
+ length -= 12;
+ k += 12;
+ }
+
+ /*-------------------------------- last block: affect all 32 bits of (c) */
+ switch(length)
+ {
+ case 12: c+=((uint32_t)k[11])<<24; FALL_THROUGH;
+ case 11: c+=((uint32_t)k[10])<<16; FALL_THROUGH;
+ case 10: c+=((uint32_t)k[9])<<8; FALL_THROUGH;
+ case 9 : c+=k[8]; FALL_THROUGH;
+ case 8 : b+=((uint32_t)k[7])<<24; FALL_THROUGH;
+ case 7 : b+=((uint32_t)k[6])<<16; FALL_THROUGH;
+ case 6 : b+=((uint32_t)k[5])<<8; FALL_THROUGH;
+ case 5 : b+=k[4]; FALL_THROUGH;
+ case 4 : a+=((uint32_t)k[3])<<24; FALL_THROUGH;
+ case 3 : a+=((uint32_t)k[2])<<16; FALL_THROUGH;
+ case 2 : a+=((uint32_t)k[1])<<8; FALL_THROUGH;
+ case 1 : a+=k[0];
+ break;
+ case 0 : return c;
+ }
+ }
+
+ final(a,b,c);
+ return c;
+}
+
+_PUBLIC_ unsigned int tdb_jenkins_hash(TDB_DATA *key)
+{
+ return hashlittle(key->dptr, key->dsize);
+}
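
[Editor's note: a sketch, not part of the commit, showing how a caller can
select the Jenkins hash explicitly via tdb_open_ex() (passing NULL for the
logging context keeps the default logger); the file name is hypothetical.]

#include <fcntl.h>
#include <tdb.h>

int main(void)
{
	struct tdb_context *tdb =
		tdb_open_ex("example.tdb", 0, TDB_DEFAULT,
			    O_RDWR | O_CREAT, 0600,
			    NULL /* log ctx */, tdb_jenkins_hash);
	if (tdb == NULL)
		return 1;

	tdb_close(tdb);
	return 0;
}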
diff --git a/lib/tdb/common/io.c b/lib/tdb/common/io.c
new file mode 100644
index 0000000..0de0dab
--- /dev/null
+++ b/lib/tdb/common/io.c
@@ -0,0 +1,806 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Andrew Tridgell 1999-2005
+ Copyright (C) Paul `Rusty' Russell 2000
+ Copyright (C) Jeremy Allison 2000-2003
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+
+#include "tdb_private.h"
+
+/*
+ * We prepend the mutex area, so fixup offsets. See mutex.c for details.
+ * tdb->hdr_ofs is 0 or header.mutex_size.
+ *
+ * Note that we only have the 4GB limit of tdb_off_t for
+ * tdb->map_size. The file size on disk can be 4GB + tdb->hdr_ofs!
+ */
+
+static bool tdb_adjust_offset(struct tdb_context *tdb, off_t *off)
+{
+ off_t tmp = tdb->hdr_ofs + *off;
+
+ if ((tmp < tdb->hdr_ofs) || (tmp < *off)) {
+ errno = EIO;
+ return false;
+ }
+
+ *off = tmp;
+ return true;
+}
+
+static ssize_t tdb_pwrite(struct tdb_context *tdb, const void *buf,
+ size_t count, off_t offset)
+{
+ ssize_t ret;
+
+ if (!tdb_adjust_offset(tdb, &offset)) {
+ return -1;
+ }
+
+ do {
+ ret = pwrite(tdb->fd, buf, count, offset);
+ } while ((ret == -1) && (errno == EINTR));
+
+ return ret;
+}
+
+static ssize_t tdb_pread(struct tdb_context *tdb, void *buf,
+ size_t count, off_t offset)
+{
+ ssize_t ret;
+
+ if (!tdb_adjust_offset(tdb, &offset)) {
+ return -1;
+ }
+
+ do {
+ ret = pread(tdb->fd, buf, count, offset);
+ } while ((ret == -1) && (errno == EINTR));
+
+ return ret;
+}
+
+static int tdb_ftruncate(struct tdb_context *tdb, off_t length)
+{
+ ssize_t ret;
+
+ if (!tdb_adjust_offset(tdb, &length)) {
+ return -1;
+ }
+
+ do {
+ ret = ftruncate(tdb->fd, length);
+ } while ((ret == -1) && (errno == EINTR));
+
+ return ret;
+}
+
+#ifdef HAVE_POSIX_FALLOCATE
+static int tdb_posix_fallocate(struct tdb_context *tdb, off_t offset,
+ off_t len)
+{
+ ssize_t ret;
+
+ if (!tdb_adjust_offset(tdb, &offset)) {
+ return -1;
+ }
+
+ do {
+ ret = posix_fallocate(tdb->fd, offset, len);
+ } while ((ret == -1) && (errno == EINTR));
+
+ return ret;
+}
+#endif
+
+static int tdb_fstat(struct tdb_context *tdb, struct stat *buf)
+{
+ int ret;
+
+ ret = fstat(tdb->fd, buf);
+ if (ret == -1) {
+ return -1;
+ }
+
+ if (buf->st_size < tdb->hdr_ofs) {
+ errno = EIO;
+ return -1;
+ }
+ buf->st_size -= tdb->hdr_ofs;
+
+ return ret;
+}
+
+/* check for an out-of-bounds access - if it is out of bounds then
+   see if the database has been expanded by someone else and update
+   our map if necessary
+*/
+static int tdb_notrans_oob(
+ struct tdb_context *tdb, tdb_off_t off, tdb_len_t len, int probe)
+{
+ struct stat st;
+ if (len + off < len) {
+ if (!probe) {
+ /* Ensure ecode is set for log fn. */
+ tdb->ecode = TDB_ERR_IO;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_oob off %u len %u wrap\n",
+ off, len));
+ }
+ return -1;
+ }
+
+ /*
+ * This duplicates functionality from tdb_oob(). Don't remove:
+ * we still have direct callers of tdb->methods->tdb_oob()
+ * inside transaction.c.
+ */
+ if (off + len <= tdb->map_size)
+ return 0;
+ if (tdb->flags & TDB_INTERNAL) {
+ if (!probe) {
+ /* Ensure ecode is set for log fn. */
+ tdb->ecode = TDB_ERR_IO;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_oob len %u beyond internal malloc size %u\n",
+ (int)(off + len), (int)tdb->map_size));
+ }
+ return -1;
+ }
+
+ if (tdb_fstat(tdb, &st) == -1) {
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+
+ /* Beware >4G files! */
+ if ((tdb_off_t)st.st_size != st.st_size) {
+ /* Ensure ecode is set for log fn. */
+ tdb->ecode = TDB_ERR_IO;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_oob len %llu too large!\n",
+			 (unsigned long long)st.st_size));
+ return -1;
+ }
+
+ /* Unmap, update size, remap. We do this unconditionally, to handle
+ * the unusual case where the db is truncated.
+ *
+ * This can happen to a child using tdb_reopen_all(true) on a
+ * TDB_CLEAR_IF_FIRST tdb whose parent crashes: the next
+ * opener will truncate the database. */
+ if (tdb_munmap(tdb) == -1) {
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+ tdb->map_size = st.st_size;
+ if (tdb_mmap(tdb) != 0) {
+ return -1;
+ }
+
+ if (st.st_size < (size_t)off + len) {
+ if (!probe) {
+ /* Ensure ecode is set for log fn. */
+ tdb->ecode = TDB_ERR_IO;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_oob len %u beyond eof at %u\n",
+ (int)(off + len), (int)st.st_size));
+ }
+ return -1;
+ }
+ return 0;
+}
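+
+/*
+ * Illustrative call pattern (a sketch of how callers in this library
+ * use the method): validate a range before dereferencing the mmap,
+ * or probe to silently refresh map_size after another process grew
+ * the file:
+ *
+ *   if (tdb->methods->tdb_oob(tdb, off, len, 0) != 0)
+ *       return -1;                        // error, ecode already set
+ *   tdb->methods->tdb_oob(tdb, tdb->map_size, 1, 1);  // probe only
+ */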
+
+/* write a lump of data at a specified offset */
+static int tdb_write(struct tdb_context *tdb, tdb_off_t off,
+ const void *buf, tdb_len_t len)
+{
+ if (len == 0) {
+ return 0;
+ }
+
+ if (tdb->read_only || tdb->traverse_read) {
+ tdb->ecode = TDB_ERR_RDONLY;
+ return -1;
+ }
+
+ if (tdb_oob(tdb, off, len, 0) != 0)
+ return -1;
+
+ if (tdb->map_ptr) {
+ memcpy(off + (char *)tdb->map_ptr, buf, len);
+ } else {
+#ifdef HAVE_INCOHERENT_MMAP
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+#else
+ ssize_t written;
+
+ written = tdb_pwrite(tdb, buf, len, off);
+
+ if ((written != (ssize_t)len) && (written != -1)) {
+ /* try once more */
+ tdb->ecode = TDB_ERR_IO;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_write: wrote only "
+ "%zi of %u bytes at %u, trying once more\n",
+ written, len, off));
+ written = tdb_pwrite(tdb, (const char *)buf+written,
+ len-written, off+written);
+ }
+ if (written == -1) {
+ /* Ensure ecode is set for log fn. */
+ tdb->ecode = TDB_ERR_IO;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_write failed at %u "
+ "len=%u (%s)\n", off, len, strerror(errno)));
+ return -1;
+ } else if (written != (ssize_t)len) {
+ tdb->ecode = TDB_ERR_IO;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_write: failed to "
+ "write %u bytes at %u in two attempts\n",
+ len, off));
+ return -1;
+ }
+#endif
+ }
+ return 0;
+}
+
+/* Endian conversion: we only ever deal with 4 byte quantities */
+void *tdb_convert(void *buf, uint32_t size)
+{
+ uint32_t i, *p = (uint32_t *)buf;
+ for (i = 0; i < size / 4; i++)
+ p[i] = TDB_BYTEREV(p[i]);
+ return buf;
+}
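+
+/*
+ * Worked example (hypothetical value): a big-endian tdb stores the
+ * offset 300 (0x0000012c) as the bytes 00 00 01 2c. A little-endian
+ * reader loads those bytes as 0x2c010000; TDB_BYTEREV() swaps them
+ * back to 0x0000012c. The same call also converts on the write path,
+ * since the byte reversal is its own inverse.
+ */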
+
+
+/* read a lump of data at a specified offset, maybe convert */
+static int tdb_read(struct tdb_context *tdb, tdb_off_t off, void *buf,
+ tdb_len_t len, int cv)
+{
+ if (tdb_oob(tdb, off, len, 0) != 0) {
+ return -1;
+ }
+
+ if (tdb->map_ptr) {
+ memcpy(buf, off + (char *)tdb->map_ptr, len);
+ } else {
+#ifdef HAVE_INCOHERENT_MMAP
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+#else
+ ssize_t ret;
+
+ ret = tdb_pread(tdb, buf, len, off);
+ if (ret != (ssize_t)len) {
+ /* Ensure ecode is set for log fn. */
+ tdb->ecode = TDB_ERR_IO;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_read failed at %u "
+ "len=%u ret=%zi (%s) map_size=%u\n",
+ off, len, ret, strerror(errno),
+ tdb->map_size));
+ return -1;
+ }
+#endif
+ }
+ if (cv) {
+ tdb_convert(buf, len);
+ }
+ return 0;
+}
+
+
+
+/*
+ do an unlocked scan of the hash table heads to find the next non-zero head. The value
+ will then be confirmed with the lock held
+*/
+static void tdb_next_hash_chain(struct tdb_context *tdb, uint32_t *chain)
+{
+ uint32_t h = *chain;
+ if (tdb->map_ptr) {
+ for (;h < tdb->hash_size;h++) {
+ if (0 != *(uint32_t *)(TDB_HASH_TOP(h) + (unsigned char *)tdb->map_ptr)) {
+ break;
+ }
+ }
+ } else {
+ uint32_t off=0;
+ for (;h < tdb->hash_size;h++) {
+ if (tdb_ofs_read(tdb, TDB_HASH_TOP(h), &off) != 0 || off != 0) {
+ break;
+ }
+ }
+ }
+ (*chain) = h;
+}
+
+
+int tdb_munmap(struct tdb_context *tdb)
+{
+ if (tdb->flags & TDB_INTERNAL)
+ return 0;
+
+#ifdef HAVE_MMAP
+ if (tdb->map_ptr) {
+ int ret;
+
+ ret = munmap(tdb->map_ptr, tdb->map_size);
+ if (ret != 0)
+ return ret;
+ }
+#endif
+ tdb->map_ptr = NULL;
+ return 0;
+}
+
+/* If mmap isn't coherent, *everyone* must always mmap. */
+static bool should_mmap(const struct tdb_context *tdb)
+{
+#ifdef HAVE_INCOHERENT_MMAP
+ return true;
+#else
+ return !(tdb->flags & TDB_NOMMAP);
+#endif
+}
+
+int tdb_mmap(struct tdb_context *tdb)
+{
+ if (tdb->flags & TDB_INTERNAL)
+ return 0;
+
+#ifdef HAVE_MMAP
+ if (should_mmap(tdb)) {
+ tdb->map_ptr = mmap(NULL, tdb->map_size,
+ PROT_READ|(tdb->read_only? 0:PROT_WRITE),
+ MAP_SHARED|MAP_FILE, tdb->fd,
+ tdb->hdr_ofs);
+
+ /*
+ * NB. When mmap fails it returns MAP_FAILED *NOT* NULL !!!!
+ */
+
+ if (tdb->map_ptr == MAP_FAILED) {
+ tdb->map_ptr = NULL;
+ TDB_LOG((tdb, TDB_DEBUG_WARNING, "tdb_mmap failed for size %u (%s)\n",
+ tdb->map_size, strerror(errno)));
+#ifdef HAVE_INCOHERENT_MMAP
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+#endif
+ }
+ } else {
+ tdb->map_ptr = NULL;
+ }
+#else
+ tdb->map_ptr = NULL;
+#endif
+ return 0;
+}
+
+/* expand a file. We prefer to use ftruncate, as that is what POSIX
+   says to use for mmap expansion */
+static int tdb_expand_file(struct tdb_context *tdb, tdb_off_t size, tdb_off_t addition)
+{
+ char buf[8192];
+ tdb_off_t new_size;
+ int ret;
+
+ if (tdb->read_only || tdb->traverse_read) {
+ tdb->ecode = TDB_ERR_RDONLY;
+ return -1;
+ }
+
+ if (!tdb_add_off_t(size, addition, &new_size)) {
+ tdb->ecode = TDB_ERR_OOM;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "expand_file write "
+ "overflow detected current size[%u] addition[%u]!\n",
+ (unsigned)size, (unsigned)addition));
+ errno = ENOSPC;
+ return -1;
+ }
+
+#ifdef HAVE_POSIX_FALLOCATE
+ ret = tdb_posix_fallocate(tdb, size, addition);
+ if (ret == 0) {
+ return 0;
+ }
+ if (ret == ENOSPC) {
+ /*
+		 * If the file system does not support fallocate, the
+		 * Linux glibc fallback (at least as of 2.24) does not
+		 * reset the file size back to where it was. It is also
+		 * unclear from the POSIX spec of posix_fallocate
+		 * whether this is allowed or not. Better safe than
+		 * sorry: "goto fail" (which truncates back) rather
+		 * than "return -1" here, which would leave the EOF
+		 * pointer too large.
+ */
+ goto fail;
+ }
+
+ /*
+ * Retry the "old" way. Possibly unnecessary, but looking at
+ * our configure script there seem to be weird failure modes
+ * for posix_fallocate. See commit 3264a98ff16de, which
+ * probably refers to
+ * https://sourceware.org/bugzilla/show_bug.cgi?id=1083.
+ */
+#endif
+
+ ret = tdb_ftruncate(tdb, new_size);
+ if (ret == -1) {
+ char b = 0;
+ ssize_t written = tdb_pwrite(tdb, &b, 1, new_size - 1);
+ if (written == 0) {
+ /* try once more, potentially revealing errno */
+ written = tdb_pwrite(tdb, &b, 1, new_size - 1);
+ }
+ if (written == 0) {
+ /* again - give up, guessing errno */
+ errno = ENOSPC;
+ }
+ if (written != 1) {
+ tdb->ecode = TDB_ERR_OOM;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "expand_file to %u failed (%s)\n",
+ (unsigned)new_size, strerror(errno)));
+ return -1;
+ }
+ }
+
+ /* now fill the file with something. This ensures that the
+ file isn't sparse, which would be very bad if we ran out of
+ disk. This must be done with write, not via mmap */
+ memset(buf, TDB_PAD_BYTE, sizeof(buf));
+ while (addition) {
+ size_t n = addition>sizeof(buf)?sizeof(buf):addition;
+ ssize_t written = tdb_pwrite(tdb, buf, n, size);
+ if (written == 0) {
+ /* prevent infinite loops: try _once_ more */
+ written = tdb_pwrite(tdb, buf, n, size);
+ }
+ if (written == 0) {
+ /* give up, trying to provide a useful errno */
+ tdb->ecode = TDB_ERR_OOM;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "expand_file write "
+ "returned 0 twice: giving up!\n"));
+ errno = ENOSPC;
+ goto fail;
+ }
+ if (written == -1) {
+ tdb->ecode = TDB_ERR_OOM;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "expand_file write of "
+ "%u bytes failed (%s)\n", (int)n,
+ strerror(errno)));
+ goto fail;
+ }
+ if (written != n) {
+ TDB_LOG((tdb, TDB_DEBUG_WARNING, "expand_file: wrote "
+ "only %zu of %zi bytes - retrying\n", written,
+ n));
+ }
+ addition -= written;
+ size += written;
+ }
+ return 0;
+
+fail:
+ {
+ int err = errno;
+
+ /*
+ * We're holding the freelist lock or are inside a
+ * transaction. Cutting the file is safe, the space we
+ * tried to allocate can't have been used anywhere in
+ * the meantime.
+ */
+
+ ret = tdb_ftruncate(tdb, size);
+ if (ret == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_WARNING, "expand_file: "
+ "retruncate to %ju failed\n",
+ (uintmax_t)size));
+ }
+ errno = err;
+ }
+
+ return -1;
+}
+
+
+/* Given that you need 'size' bytes, this tells you how much to expand by. */
+tdb_off_t tdb_expand_adjust(tdb_off_t map_size, tdb_off_t size, int page_size)
+{
+ tdb_off_t new_size, top_size, increment;
+ tdb_off_t max_size = UINT32_MAX - map_size;
+
+ if (size > max_size) {
+ /*
+ * We can't round up anymore, just give back
+ * what we're asked for.
+ *
+ * The caller has to take care of the ENOSPC handling.
+ */
+ return size;
+ }
+
+ /* limit size in order to avoid using up huge amounts of memory for
+	 * in-memory tdbs if an oddball huge record creeps in */
+ if (size > 100 * 1024) {
+ increment = size * 2;
+ } else {
+ increment = size * 100;
+ }
+ if (increment < size) {
+ goto overflow;
+ }
+
+ if (!tdb_add_off_t(map_size, increment, &top_size)) {
+ goto overflow;
+ }
+
+ /* always make room for at least top_size more records, and at
+	   least 25% more space if the DB is smaller than 100MiB;
+	   otherwise grow it by 10% only. */
+ if (map_size > 100 * 1024 * 1024) {
+ new_size = map_size * 1.10;
+ } else {
+ new_size = map_size * 1.25;
+ }
+ if (new_size < map_size) {
+ goto overflow;
+ }
+
+ /* Round the database up to a multiple of the page size */
+ new_size = MAX(top_size, new_size);
+
+ if (new_size + page_size < new_size) {
+ /* There's a "+" in TDB_ALIGN that might overflow... */
+ goto overflow;
+ }
+
+ return TDB_ALIGN(new_size, page_size) - map_size;
+
+overflow:
+ /*
+ * Somewhere in between we went over 4GB. Make one big jump to
+ * exactly 4GB database size.
+ */
+ return max_size;
+}
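+
+/*
+ * Worked example (hypothetical numbers, 4096-byte pages): growing a
+ * 1 MiB map by size == 1024 gives increment = 1024 * 100 = 102400
+ * and top_size = 1048576 + 102400 = 1150976. The map is under
+ * 100 MiB, so new_size = 1048576 * 1.25 = 1310720, which exceeds
+ * top_size and is already page aligned; the function returns
+ * 1310720 - 1048576 = 262144, i.e. the file grows by 256 KiB.
+ */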
+
+/* expand the database at least size bytes by expanding the underlying
+ file and doing the mmap again if necessary */
+int tdb_expand(struct tdb_context *tdb, tdb_off_t size)
+{
+ struct tdb_record rec;
+ tdb_off_t offset;
+ tdb_off_t new_size;
+
+ if (tdb_lock(tdb, -1, F_WRLCK) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "lock failed in tdb_expand\n"));
+ return -1;
+ }
+
+ /* must know about any previous expansions by another process */
+ tdb_oob(tdb, tdb->map_size, 1, 1);
+
+ /*
+	 * Note that we don't care about tdb->hdr_ofs != 0 here.
+ *
+ * The 4GB limitation is just related to tdb->map_size
+ * and the offset calculation in the records.
+ *
+ * The file on disk can be up to 4GB + tdb->hdr_ofs
+ */
+ size = tdb_expand_adjust(tdb->map_size, size, tdb->page_size);
+
+ if (!tdb_add_off_t(tdb->map_size, size, &new_size)) {
+ tdb->ecode = TDB_ERR_OOM;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_expand "
+ "overflow detected current map_size[%u] size[%u]!\n",
+ (unsigned)tdb->map_size, (unsigned)size));
+ goto fail;
+ }
+
+ /* form a new freelist record */
+ offset = tdb->map_size;
+ memset(&rec,'\0',sizeof(rec));
+ rec.rec_len = size - sizeof(rec);
+
+ if (tdb->flags & TDB_INTERNAL) {
+ char *new_map_ptr;
+
+ new_map_ptr = (char *)realloc(tdb->map_ptr, new_size);
+ if (!new_map_ptr) {
+ tdb->ecode = TDB_ERR_OOM;
+ goto fail;
+ }
+ tdb->map_ptr = new_map_ptr;
+ tdb->map_size = new_size;
+ } else {
+ int ret;
+
+ /*
+ * expand the file itself
+ */
+ ret = tdb->methods->tdb_expand_file(tdb, tdb->map_size, size);
+ if (ret != 0) {
+ goto fail;
+ }
+
+ /* Explicitly remap: if we're in a transaction, this won't
+ * happen automatically! */
+ tdb_munmap(tdb);
+ tdb->map_size = new_size;
+ if (tdb_mmap(tdb) != 0) {
+ goto fail;
+ }
+ }
+
+ /* link it into the free list */
+ if (tdb_free(tdb, offset, &rec) == -1)
+ goto fail;
+
+ tdb_unlock(tdb, -1, F_WRLCK);
+ return 0;
+ fail:
+ tdb_unlock(tdb, -1, F_WRLCK);
+ return -1;
+}
+
+int _tdb_oob(struct tdb_context *tdb, tdb_off_t off, tdb_len_t len, int probe)
+{
+ int ret = tdb->methods->tdb_oob(tdb, off, len, probe);
+ return ret;
+}
+
+/* read/write a tdb_off_t */
+int tdb_ofs_read(struct tdb_context *tdb, tdb_off_t offset, tdb_off_t *d)
+{
+ return tdb->methods->tdb_read(tdb, offset, (char*)d, sizeof(*d), DOCONV());
+}
+
+int tdb_ofs_write(struct tdb_context *tdb, tdb_off_t offset, tdb_off_t *d)
+{
+ tdb_off_t off = *d;
+ return tdb->methods->tdb_write(tdb, offset, CONVERT(off), sizeof(*d));
+}
+
+
+/* read a lump of data, allocating the space for it */
+unsigned char *tdb_alloc_read(struct tdb_context *tdb, tdb_off_t offset, tdb_len_t len)
+{
+ unsigned char *buf;
+
+	/* some systems don't like zero-length malloc */
+
+ if (!(buf = (unsigned char *)malloc(len ? len : 1))) {
+ /* Ensure ecode is set for log fn. */
+ tdb->ecode = TDB_ERR_OOM;
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,"tdb_alloc_read malloc failed len=%u (%s)\n",
+ len, strerror(errno)));
+ return NULL;
+ }
+ if (tdb->methods->tdb_read(tdb, offset, buf, len, 0) == -1) {
+ SAFE_FREE(buf);
+ return NULL;
+ }
+ return buf;
+}
+
+/* Give a piece of tdb data to a parser */
+
+int tdb_parse_data(struct tdb_context *tdb, TDB_DATA key,
+ tdb_off_t offset, tdb_len_t len,
+ int (*parser)(TDB_DATA key, TDB_DATA data,
+ void *private_data),
+ void *private_data)
+{
+ TDB_DATA data;
+ int result;
+
+ data.dsize = len;
+
+ if ((tdb->transaction == NULL) && (tdb->map_ptr != NULL)) {
+ /*
+ * Optimize by avoiding the malloc/memcpy/free, point the
+ * parser directly at the mmap area.
+ */
+ if (tdb_oob(tdb, offset, len, 0) != 0) {
+ return -1;
+ }
+ data.dptr = offset + (unsigned char *)tdb->map_ptr;
+ return parser(key, data, private_data);
+ }
+
+ if (!(data.dptr = tdb_alloc_read(tdb, offset, len))) {
+ return -1;
+ }
+
+ result = parser(key, data, private_data);
+ free(data.dptr);
+ return result;
+}
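+
+/*
+ * A minimal parser callback sketch (hypothetical helper, not part of
+ * this file): copy out the first 4 bytes of the value. Note that
+ * data.dptr may point straight into the mmap area, so it must not be
+ * kept around after the parser returns:
+ *
+ *   static int read_u32(TDB_DATA key, TDB_DATA data, void *priv)
+ *   {
+ *       if (data.dsize < sizeof(uint32_t))
+ *           return -1;
+ *       memcpy(priv, data.dptr, sizeof(uint32_t));
+ *       return 0;
+ *   }
+ */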
+
+/* read/write a record */
+int tdb_rec_read(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec)
+{
+ int ret;
+ tdb_len_t overall_len;
+
+ if (tdb->methods->tdb_read(tdb, offset, rec, sizeof(*rec),DOCONV()) == -1)
+ return -1;
+ if (TDB_BAD_MAGIC(rec)) {
+ /* Ensure ecode is set for log fn. */
+ tdb->ecode = TDB_ERR_CORRUPT;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_rec_read bad magic 0x%x at offset=%u\n", rec->magic, offset));
+ return -1;
+ }
+
+ overall_len = rec->key_len + rec->data_len;
+ if (overall_len < rec->data_len) {
+ /* overflow */
+ return -1;
+ }
+
+ if (overall_len > rec->rec_len) {
+ /* invalid record */
+ return -1;
+ }
+
+ ret = tdb_oob(tdb, offset, rec->key_len, 1);
+ if (ret == -1) {
+ return -1;
+ }
+ ret = tdb_oob(tdb, offset, rec->data_len, 1);
+ if (ret == -1) {
+ return -1;
+ }
+ ret = tdb_oob(tdb, offset, rec->rec_len, 1);
+ if (ret == -1) {
+ return -1;
+ }
+
+ return tdb_oob(tdb, rec->next, sizeof(*rec), 0);
+}
+
+int tdb_rec_write(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec)
+{
+ struct tdb_record r = *rec;
+ return tdb->methods->tdb_write(tdb, offset, CONVERT(r), sizeof(r));
+}
+
+static const struct tdb_methods io_methods = {
+ tdb_read,
+ tdb_write,
+ tdb_next_hash_chain,
+ tdb_notrans_oob,
+ tdb_expand_file,
+};
+
+/*
+ initialise the default methods table
+*/
+void tdb_io_init(struct tdb_context *tdb)
+{
+ tdb->methods = &io_methods;
+}
diff --git a/lib/tdb/common/lock.c b/lib/tdb/common/lock.c
new file mode 100644
index 0000000..045ded9
--- /dev/null
+++ b/lib/tdb/common/lock.c
@@ -0,0 +1,1033 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Andrew Tridgell 1999-2005
+ Copyright (C) Paul `Rusty' Russell 2000
+ Copyright (C) Jeremy Allison 2000-2003
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "tdb_private.h"
+
+_PUBLIC_ void tdb_setalarm_sigptr(struct tdb_context *tdb, volatile sig_atomic_t *ptr)
+{
+ tdb->interrupt_sig_ptr = ptr;
+}
+
+static int fcntl_lock(struct tdb_context *tdb,
+ int rw, off_t off, off_t len, bool waitflag)
+{
+ struct flock fl;
+ int cmd;
+
+#ifdef USE_TDB_MUTEX_LOCKING
+ {
+ int ret;
+ if (tdb_mutex_lock(tdb, rw, off, len, waitflag, &ret)) {
+ return ret;
+ }
+ }
+#endif
+
+ fl.l_type = rw;
+ fl.l_whence = SEEK_SET;
+ fl.l_start = off;
+ fl.l_len = len;
+ fl.l_pid = 0;
+
+ cmd = waitflag ? F_SETLKW : F_SETLK;
+
+ return fcntl(tdb->fd, cmd, &fl);
+}
+
+static int fcntl_unlock(struct tdb_context *tdb, int rw, off_t off, off_t len)
+{
+ struct flock fl;
+#if 0 /* Check they matched up locks and unlocks correctly. */
+ char line[80];
+ FILE *locks;
+ bool found = false;
+
+ locks = fopen("/proc/locks", "r");
+
+ while (fgets(line, 80, locks)) {
+ char *p;
+ int type, start, l;
+
+		/* eg. 1: POSIX ADVISORY WRITE 2440 08:01:2180826 0 EOF */
+		p = strchr(line, ':') + 1;
+		if (strncmp(p, " POSIX ADVISORY ", strlen(" POSIX ADVISORY ")))
+			continue;
+		p += strlen(" POSIX ADVISORY ");
+ if (strncmp(p, "READ ", strlen("READ ")) == 0)
+ type = F_RDLCK;
+ else if (strncmp(p, "WRITE ", strlen("WRITE ")) == 0)
+ type = F_WRLCK;
+ else
+ abort();
+ p += 6;
+ if (atoi(p) != getpid())
+ continue;
+ p = strchr(strchr(p, ' ') + 1, ' ') + 1;
+ start = atoi(p);
+ p = strchr(p, ' ') + 1;
+ if (strncmp(p, "EOF", 3) == 0)
+ l = 0;
+ else
+ l = atoi(p) - start + 1;
+
+ if (off == start) {
+ if (len != l) {
+ fprintf(stderr, "Len %u should be %u: %s",
+ (int)len, l, line);
+ abort();
+ }
+ if (type != rw) {
+ fprintf(stderr, "Type %s wrong: %s",
+ rw == F_RDLCK ? "READ" : "WRITE", line);
+ abort();
+ }
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ fprintf(stderr, "Unlock on %u@%u not found!\n",
+ (int)off, (int)len);
+ abort();
+ }
+
+ fclose(locks);
+#endif
+
+#ifdef USE_TDB_MUTEX_LOCKING
+ {
+ int ret;
+ if (tdb_mutex_unlock(tdb, rw, off, len, &ret)) {
+ return ret;
+ }
+ }
+#endif
+
+ fl.l_type = F_UNLCK;
+ fl.l_whence = SEEK_SET;
+ fl.l_start = off;
+ fl.l_len = len;
+ fl.l_pid = 0;
+
+ return fcntl(tdb->fd, F_SETLKW, &fl);
+}
+
+/*
+ * Calculate the lock offset for a list
+ *
+ * list -1 is the freelist, otherwise a hash chain.
+ *
+ * Note that we consistently (but without real reason) lock hash chains at an
+ * offset that is 4 bytes below the real offset of the corresponding list head
+ * in the db.
+ *
+ * This is the memory layout of the hashchain array:
+ *
+ * FREELIST_TOP + 0 = freelist
+ * FREELIST_TOP + 4 = hashtable list 0
+ * FREELIST_TOP + 8 = hashtable list 1
+ * ...
+ *
+ * Otoh lock_offset computes:
+ *
+ * freelist = FREELIST_TOP - 4
+ * list 0 = FREELIST_TOP + 0
+ * list 1 = FREELIST_TOP + 4
+ * ...
+ *
+ * Unfortunately we can't change this calculation in order to align the locking
+ * offset with the memory layout, as that would make the locking incompatible
+ * between different tdb versions.
+ */
+static tdb_off_t lock_offset(int list)
+{
+ return FREELIST_TOP + 4*list;
+}
+
+/* a byte range locking function - return 0 on success
+   this function locks/unlocks "len" bytes at the specified offset.
+
+ On error, errno is also set so that errors are passed back properly
+ through tdb_open().
+
+ note that a len of zero means lock to end of file
+*/
+int tdb_brlock(struct tdb_context *tdb,
+ int rw_type, tdb_off_t offset, size_t len,
+ enum tdb_lock_flags flags)
+{
+ int ret;
+
+ if (tdb->flags & TDB_NOLOCK) {
+ return 0;
+ }
+
+ if (flags & TDB_LOCK_MARK_ONLY) {
+ return 0;
+ }
+
+ if ((rw_type == F_WRLCK) && (tdb->read_only || tdb->traverse_read)) {
+ tdb->ecode = TDB_ERR_RDONLY;
+ return -1;
+ }
+
+ do {
+ ret = fcntl_lock(tdb, rw_type, offset, len,
+ flags & TDB_LOCK_WAIT);
+ /* Check for a sigalarm break. */
+ if (ret == -1 && errno == EINTR &&
+ tdb->interrupt_sig_ptr &&
+ *tdb->interrupt_sig_ptr) {
+ break;
+ }
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret == -1) {
+ tdb->ecode = TDB_ERR_LOCK;
+ /* Generic lock error. errno set by fcntl.
+ * EAGAIN is an expected return from non-blocking
+ * locks. */
+ if (!(flags & TDB_LOCK_PROBE) && errno != EAGAIN) {
+ TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_brlock failed (fd=%d) at offset %u rw_type=%d flags=%d len=%zu\n",
+ tdb->fd, offset, rw_type, flags, len));
+ }
+ return -1;
+ }
+ return 0;
+}
+
+int tdb_brunlock(struct tdb_context *tdb,
+ int rw_type, tdb_off_t offset, size_t len)
+{
+ int ret;
+
+ if (tdb->flags & TDB_NOLOCK) {
+ return 0;
+ }
+
+ do {
+ ret = fcntl_unlock(tdb, rw_type, offset, len);
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_brunlock failed (fd=%d) at offset %u rw_type=%u len=%zu\n",
+ tdb->fd, offset, rw_type, len));
+ }
+ return ret;
+}
+
+/*
+ * Do a tdb_brlock in a loop. Some OSes (such as Solaris) have overly
+ * conservative deadlock detection and claim a deadlock when progress can be
+ * made. For those OSes we may loop for a while.
+ */
+
+static int tdb_brlock_retry(struct tdb_context *tdb,
+ int rw_type, tdb_off_t offset, size_t len,
+ enum tdb_lock_flags flags)
+{
+ int count = 1000;
+
+ while (count--) {
+ struct timeval tv;
+ int ret;
+
+ ret = tdb_brlock(tdb, rw_type, offset, len, flags);
+ if (ret == 0) {
+ return 0;
+ }
+ if (errno != EDEADLK) {
+ break;
+ }
+ /* sleep for as short a time as we can - more portable than usleep() */
+ tv.tv_sec = 0;
+ tv.tv_usec = 1;
+ select(0, NULL, NULL, NULL, &tv);
+ }
+ return -1;
+}
+
+/*
+ upgrade a read lock to a write lock.
+*/
+int tdb_allrecord_upgrade(struct tdb_context *tdb)
+{
+ int ret;
+
+ if (tdb->allrecord_lock.count != 1) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "tdb_allrecord_upgrade failed: count %u too high\n",
+ tdb->allrecord_lock.count));
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ if (tdb->allrecord_lock.off != 1) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "tdb_allrecord_upgrade failed: already upgraded?\n"));
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ if (tdb_have_mutexes(tdb)) {
+ ret = tdb_mutex_allrecord_upgrade(tdb);
+ if (ret == -1) {
+ goto fail;
+ }
+ ret = tdb_brlock_retry(tdb, F_WRLCK, lock_offset(tdb->hash_size),
+ 0, TDB_LOCK_WAIT|TDB_LOCK_PROBE);
+ if (ret == -1) {
+ tdb_mutex_allrecord_downgrade(tdb);
+ }
+ } else {
+ ret = tdb_brlock_retry(tdb, F_WRLCK, FREELIST_TOP, 0,
+ TDB_LOCK_WAIT|TDB_LOCK_PROBE);
+ }
+
+ if (ret == 0) {
+ tdb->allrecord_lock.ltype = F_WRLCK;
+ tdb->allrecord_lock.off = 0;
+ return 0;
+ }
+fail:
+ TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_allrecord_upgrade failed\n"));
+ return -1;
+}
+
+static struct tdb_lock_type *find_nestlock(struct tdb_context *tdb,
+ tdb_off_t offset)
+{
+ int i;
+
+ for (i=0; i<tdb->num_lockrecs; i++) {
+ if (tdb->lockrecs[i].off == offset) {
+ return &tdb->lockrecs[i];
+ }
+ }
+ return NULL;
+}
+
+/* lock an offset in the database. */
+int tdb_nest_lock(struct tdb_context *tdb, uint32_t offset, int ltype,
+ enum tdb_lock_flags flags)
+{
+ struct tdb_lock_type *new_lck;
+
+ if (offset >= lock_offset(tdb->hash_size)) {
+ tdb->ecode = TDB_ERR_LOCK;
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,"tdb_lock: invalid offset %u for ltype=%d\n",
+ offset, ltype));
+ return -1;
+ }
+ if (tdb->flags & TDB_NOLOCK)
+ return 0;
+
+ new_lck = find_nestlock(tdb, offset);
+ if (new_lck) {
+ if ((new_lck->ltype == F_RDLCK) && (ltype == F_WRLCK)) {
+ if (!tdb_have_mutexes(tdb)) {
+ int ret;
+ /*
+ * Upgrade the underlying fcntl
+ * lock. Mutexes don't do readlocks,
+ * so this only applies to fcntl
+ * locking.
+ */
+ ret = tdb_brlock(tdb, ltype, offset, 1, flags);
+ if (ret != 0) {
+ return ret;
+ }
+ }
+ new_lck->ltype = F_WRLCK;
+ }
+ /*
+ * Just increment the in-memory struct, posix locks
+ * don't stack.
+ */
+ new_lck->count++;
+ return 0;
+ }
+
+ if (tdb->num_lockrecs == tdb->lockrecs_array_length) {
+ new_lck = (struct tdb_lock_type *)realloc(
+ tdb->lockrecs,
+ sizeof(*tdb->lockrecs) * (tdb->num_lockrecs+1));
+ if (new_lck == NULL) {
+ errno = ENOMEM;
+ return -1;
+ }
+ tdb->lockrecs_array_length = tdb->num_lockrecs+1;
+ tdb->lockrecs = new_lck;
+ }
+
+ /* Since fcntl locks don't nest, we do a lock for the first one,
+ and simply bump the count for future ones */
+ if (tdb_brlock(tdb, ltype, offset, 1, flags)) {
+ return -1;
+ }
+
+ new_lck = &tdb->lockrecs[tdb->num_lockrecs];
+
+ new_lck->off = offset;
+ new_lck->count = 1;
+ new_lck->ltype = ltype;
+ tdb->num_lockrecs++;
+
+ return 0;
+}
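+
+/*
+ * Nesting example: two tdb_nest_lock() calls on the same offset take
+ * exactly one fcntl lock; the second call only bumps the in-memory
+ * count, and only the last matching unlock reaches the kernel:
+ *
+ *   tdb_nest_lock(tdb, off, F_RDLCK, TDB_LOCK_WAIT);  // fcntl lock
+ *   tdb_nest_lock(tdb, off, F_RDLCK, TDB_LOCK_WAIT);  // count == 2
+ *   tdb_nest_unlock(tdb, off, F_RDLCK, false);        // count == 1
+ *   tdb_nest_unlock(tdb, off, F_RDLCK, false);        // fcntl unlock
+ */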
+
+static int tdb_lock_and_recover(struct tdb_context *tdb)
+{
+ int ret;
+
+ /* We need to match locking order in transaction commit. */
+ if (tdb_brlock(tdb, F_WRLCK, FREELIST_TOP, 0, TDB_LOCK_WAIT)) {
+ return -1;
+ }
+
+ if (tdb_brlock(tdb, F_WRLCK, OPEN_LOCK, 1, TDB_LOCK_WAIT)) {
+ tdb_brunlock(tdb, F_WRLCK, FREELIST_TOP, 0);
+ return -1;
+ }
+
+ ret = tdb_transaction_recover(tdb);
+
+ tdb_brunlock(tdb, F_WRLCK, OPEN_LOCK, 1);
+ tdb_brunlock(tdb, F_WRLCK, FREELIST_TOP, 0);
+
+ return ret;
+}
+
+static bool have_data_locks(const struct tdb_context *tdb)
+{
+ int i;
+
+ for (i = 0; i < tdb->num_lockrecs; i++) {
+ if (tdb->lockrecs[i].off >= lock_offset(-1))
+ return true;
+ }
+ return false;
+}
+
+/*
+ * An allrecord lock allows us to avoid per-chain locks. Check if the allrecord
+ * lock is strong enough.
+ */
+static int tdb_lock_covered_by_allrecord_lock(struct tdb_context *tdb,
+ int ltype)
+{
+ if (ltype == F_RDLCK) {
+ /*
+ * The allrecord_lock is equal (F_RDLCK) or stronger
+ * (F_WRLCK). Pass.
+ */
+ return 0;
+ }
+
+ if (tdb->allrecord_lock.ltype == F_RDLCK) {
+ /*
+ * We ask for ltype==F_WRLCK, but the allrecord_lock
+ * is too weak. We can't upgrade here, so fail.
+ */
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ /*
+ * Asking for F_WRLCK, allrecord is F_WRLCK as well. Pass.
+ */
+ return 0;
+}
+
+static int tdb_lock_list(struct tdb_context *tdb, int list, int ltype,
+ enum tdb_lock_flags waitflag)
+{
+ int ret;
+ bool check = false;
+
+ if (tdb->allrecord_lock.count) {
+ return tdb_lock_covered_by_allrecord_lock(tdb, ltype);
+ }
+
+ /*
+ * Check for recoveries: Someone might have kill -9'ed a process
+ * during a commit.
+ */
+ check = !have_data_locks(tdb);
+ ret = tdb_nest_lock(tdb, lock_offset(list), ltype, waitflag);
+
+ if (ret == 0 && check && tdb_needs_recovery(tdb)) {
+ tdb_nest_unlock(tdb, lock_offset(list), ltype, false);
+
+ if (tdb_lock_and_recover(tdb) == -1) {
+ return -1;
+ }
+ return tdb_lock_list(tdb, list, ltype, waitflag);
+ }
+ return ret;
+}
+
+/* lock a list in the database. list -1 is the alloc list */
+int tdb_lock(struct tdb_context *tdb, int list, int ltype)
+{
+ int ret;
+
+ ret = tdb_lock_list(tdb, list, ltype, TDB_LOCK_WAIT);
+ if (ret) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_lock failed on list %d "
+ "ltype=%d (%s)\n", list, ltype, strerror(errno)));
+ }
+ return ret;
+}
+
+/* lock a list in the database. list -1 is the alloc list. non-blocking lock */
+_PUBLIC_ int tdb_lock_nonblock(struct tdb_context *tdb, int list, int ltype);
+_PUBLIC_ int tdb_lock_nonblock(struct tdb_context *tdb, int list, int ltype)
+{
+ return tdb_lock_list(tdb, list, ltype, TDB_LOCK_NOWAIT);
+}
+
+
+int tdb_nest_unlock(struct tdb_context *tdb, uint32_t offset, int ltype,
+ bool mark_lock)
+{
+ int ret = -1;
+ struct tdb_lock_type *lck;
+
+ if (tdb->flags & TDB_NOLOCK)
+ return 0;
+
+ /* Sanity checks */
+ if (offset >= lock_offset(tdb->hash_size)) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: offset %u invalid (%d)\n", offset, tdb->hash_size));
+ return ret;
+ }
+
+ lck = find_nestlock(tdb, offset);
+ if ((lck == NULL) || (lck->count == 0)) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: count is 0\n"));
+ return -1;
+ }
+
+ if (lck->count > 1) {
+ lck->count--;
+ return 0;
+ }
+
+ /*
+ * This lock has count==1 left, so we need to unlock it in the
+ * kernel. We don't bother with decrementing the in-memory array
+ * element, we're about to overwrite it with the last array element
+ * anyway.
+ */
+
+ if (mark_lock) {
+ ret = 0;
+ } else {
+ ret = tdb_brunlock(tdb, ltype, offset, 1);
+ }
+
+ /*
+ * Shrink the array by overwriting the element just unlocked with the
+ * last array element.
+ */
+ *lck = tdb->lockrecs[--tdb->num_lockrecs];
+
+ /*
+ * We don't bother with realloc when the array shrinks, but if we have
+ * a completely idle tdb we should get rid of the locked array.
+ */
+
+ if (ret)
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: An error occurred unlocking!\n"));
+ return ret;
+}
+
+_PUBLIC_ int tdb_unlock(struct tdb_context *tdb, int list, int ltype);
+_PUBLIC_ int tdb_unlock(struct tdb_context *tdb, int list, int ltype)
+{
+	/* a global lock allows us to avoid per-chain locks */
+ if (tdb->allrecord_lock.count) {
+ return tdb_lock_covered_by_allrecord_lock(tdb, ltype);
+ }
+
+ return tdb_nest_unlock(tdb, lock_offset(list), ltype, false);
+}
+
+/*
+ get the transaction lock
+ */
+int tdb_transaction_lock(struct tdb_context *tdb, int ltype,
+ enum tdb_lock_flags lockflags)
+{
+ return tdb_nest_lock(tdb, TRANSACTION_LOCK, ltype, lockflags);
+}
+
+/*
+ release the transaction lock
+ */
+int tdb_transaction_unlock(struct tdb_context *tdb, int ltype)
+{
+ return tdb_nest_unlock(tdb, TRANSACTION_LOCK, ltype, false);
+}
+
+/* Returns 0 if all done, -1 if error, 1 if ok. */
+static int tdb_allrecord_check(struct tdb_context *tdb, int ltype,
+ enum tdb_lock_flags flags, bool upgradable)
+{
+ /* There are no locks on read-only dbs */
+ if (tdb->read_only || tdb->traverse_read) {
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ if (tdb->allrecord_lock.count &&
+ tdb->allrecord_lock.ltype == (uint32_t)ltype) {
+ tdb->allrecord_lock.count++;
+ return 0;
+ }
+
+ if (tdb->allrecord_lock.count) {
+ /* a global lock of a different type exists */
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ if (tdb_have_extra_locks(tdb)) {
+ /* can't combine global and chain locks */
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ if (upgradable && ltype != F_RDLCK) {
+ /* tdb error: you can't upgrade a write lock! */
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+ return 1;
+}
+
+/* We only need to lock individual bytes, but Linux merges consecutive locks
+ * so we lock in contiguous ranges. */
+static int tdb_chainlock_gradual(struct tdb_context *tdb,
+ int ltype, enum tdb_lock_flags flags,
+ size_t off, size_t len)
+{
+ int ret;
+ enum tdb_lock_flags nb_flags = (flags & ~TDB_LOCK_WAIT);
+
+ if (len <= 4) {
+ /* Single record. Just do blocking lock. */
+ return tdb_brlock(tdb, ltype, off, len, flags);
+ }
+
+ /* First we try non-blocking. */
+ ret = tdb_brlock(tdb, ltype, off, len, nb_flags);
+ if (ret == 0) {
+ return 0;
+ }
+
+ /* Try locking first half, then second. */
+ ret = tdb_chainlock_gradual(tdb, ltype, flags, off, len / 2);
+ if (ret == -1)
+ return -1;
+
+ ret = tdb_chainlock_gradual(tdb, ltype, flags,
+ off + len / 2, len - len / 2);
+ if (ret == -1) {
+ tdb_brunlock(tdb, ltype, off, len / 2);
+ return -1;
+ }
+ return 0;
+}
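+
+/*
+ * Worked example (hypothetical hash_size == 8, i.e. a 32-byte chain
+ * lock range): if the initial non-blocking attempt on all 32 bytes
+ * fails, we recurse into two 16-byte halves, then 8-byte quarters,
+ * until a range of <= 4 bytes is reached, where we simply block.
+ * A writer holding a single chain lock thus delays only one small
+ * sub-range instead of starving the whole allrecord lock attempt.
+ */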
+
+/* lock/unlock entire database. It can only be upgradable if you have some
+ * other way of guaranteeing exclusivity (ie. transaction write lock).
+ * We do the locking gradually to avoid being starved by smaller locks. */
+int tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
+ enum tdb_lock_flags flags, bool upgradable)
+{
+ int ret;
+
+ switch (tdb_allrecord_check(tdb, ltype, flags, upgradable)) {
+ case -1:
+ return -1;
+ case 0:
+ return 0;
+ }
+
+ /* We cover two kinds of locks:
+ * 1) Normal chain locks. Taken for almost all operations.
+ * 2) Individual records locks. Taken after normal or free
+ * chain locks.
+ *
+	 * It is (1) which causes the starvation problem, so we're only
+ * gradual for that. */
+
+ if (tdb_have_mutexes(tdb)) {
+ ret = tdb_mutex_allrecord_lock(tdb, ltype, flags);
+ } else {
+ ret = tdb_chainlock_gradual(tdb, ltype, flags, FREELIST_TOP,
+ tdb->hash_size * 4);
+ }
+
+ if (ret == -1) {
+ return -1;
+ }
+
+ /* Grab individual record locks. */
+ if (tdb_brlock(tdb, ltype, lock_offset(tdb->hash_size), 0,
+ flags) == -1) {
+ if (tdb_have_mutexes(tdb)) {
+ tdb_mutex_allrecord_unlock(tdb);
+ } else {
+ tdb_brunlock(tdb, ltype, FREELIST_TOP,
+ tdb->hash_size * 4);
+ }
+ return -1;
+ }
+
+ tdb->allrecord_lock.count = 1;
+ /* If it's upgradable, it's actually exclusive so we can treat
+ * it as a write lock. */
+ tdb->allrecord_lock.ltype = upgradable ? F_WRLCK : ltype;
+ tdb->allrecord_lock.off = upgradable;
+
+ if (tdb_needs_recovery(tdb)) {
+ bool mark = flags & TDB_LOCK_MARK_ONLY;
+ tdb_allrecord_unlock(tdb, ltype, mark);
+ if (mark) {
+ tdb->ecode = TDB_ERR_LOCK;
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "tdb_lockall_mark cannot do recovery\n"));
+ return -1;
+ }
+ if (tdb_lock_and_recover(tdb) == -1) {
+ return -1;
+ }
+ return tdb_allrecord_lock(tdb, ltype, flags, upgradable);
+ }
+
+ return 0;
+}
+
+
+
+/* unlock entire db */
+int tdb_allrecord_unlock(struct tdb_context *tdb, int ltype, bool mark_lock)
+{
+ /* There are no locks on read-only dbs */
+ if (tdb->read_only || tdb->traverse_read) {
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ if (tdb->allrecord_lock.count == 0) {
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ /* Upgradable locks are marked as write locks. */
+ if (tdb->allrecord_lock.ltype != (uint32_t)ltype
+ && (!tdb->allrecord_lock.off || ltype != F_RDLCK)) {
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ if (tdb->allrecord_lock.count > 1) {
+ tdb->allrecord_lock.count--;
+ return 0;
+ }
+
+ if (!mark_lock) {
+ int ret;
+
+ if (tdb_have_mutexes(tdb)) {
+ ret = tdb_mutex_allrecord_unlock(tdb);
+ if (ret == 0) {
+ ret = tdb_brunlock(tdb, ltype,
+ lock_offset(tdb->hash_size),
+ 0);
+ }
+ } else {
+ ret = tdb_brunlock(tdb, ltype, FREELIST_TOP, 0);
+ }
+
+ if (ret != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlockall failed "
+ "(%s)\n", strerror(errno)));
+ return -1;
+ }
+ }
+
+ tdb->allrecord_lock.count = 0;
+ tdb->allrecord_lock.ltype = 0;
+
+ return 0;
+}
+
+/* lock entire database with write lock */
+_PUBLIC_ int tdb_lockall(struct tdb_context *tdb)
+{
+ tdb_trace(tdb, "tdb_lockall");
+ return tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false);
+}
+
+/* lock entire database with write lock - mark only */
+_PUBLIC_ int tdb_lockall_mark(struct tdb_context *tdb)
+{
+ tdb_trace(tdb, "tdb_lockall_mark");
+ return tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_MARK_ONLY, false);
+}
+
+/* unlock entire database with write lock - unmark only */
+_PUBLIC_ int tdb_lockall_unmark(struct tdb_context *tdb)
+{
+ tdb_trace(tdb, "tdb_lockall_unmark");
+ return tdb_allrecord_unlock(tdb, F_WRLCK, true);
+}
+
+/* lock entire database with write lock - nonblocking variant */
+_PUBLIC_ int tdb_lockall_nonblock(struct tdb_context *tdb)
+{
+ int ret = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_NOWAIT, false);
+ tdb_trace_ret(tdb, "tdb_lockall_nonblock", ret);
+ return ret;
+}
+
+/* unlock entire database with write lock */
+_PUBLIC_ int tdb_unlockall(struct tdb_context *tdb)
+{
+ tdb_trace(tdb, "tdb_unlockall");
+ return tdb_allrecord_unlock(tdb, F_WRLCK, false);
+}
+
+/* lock entire database with read lock */
+_PUBLIC_ int tdb_lockall_read(struct tdb_context *tdb)
+{
+ tdb_trace(tdb, "tdb_lockall_read");
+ return tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false);
+}
+
+/* lock entire database with read lock - nonblock variant */
+_PUBLIC_ int tdb_lockall_read_nonblock(struct tdb_context *tdb)
+{
+ int ret = tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_NOWAIT, false);
+ tdb_trace_ret(tdb, "tdb_lockall_read_nonblock", ret);
+ return ret;
+}
+
+/* unlock entire database with read lock */
+_PUBLIC_ int tdb_unlockall_read(struct tdb_context *tdb)
+{
+ tdb_trace(tdb, "tdb_unlockall_read");
+ return tdb_allrecord_unlock(tdb, F_RDLCK, false);
+}
+
+/* lock/unlock one hash chain. This is meant to be used to reduce
+ contention - it cannot guarantee how many records will be locked */
+_PUBLIC_ int tdb_chainlock(struct tdb_context *tdb, TDB_DATA key)
+{
+ int ret = tdb_lock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
+ tdb_trace_1rec(tdb, "tdb_chainlock", key);
+ return ret;
+}
+
+/* lock/unlock one hash chain, non-blocking. This is meant to be used
+ to reduce contention - it cannot guarantee how many records will be
+ locked */
+_PUBLIC_ int tdb_chainlock_nonblock(struct tdb_context *tdb, TDB_DATA key)
+{
+ int ret = tdb_lock_nonblock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
+ tdb_trace_1rec_ret(tdb, "tdb_chainlock_nonblock", key, ret);
+ return ret;
+}
+
+/* mark a chain as locked without actually locking it. Warning! use with great caution! */
+_PUBLIC_ int tdb_chainlock_mark(struct tdb_context *tdb, TDB_DATA key)
+{
+ int ret = tdb_nest_lock(tdb, lock_offset(BUCKET(tdb->hash_fn(&key))),
+ F_WRLCK, TDB_LOCK_MARK_ONLY);
+ tdb_trace_1rec(tdb, "tdb_chainlock_mark", key);
+ return ret;
+}
+
+/* unmark a chain as locked without actually locking it. Warning! use with great caution! */
+_PUBLIC_ int tdb_chainlock_unmark(struct tdb_context *tdb, TDB_DATA key)
+{
+ tdb_trace_1rec(tdb, "tdb_chainlock_unmark", key);
+ return tdb_nest_unlock(tdb, lock_offset(BUCKET(tdb->hash_fn(&key))),
+ F_WRLCK, true);
+}
+
+_PUBLIC_ int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
+{
+ tdb_trace_1rec(tdb, "tdb_chainunlock", key);
+ return tdb_unlock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
+}
+
+_PUBLIC_ int tdb_chainlock_read(struct tdb_context *tdb, TDB_DATA key)
+{
+ int ret;
+ ret = tdb_lock(tdb, BUCKET(tdb->hash_fn(&key)), F_RDLCK);
+ tdb_trace_1rec(tdb, "tdb_chainlock_read", key);
+ return ret;
+}
+
+_PUBLIC_ int tdb_chainunlock_read(struct tdb_context *tdb, TDB_DATA key)
+{
+ tdb_trace_1rec(tdb, "tdb_chainunlock_read", key);
+ return tdb_unlock(tdb, BUCKET(tdb->hash_fn(&key)), F_RDLCK);
+}
+
+_PUBLIC_ int tdb_chainlock_read_nonblock(struct tdb_context *tdb, TDB_DATA key)
+{
+ int ret = tdb_lock_nonblock(tdb, BUCKET(tdb->hash_fn(&key)), F_RDLCK);
+ tdb_trace_1rec_ret(tdb, "tdb_chainlock_read_nonblock", key, ret);
+ return ret;
+}
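+
+/*
+ * Typical chainlock usage (a sketch; tdb_fetch()/tdb_store() are the
+ * public tdb record calls): bracket a read-modify-write of one key
+ * so no other process can intervene between fetch and store:
+ *
+ *   if (tdb_chainlock(tdb, key) == 0) {
+ *       TDB_DATA val = tdb_fetch(tdb, key);
+ *       // ... modify val ...
+ *       tdb_store(tdb, key, val, TDB_REPLACE);
+ *       tdb_chainunlock(tdb, key);
+ *       free(val.dptr);
+ *   }
+ */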
+
+/* record lock stops delete underneath */
+int tdb_lock_record(struct tdb_context *tdb, tdb_off_t off)
+{
+ if (tdb->allrecord_lock.count) {
+ return 0;
+ }
+ return off ? tdb_brlock(tdb, F_RDLCK, off, 1, TDB_LOCK_WAIT) : 0;
+}
+
+/*
+ Write locks override our own fcntl readlocks, so check it here.
+ Note this is meant to be F_SETLK, *not* F_SETLKW, as it's not
+ an error to fail to get the lock here.
+*/
+int tdb_write_lock_record(struct tdb_context *tdb, tdb_off_t off)
+{
+ struct tdb_traverse_lock *i;
+ if (tdb == NULL) {
+ return -1;
+ }
+ for (i = &tdb->travlocks; i; i = i->next)
+ if (i->off == off)
+ return -1;
+ if (tdb->allrecord_lock.count) {
+ if (tdb->allrecord_lock.ltype == F_WRLCK) {
+ return 0;
+ }
+ return -1;
+ }
+ return tdb_brlock(tdb, F_WRLCK, off, 1, TDB_LOCK_NOWAIT|TDB_LOCK_PROBE);
+}
+
+int tdb_write_unlock_record(struct tdb_context *tdb, tdb_off_t off)
+{
+ if (tdb->allrecord_lock.count) {
+ return 0;
+ }
+ return tdb_brunlock(tdb, F_WRLCK, off, 1);
+}
+
+/* fcntl locks don't stack: avoid unlocking someone else's */
+int tdb_unlock_record(struct tdb_context *tdb, tdb_off_t off)
+{
+ struct tdb_traverse_lock *i;
+ uint32_t count = 0;
+
+ if (tdb->allrecord_lock.count) {
+ return 0;
+ }
+
+ if (off == 0)
+ return 0;
+ for (i = &tdb->travlocks; i; i = i->next)
+ if (i->off == off)
+ count++;
+ return (count == 1 ? tdb_brunlock(tdb, F_RDLCK, off, 1) : 0);
+}
+
+bool tdb_have_extra_locks(struct tdb_context *tdb)
+{
+ unsigned int extra = tdb->num_lockrecs;
+
+ /* A transaction holds the lock for all records. */
+ if (!tdb->transaction && tdb->allrecord_lock.count) {
+ return true;
+ }
+
+ /* We always hold the active lock if CLEAR_IF_FIRST. */
+ if (find_nestlock(tdb, ACTIVE_LOCK)) {
+ extra--;
+ }
+
+ /* In a transaction, we expect to hold the transaction lock */
+ if (tdb->transaction && find_nestlock(tdb, TRANSACTION_LOCK)) {
+ extra--;
+ }
+
+ return extra;
+}
+
+/* The transaction code uses this to remove all locks. */
+void tdb_release_transaction_locks(struct tdb_context *tdb)
+{
+ int i;
+ unsigned int active = 0;
+
+ if (tdb->allrecord_lock.count != 0) {
+ tdb_allrecord_unlock(tdb, tdb->allrecord_lock.ltype, false);
+ tdb->allrecord_lock.count = 0;
+ }
+
+ for (i=0;i<tdb->num_lockrecs;i++) {
+ struct tdb_lock_type *lck = &tdb->lockrecs[i];
+
+ /* Don't release the active lock! Copy it to first entry. */
+ if (lck->off == ACTIVE_LOCK) {
+ tdb->lockrecs[active++] = *lck;
+ } else {
+ tdb_brunlock(tdb, lck->ltype, lck->off, 1);
+ }
+ }
+ tdb->num_lockrecs = active;
+}
+
+/* Following functions are added specifically to support CTDB. */
+
+/* Don't do actual fcntl locking, just mark tdb locked */
+_PUBLIC_ int tdb_transaction_write_lock_mark(struct tdb_context *tdb);
+_PUBLIC_ int tdb_transaction_write_lock_mark(struct tdb_context *tdb)
+{
+ return tdb_transaction_lock(tdb, F_WRLCK, TDB_LOCK_MARK_ONLY);
+}
+
+/* Don't do actual fcntl unlocking, just mark tdb unlocked */
+_PUBLIC_ int tdb_transaction_write_lock_unmark(struct tdb_context *tdb);
+_PUBLIC_ int tdb_transaction_write_lock_unmark(struct tdb_context *tdb)
+{
+ return tdb_nest_unlock(tdb, TRANSACTION_LOCK, F_WRLCK, true);
+}
diff --git a/lib/tdb/common/mutex.c b/lib/tdb/common/mutex.c
new file mode 100644
index 0000000..a710616
--- /dev/null
+++ b/lib/tdb/common/mutex.c
@@ -0,0 +1,1078 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Volker Lendecke 2012,2013
+ Copyright (C) Stefan Metzmacher 2013,2014
+ Copyright (C) Michael Adam 2014
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+#include "tdb_private.h"
+#include "system/threads.h"
+
+#ifdef USE_TDB_MUTEX_LOCKING
+
+/*
+ * If we run with mutexes, we store the "struct tdb_mutexes" at the
+ * beginning of the file. We store an additional tdb_header right
+ * beyond the mutex area, page aligned. All the offsets within the tdb
+ * are relative to the area behind the mutex area. tdb->map_ptr points
+ * behind the mmap area as well, so the read and write path in the
+ * mutex case can remain unchanged.
+ *
+ * Early in the mutex development the mutexes were placed between the hash
+ * chain pointers and the real tdb data. This had two drawbacks: First, it
+ * made pointer calculations more complex. Second, we had to mmap the mutex
+ * area twice. One was the normal map_ptr in the tdb. This frequently changed
+ * from within tdb_oob. At least the Linux glibc robust mutex code assumes
+ * constant pointers in memory, so a constantly changing mmap area destroys
+ * the mutex list. So we had to mmap the first bytes of the file with a second
+ * mmap call. With that scheme, very weird errors happened that could be
+ * easily fixed by doing the mutex mmap in a second file. It seemed that
+ * mapping the same memory area twice does not end up accessing the same
+ * physical page: looking at the mutexes in gdb, it seemed that old data showed
+ * up after some re-mapping. To avoid a separate mutex file, the code now puts
+ * the real content of the tdb file after the mutex area. This way we do not
+ * have overlapping mmap areas, the mutex area is mmapped once and not
+ * changed, the tdb data area's mmap is constantly changed but does not
+ * overlap.
+ */
+
+struct tdb_mutexes {
+ struct tdb_header hdr;
+
+ /* protect allrecord_lock */
+ pthread_mutex_t allrecord_mutex;
+
+ /*
+ * F_UNLCK: free,
+ * F_RDLCK: shared,
+ * F_WRLCK: exclusive
+ */
+ short int allrecord_lock;
+
+ /*
+ * Index 0 is the freelist mutex, followed by
+ * one mutex per hashchain.
+ */
+ pthread_mutex_t hashchains[1];
+};
+
+bool tdb_have_mutexes(struct tdb_context *tdb)
+{
+ return ((tdb->feature_flags & TDB_FEATURE_FLAG_MUTEX) != 0);
+}
+
+size_t tdb_mutex_size(struct tdb_context *tdb)
+{
+ size_t mutex_size;
+
+ if (!tdb_have_mutexes(tdb)) {
+ return 0;
+ }
+
+ mutex_size = sizeof(struct tdb_mutexes);
+ mutex_size += tdb->hash_size * sizeof(pthread_mutex_t);
+
+ return TDB_ALIGN(mutex_size, tdb->page_size);
+}
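+
+/*
+ * Sizing example (hypothetical numbers: 40-byte pthread_mutex_t,
+ * hash_size == 131, 4096-byte pages): the raw size is
+ * sizeof(struct tdb_mutexes) + 131 * 40 bytes, which TDB_ALIGN()
+ * rounds up to the next page boundary so that the real tdb header
+ * placed behind the mutex area starts page aligned.
+ */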
+
+/*
+ * Get the index for a chain mutex
+ */
+static bool tdb_mutex_index(struct tdb_context *tdb, off_t off, off_t len,
+ unsigned *idx)
+{
+ /*
+	 * Weird but true: We fcntl-lock 1 byte at an offset 4 bytes below
+	 * the 4-byte list head (freelist or hash chain) that is about
+ * to be locked. See lock_offset() where the freelist is -1 vs the
+ * "+1" in TDB_HASH_TOP(). Because the mutex array is represented in
+ * the tdb file itself as data, we need to adjust the offset here.
+ */
+ const off_t freelist_lock_ofs = FREELIST_TOP - sizeof(tdb_off_t);
+
+ if (!tdb_have_mutexes(tdb)) {
+ return false;
+ }
+ if (len != 1) {
+ /* Possibly the allrecord lock */
+ return false;
+ }
+ if (off < freelist_lock_ofs) {
+ /* One of the special locks */
+ return false;
+ }
+ if (tdb->hash_size == 0) {
+ /* tdb not initialized yet, called from tdb_open_ex() */
+ return false;
+ }
+ if (off >= TDB_DATA_START(tdb->hash_size)) {
+ /* Single record lock from traverses */
+ return false;
+ }
+
+ /*
+ * Now we know it's a freelist or hash chain lock. Those are always 4
+ * byte aligned. Paranoia check.
+ */
+ if ((off % sizeof(tdb_off_t)) != 0) {
+ abort();
+ }
+
+ /*
+ * Re-index the fcntl offset into an offset into the mutex array
+ */
+ off -= freelist_lock_ofs; /* rebase to index 0 */
+	off /= sizeof(tdb_off_t); /* 0 for freelist, 1..n for hash chains */
+
+ *idx = off;
+ return true;
+}
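+
+/*
+ * Worked mapping example: an fcntl request for 1 byte at
+ * FREELIST_TOP - 4 rebases to 0 and yields idx 0 (the freelist
+ * mutex); FREELIST_TOP yields idx 1 (hash chain 0); FREELIST_TOP + 4
+ * yields idx 2 (hash chain 1), matching the hashchains[] layout
+ * described in struct tdb_mutexes above.
+ */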
+
+static bool tdb_have_mutex_chainlocks(struct tdb_context *tdb)
+{
+ int i;
+
+ for (i=0; i < tdb->num_lockrecs; i++) {
+ bool ret;
+ unsigned idx;
+
+ ret = tdb_mutex_index(tdb,
+ tdb->lockrecs[i].off,
+ tdb->lockrecs[i].count,
+ &idx);
+ if (!ret) {
+ continue;
+ }
+
+ if (idx == 0) {
+ /* this is the freelist mutex */
+ continue;
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
+static int chain_mutex_lock(pthread_mutex_t *m, bool waitflag)
+{
+ int ret;
+
+ if (waitflag) {
+ ret = pthread_mutex_lock(m);
+ } else {
+ ret = pthread_mutex_trylock(m);
+ }
+ if (ret != EOWNERDEAD) {
+ return ret;
+ }
+
+ /*
+ * For chainlocks, we don't do any cleanup (yet?)
+ */
+ return pthread_mutex_consistent(m);
+}
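+
+/*
+ * Note on robust mutexes: EOWNERDEAD means the mutex was acquired,
+ * but its previous owner died while holding it.
+ * pthread_mutex_consistent() marks it usable again, and its return
+ * value (0 on success) becomes our result, so the caller proceeds as
+ * if the lock had been taken normally.
+ */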
+
+static int allrecord_mutex_lock(struct tdb_mutexes *m, bool waitflag)
+{
+ int ret;
+
+ if (waitflag) {
+ ret = pthread_mutex_lock(&m->allrecord_mutex);
+ } else {
+ ret = pthread_mutex_trylock(&m->allrecord_mutex);
+ }
+ if (ret != EOWNERDEAD) {
+ return ret;
+ }
+
+ /*
+ * The allrecord lock holder died. We need to reset the allrecord_lock
+ * to F_UNLCK. This should also be the indication for
+ * tdb_needs_recovery.
+ */
+ m->allrecord_lock = F_UNLCK;
+
+ return pthread_mutex_consistent(&m->allrecord_mutex);
+}
+
+bool tdb_mutex_lock(struct tdb_context *tdb, int rw, off_t off, off_t len,
+ bool waitflag, int *pret)
+{
+ struct tdb_mutexes *m = tdb->mutexes;
+ pthread_mutex_t *chain;
+ int ret;
+ unsigned idx;
+ bool allrecord_ok;
+
+ if (!tdb_mutex_index(tdb, off, len, &idx)) {
+ return false;
+ }
+ chain = &m->hashchains[idx];
+
+again:
+ ret = chain_mutex_lock(chain, waitflag);
+ if (ret == EBUSY) {
+ ret = EAGAIN;
+ }
+ if (ret != 0) {
+ errno = ret;
+ goto fail;
+ }
+
+ if (idx == 0) {
+ /*
+		 * This is a freelist lock, which is independent of
+		 * the allrecord lock. So we're done once we have the
+ * freelist mutex.
+ */
+ *pret = 0;
+ return true;
+ }
+
+ if (tdb_have_mutex_chainlocks(tdb)) {
+ /*
+ * We can only check the allrecord lock once. If we do it with
+ * one chain mutex locked, we will deadlock with the allrecord
+ * locker process in the following way: We lock the first hash
+ * chain, we check for the allrecord lock. We keep the hash
+ * chain locked. Then the allrecord locker locks the
+ * allrecord_mutex. It walks the list of chain mutexes,
+ * locking them all in sequence. Meanwhile, we have the chain
+ * mutex locked, so the allrecord locker blocks trying to lock
+ * our chain mutex. Then we come in and try to lock the second
+ * chain lock, which in most cases will be the freelist. We
+ * see that the allrecord lock is locked and put ourselves on
+ * the allrecord_mutex. This will never be signalled though
+ * because the allrecord locker waits for us to give up the
+ * chain lock.
+ */
+
+ *pret = 0;
+ return true;
+ }
+
+ /*
+	 * Check if someone has the allrecord lock: queue if so.
+ */
+
+ allrecord_ok = false;
+
+ if (m->allrecord_lock == F_UNLCK) {
+ /*
+ * allrecord lock not taken
+ */
+ allrecord_ok = true;
+ }
+
+ if ((m->allrecord_lock == F_RDLCK) && (rw == F_RDLCK)) {
+ /*
+ * allrecord shared lock taken, but we only want to read
+ */
+ allrecord_ok = true;
+ }
+
+ if (allrecord_ok) {
+ *pret = 0;
+ return true;
+ }
+
+ ret = pthread_mutex_unlock(chain);
+ if (ret != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock"
+ "(chain_mutex) failed: %s\n", strerror(ret)));
+ errno = ret;
+ goto fail;
+ }
+ ret = allrecord_mutex_lock(m, waitflag);
+ if (ret == EBUSY) {
+ ret = EAGAIN;
+ }
+ if (ret != 0) {
+ if (waitflag || (ret != EAGAIN)) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_%slock"
+ "(allrecord_mutex) failed: %s\n",
+ waitflag ? "" : "try_", strerror(ret)));
+ }
+ errno = ret;
+ goto fail;
+ }
+ ret = pthread_mutex_unlock(&m->allrecord_mutex);
+ if (ret != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock"
+ "(allrecord_mutex) failed: %s\n", strerror(ret)));
+ errno = ret;
+ goto fail;
+ }
+ goto again;
+
+fail:
+ *pret = -1;
+ return true;
+}
+
+bool tdb_mutex_unlock(struct tdb_context *tdb, int rw, off_t off, off_t len,
+ int *pret)
+{
+ struct tdb_mutexes *m = tdb->mutexes;
+ pthread_mutex_t *chain;
+ int ret;
+ unsigned idx;
+
+ if (!tdb_mutex_index(tdb, off, len, &idx)) {
+ return false;
+ }
+ chain = &m->hashchains[idx];
+
+ ret = pthread_mutex_unlock(chain);
+ if (ret == 0) {
+ *pret = 0;
+ return true;
+ }
+ errno = ret;
+ *pret = -1;
+ return true;
+}
+
+int tdb_mutex_allrecord_lock(struct tdb_context *tdb, int ltype,
+ enum tdb_lock_flags flags)
+{
+ struct tdb_mutexes *m = tdb->mutexes;
+ int ret;
+ uint32_t i;
+ bool waitflag = (flags & TDB_LOCK_WAIT);
+ int saved_errno;
+
+ if (tdb->flags & TDB_NOLOCK) {
+ return 0;
+ }
+
+ if (flags & TDB_LOCK_MARK_ONLY) {
+ return 0;
+ }
+
+ ret = allrecord_mutex_lock(m, waitflag);
+ if (!waitflag && (ret == EBUSY)) {
+ errno = EAGAIN;
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+ if (ret != 0) {
+ if (!(flags & TDB_LOCK_PROBE)) {
+ TDB_LOG((tdb, TDB_DEBUG_TRACE,
+ "allrecord_mutex_lock() failed: %s\n",
+ strerror(ret)));
+ }
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ if (m->allrecord_lock != F_UNLCK) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "allrecord_lock == %d\n",
+ (int)m->allrecord_lock));
+ goto fail_unlock_allrecord_mutex;
+ }
+ m->allrecord_lock = (ltype == F_RDLCK) ? F_RDLCK : F_WRLCK;
+
+ for (i=0; i<tdb->hash_size; i++) {
+
+ /* ignore hashchains[0], the freelist */
+ pthread_mutex_t *chain = &m->hashchains[i+1];
+
+ ret = chain_mutex_lock(chain, waitflag);
+ if (!waitflag && (ret == EBUSY)) {
+ errno = EAGAIN;
+ goto fail_unroll_allrecord_lock;
+ }
+ if (ret != 0) {
+ if (!(flags & TDB_LOCK_PROBE)) {
+ TDB_LOG((tdb, TDB_DEBUG_TRACE,
+ "chain_mutex_lock() failed: %s\n",
+ strerror(ret)));
+ }
+ errno = ret;
+ goto fail_unroll_allrecord_lock;
+ }
+
+ ret = pthread_mutex_unlock(chain);
+ if (ret != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock"
+ "(chainlock) failed: %s\n", strerror(ret)));
+ errno = ret;
+ goto fail_unroll_allrecord_lock;
+ }
+ }
+ /*
+ * We leave this routine with m->allrecord_mutex locked
+ */
+ return 0;
+
+fail_unroll_allrecord_lock:
+ m->allrecord_lock = F_UNLCK;
+
+fail_unlock_allrecord_mutex:
+ saved_errno = errno;
+ ret = pthread_mutex_unlock(&m->allrecord_mutex);
+ if (ret != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock"
+ "(allrecord_mutex) failed: %s\n", strerror(ret)));
+ }
+ errno = saved_errno;
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+}
+
+int tdb_mutex_allrecord_upgrade(struct tdb_context *tdb)
+{
+ struct tdb_mutexes *m = tdb->mutexes;
+ int ret;
+ uint32_t i;
+
+ if (tdb->flags & TDB_NOLOCK) {
+ return 0;
+ }
+
+ /*
+ * Our only caller tdb_allrecord_upgrade()
+ * guarantees that we already own the allrecord lock.
+ *
+ * Which means m->allrecord_mutex is still locked by us.
+ */
+
+ if (m->allrecord_lock != F_RDLCK) {
+ tdb->ecode = TDB_ERR_LOCK;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "allrecord_lock == %d\n",
+ (int)m->allrecord_lock));
+ return -1;
+ }
+
+ m->allrecord_lock = F_WRLCK;
+
+ for (i=0; i<tdb->hash_size; i++) {
+
+ /* ignore hashchains[0], the freelist */
+ pthread_mutex_t *chain = &m->hashchains[i+1];
+
+ ret = chain_mutex_lock(chain, true);
+ if (ret != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_lock"
+ "(chainlock) failed: %s\n", strerror(ret)));
+ goto fail_unroll_allrecord_lock;
+ }
+
+ ret = pthread_mutex_unlock(chain);
+ if (ret != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock"
+ "(chainlock) failed: %s\n", strerror(ret)));
+ goto fail_unroll_allrecord_lock;
+ }
+ }
+
+ return 0;
+
+fail_unroll_allrecord_lock:
+ m->allrecord_lock = F_RDLCK;
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+}
+
+void tdb_mutex_allrecord_downgrade(struct tdb_context *tdb)
+{
+ struct tdb_mutexes *m = tdb->mutexes;
+
+ /*
+ * Our only caller tdb_allrecord_upgrade() (in the error case)
+ * guarantees that we already own the allrecord lock.
+ *
+ * Which means m->allrecord_mutex is still locked by us.
+ */
+
+ if (m->allrecord_lock != F_WRLCK) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "allrecord_lock == %d\n",
+ (int)m->allrecord_lock));
+ return;
+ }
+
+ m->allrecord_lock = F_RDLCK;
+ return;
+}
+
+
+int tdb_mutex_allrecord_unlock(struct tdb_context *tdb)
+{
+ struct tdb_mutexes *m = tdb->mutexes;
+ short old;
+ int ret;
+
+ if (tdb->flags & TDB_NOLOCK) {
+ return 0;
+ }
+
+ /*
+ * Our only callers tdb_allrecord_unlock() and
+ * tdb_allrecord_lock() (in the error path)
+ * guarantee that we already own the allrecord lock.
+ *
+ * Which means m->allrecord_mutex is still locked by us.
+ */
+
+ if ((m->allrecord_lock != F_RDLCK) && (m->allrecord_lock != F_WRLCK)) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "allrecord_lock == %d\n",
+ (int)m->allrecord_lock));
+ return -1;
+ }
+
+ old = m->allrecord_lock;
+ m->allrecord_lock = F_UNLCK;
+
+ ret = pthread_mutex_unlock(&m->allrecord_mutex);
+ if (ret != 0) {
+ m->allrecord_lock = old;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock"
+ "(allrecord_mutex) failed: %s\n", strerror(ret)));
+ return -1;
+ }
+ return 0;
+}
+
+int tdb_mutex_init(struct tdb_context *tdb)
+{
+ struct tdb_mutexes *m;
+ pthread_mutexattr_t ma;
+ uint32_t i;
+ int ret;
+
+ ret = tdb_mutex_mmap(tdb);
+ if (ret == -1) {
+ return -1;
+ }
+ m = tdb->mutexes;
+
+ ret = pthread_mutexattr_init(&ma);
+ if (ret != 0) {
+ goto fail_munmap;
+ }
+ ret = pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_ERRORCHECK);
+ if (ret != 0) {
+ goto fail;
+ }
+ ret = pthread_mutexattr_setpshared(&ma, PTHREAD_PROCESS_SHARED);
+ if (ret != 0) {
+ goto fail;
+ }
+ ret = pthread_mutexattr_setrobust(&ma, PTHREAD_MUTEX_ROBUST);
+ if (ret != 0) {
+ goto fail;
+ }
+
+ for (i=0; i<tdb->hash_size+1; i++) {
+ pthread_mutex_t *chain = &m->hashchains[i];
+
+ ret = pthread_mutex_init(chain, &ma);
+ if (ret != 0) {
+ goto fail;
+ }
+ }
+
+ m->allrecord_lock = F_UNLCK;
+
+ ret = pthread_mutex_init(&m->allrecord_mutex, &ma);
+ if (ret != 0) {
+ goto fail;
+ }
+ ret = 0;
+fail:
+ pthread_mutexattr_destroy(&ma);
+fail_munmap:
+
+ if (ret == 0) {
+ return 0;
+ }
+
+ tdb_mutex_munmap(tdb);
+
+ errno = ret;
+ return -1;
+}
+
+int tdb_mutex_mmap(struct tdb_context *tdb)
+{
+ size_t len;
+ void *ptr;
+
+ len = tdb_mutex_size(tdb);
+ if (len == 0) {
+ return 0;
+ }
+
+ if (tdb->mutexes != NULL) {
+ return 0;
+ }
+
+ ptr = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_FILE,
+ tdb->fd, 0);
+ if (ptr == MAP_FAILED) {
+ return -1;
+ }
+ tdb->mutexes = (struct tdb_mutexes *)ptr;
+
+ return 0;
+}
+
+int tdb_mutex_munmap(struct tdb_context *tdb)
+{
+ size_t len;
+ int ret;
+
+ len = tdb_mutex_size(tdb);
+ if (len == 0) {
+ return 0;
+ }
+
+ ret = munmap(tdb->mutexes, len);
+ if (ret == -1) {
+ return -1;
+ }
+ tdb->mutexes = NULL;
+
+ return 0;
+}
+
+static bool tdb_mutex_locking_cached;
+
+static bool tdb_mutex_locking_supported(void)
+{
+ pthread_mutexattr_t ma;
+ pthread_mutex_t m;
+ int ret;
+ static bool initialized;
+
+ if (initialized) {
+ return tdb_mutex_locking_cached;
+ }
+
+ initialized = true;
+
+ ret = pthread_mutexattr_init(&ma);
+ if (ret != 0) {
+ return false;
+ }
+ ret = pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_ERRORCHECK);
+ if (ret != 0) {
+ goto cleanup_ma;
+ }
+ ret = pthread_mutexattr_setpshared(&ma, PTHREAD_PROCESS_SHARED);
+ if (ret != 0) {
+ goto cleanup_ma;
+ }
+ ret = pthread_mutexattr_setrobust(&ma, PTHREAD_MUTEX_ROBUST);
+ if (ret != 0) {
+ goto cleanup_ma;
+ }
+ ret = pthread_mutex_init(&m, &ma);
+ if (ret != 0) {
+ goto cleanup_ma;
+ }
+ ret = pthread_mutex_lock(&m);
+ if (ret != 0) {
+ goto cleanup_m;
+ }
+ /*
+ * This makes sure we have real mutexes
+ * from a threading library instead of just
+ * stubs from libc.
+ */
+ ret = pthread_mutex_lock(&m);
+ if (ret != EDEADLK) {
+ goto cleanup_lock;
+ }
+ ret = pthread_mutex_unlock(&m);
+ if (ret != 0) {
+ goto cleanup_m;
+ }
+
+ tdb_mutex_locking_cached = true;
+ goto cleanup_m;
+
+cleanup_lock:
+ pthread_mutex_unlock(&m);
+cleanup_m:
+ pthread_mutex_destroy(&m);
+cleanup_ma:
+ pthread_mutexattr_destroy(&ma);
+ return tdb_mutex_locking_cached;
+}
+
+static void (*tdb_robust_mutex_old_handler)(int) = SIG_ERR;
+static pid_t tdb_robust_mutex_pid = -1;
+
+static bool tdb_robust_mutex_setup_sigchild(void (*handler)(int),
+ void (**p_old_handler)(int))
+{
+#ifdef HAVE_SIGACTION
+ struct sigaction act;
+ struct sigaction oldact;
+
+ memset(&act, '\0', sizeof(act));
+
+ act.sa_handler = handler;
+#ifdef SA_RESTART
+ act.sa_flags = SA_RESTART;
+#endif
+ sigemptyset(&act.sa_mask);
+ sigaddset(&act.sa_mask, SIGCHLD);
+ sigaction(SIGCHLD, &act, &oldact);
+ if (p_old_handler) {
+ *p_old_handler = oldact.sa_handler;
+ }
+ return true;
+#else /* !HAVE_SIGACTION */
+ return false;
+#endif
+}
+
+static void tdb_robust_mutex_handler(int sig)
+{
+ pid_t child_pid = tdb_robust_mutex_pid;
+
+ if (child_pid != -1) {
+ pid_t pid;
+
+ pid = waitpid(child_pid, NULL, WNOHANG);
+ if (pid == -1) {
+ switch (errno) {
+ case ECHILD:
+ tdb_robust_mutex_pid = -1;
+ return;
+
+ default:
+ return;
+ }
+ }
+ if (pid == child_pid) {
+ tdb_robust_mutex_pid = -1;
+ return;
+ }
+ }
+
+	if (tdb_robust_mutex_old_handler == SIG_DFL) {
+		return;
+	}
+	if (tdb_robust_mutex_old_handler == SIG_IGN) {
+		return;
+	}
+	if (tdb_robust_mutex_old_handler == SIG_ERR) {
+		return;
+	}
+
+	tdb_robust_mutex_old_handler(sig);
+}
+
+static void tdb_robust_mutex_wait_for_child(pid_t *child_pid)
+{
+ int options = WNOHANG;
+
+ if (*child_pid == -1) {
+ return;
+ }
+
+ while (tdb_robust_mutex_pid > 0) {
+ pid_t pid;
+
+ /*
+ * First we try with WNOHANG, as the process might not exist
+ * anymore. Once we've sent SIGKILL we block waiting for the
+ * exit.
+ */
+ pid = waitpid(*child_pid, NULL, options);
+ if (pid == -1) {
+ if (errno == EINTR) {
+ continue;
+ } else if (errno == ECHILD) {
+ break;
+ } else {
+ abort();
+ }
+ }
+ if (pid == *child_pid) {
+ break;
+ }
+
+ kill(*child_pid, SIGKILL);
+ options = 0;
+ }
+
+ tdb_robust_mutex_pid = -1;
+ *child_pid = -1;
+}
+
+_PUBLIC_ bool tdb_runtime_check_for_robust_mutexes(void)
+{
+ void *ptr = NULL;
+ pthread_mutex_t *m = NULL;
+ pthread_mutexattr_t ma;
+ int ret = 1;
+ int pipe_down[2] = { -1, -1 };
+ int pipe_up[2] = { -1, -1 };
+ ssize_t nread;
+ char c = 0;
+ bool ok;
+ static bool initialized;
+ pid_t saved_child_pid = -1;
+ bool cleanup_ma = false;
+
+ if (initialized) {
+ return tdb_mutex_locking_cached;
+ }
+
+ initialized = true;
+
+ ok = tdb_mutex_locking_supported();
+ if (!ok) {
+ return false;
+ }
+
+ tdb_mutex_locking_cached = false;
+
+ ptr = mmap(NULL, sizeof(pthread_mutex_t), PROT_READ|PROT_WRITE,
+ MAP_SHARED|MAP_ANON, -1 /* fd */, 0);
+ if (ptr == MAP_FAILED) {
+ return false;
+ }
+
+ ret = pipe(pipe_down);
+ if (ret != 0) {
+ goto cleanup;
+ }
+ ret = pipe(pipe_up);
+ if (ret != 0) {
+ goto cleanup;
+ }
+
+ ret = pthread_mutexattr_init(&ma);
+ if (ret != 0) {
+ goto cleanup;
+ }
+ cleanup_ma = true;
+ ret = pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_ERRORCHECK);
+ if (ret != 0) {
+ goto cleanup;
+ }
+ ret = pthread_mutexattr_setpshared(&ma, PTHREAD_PROCESS_SHARED);
+ if (ret != 0) {
+ goto cleanup;
+ }
+ ret = pthread_mutexattr_setrobust(&ma, PTHREAD_MUTEX_ROBUST);
+ if (ret != 0) {
+ goto cleanup;
+ }
+ ret = pthread_mutex_init(ptr, &ma);
+ if (ret != 0) {
+ goto cleanup;
+ }
+ m = (pthread_mutex_t *)ptr;
+
+ if (tdb_robust_mutex_setup_sigchild(tdb_robust_mutex_handler,
+	    &tdb_robust_mutex_old_handler) == false) {
+ goto cleanup;
+ }
+
+ tdb_robust_mutex_pid = fork();
+ saved_child_pid = tdb_robust_mutex_pid;
+ if (tdb_robust_mutex_pid == 0) {
+ size_t nwritten;
+ close(pipe_down[1]);
+ close(pipe_up[0]);
+ ret = pthread_mutex_lock(m);
+ nwritten = write(pipe_up[1], &ret, sizeof(ret));
+ if (nwritten != sizeof(ret)) {
+ _exit(1);
+ }
+ if (ret != 0) {
+ _exit(1);
+ }
+ nread = read(pipe_down[0], &c, 1);
+ if (nread != 1) {
+ _exit(1);
+ }
+ /* leave locked */
+ _exit(0);
+ }
+ if (tdb_robust_mutex_pid == -1) {
+ goto cleanup;
+ }
+ close(pipe_down[0]);
+ pipe_down[0] = -1;
+ close(pipe_up[1]);
+ pipe_up[1] = -1;
+
+ nread = read(pipe_up[0], &ret, sizeof(ret));
+ if (nread != sizeof(ret)) {
+ goto cleanup;
+ }
+
+ ret = pthread_mutex_trylock(m);
+ if (ret != EBUSY) {
+ if (ret == 0) {
+ pthread_mutex_unlock(m);
+ }
+ goto cleanup;
+ }
+
+ if (write(pipe_down[1], &c, 1) != 1) {
+ goto cleanup;
+ }
+
+ nread = read(pipe_up[0], &c, 1);
+ if (nread != 0) {
+ goto cleanup;
+ }
+
+ tdb_robust_mutex_wait_for_child(&saved_child_pid);
+
+ ret = pthread_mutex_trylock(m);
+ if (ret != EOWNERDEAD) {
+ if (ret == 0) {
+ pthread_mutex_unlock(m);
+ }
+ goto cleanup;
+ }
+
+ ret = pthread_mutex_consistent(m);
+ if (ret != 0) {
+ goto cleanup;
+ }
+
+ ret = pthread_mutex_trylock(m);
+ if (ret != EDEADLK && ret != EBUSY) {
+ pthread_mutex_unlock(m);
+ goto cleanup;
+ }
+
+ ret = pthread_mutex_unlock(m);
+ if (ret != 0) {
+ goto cleanup;
+ }
+
+ tdb_mutex_locking_cached = true;
+
+cleanup:
+ /*
+	 * Note that we don't reset the signal handler; we just reset
+	 * tdb_robust_mutex_pid to -1. This is ok, as this code path is only
+ * called once per process.
+ *
+ * Leaving our signal handler avoids races with other threads potentially
+ * setting up their SIGCHLD handlers.
+ *
+ * The worst thing that can happen is that the other newer signal
+ * handler will get the SIGCHLD signal for our child and/or reap the
+ * child with a wait() function. tdb_robust_mutex_wait_for_child()
+ * handles the case where waitpid returns ECHILD.
+ */
+ tdb_robust_mutex_wait_for_child(&saved_child_pid);
+
+ if (m != NULL) {
+ pthread_mutex_destroy(m);
+ }
+ if (cleanup_ma) {
+ pthread_mutexattr_destroy(&ma);
+ }
+ if (pipe_down[0] != -1) {
+ close(pipe_down[0]);
+ }
+ if (pipe_down[1] != -1) {
+ close(pipe_down[1]);
+ }
+ if (pipe_up[0] != -1) {
+ close(pipe_up[0]);
+ }
+ if (pipe_up[1] != -1) {
+ close(pipe_up[1]);
+ }
+ if (ptr != NULL) {
+ munmap(ptr, sizeof(pthread_mutex_t));
+ }
+
+ return tdb_mutex_locking_cached;
+}
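
tdb_runtime_check_for_robust_mutexes() is the check callers are expected to run before requesting TDB_MUTEX_LOCKING (see the flag validation in open.c below). A minimal usage sketch; the file name is illustrative:

    #include <fcntl.h>
    #include "tdb.h"

    static struct tdb_context *open_with_mutexes(void)
    {
    	int flags = TDB_CLEAR_IF_FIRST;

    	/* Only request mutex locking where robust mutexes
    	 * actually work; tdb_open() would fail otherwise. */
    	if (tdb_runtime_check_for_robust_mutexes()) {
    		flags |= TDB_MUTEX_LOCKING;
    	}
    	return tdb_open("locking.tdb", 0, flags, O_RDWR|O_CREAT, 0600);
    }
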
+
+#else
+
+size_t tdb_mutex_size(struct tdb_context *tdb)
+{
+ return 0;
+}
+
+bool tdb_have_mutexes(struct tdb_context *tdb)
+{
+ return false;
+}
+
+int tdb_mutex_allrecord_lock(struct tdb_context *tdb, int ltype,
+ enum tdb_lock_flags flags)
+{
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+}
+
+int tdb_mutex_allrecord_unlock(struct tdb_context *tdb)
+{
+ return -1;
+}
+
+int tdb_mutex_allrecord_upgrade(struct tdb_context *tdb)
+{
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+}
+
+void tdb_mutex_allrecord_downgrade(struct tdb_context *tdb)
+{
+ return;
+}
+
+int tdb_mutex_mmap(struct tdb_context *tdb)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+int tdb_mutex_munmap(struct tdb_context *tdb)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+int tdb_mutex_init(struct tdb_context *tdb)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+_PUBLIC_ bool tdb_runtime_check_for_robust_mutexes(void)
+{
+ return false;
+}
+
+#endif
diff --git a/lib/tdb/common/open.c b/lib/tdb/common/open.c
new file mode 100644
index 0000000..3fa7ce1
--- /dev/null
+++ b/lib/tdb/common/open.c
@@ -0,0 +1,968 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Andrew Tridgell 1999-2005
+ Copyright (C) Paul `Rusty' Russell 2000
+ Copyright (C) Jeremy Allison 2000-2003
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "tdb_private.h"
+
+/* all contexts, to ensure no double-opens (fcntl locks don't nest!) */
+static struct tdb_context *tdbs = NULL;
+
+/* We use two hashes to double-check they're using the right hash function. */
+void tdb_header_hash(struct tdb_context *tdb,
+ uint32_t *magic1_hash, uint32_t *magic2_hash)
+{
+ TDB_DATA hash_key;
+ uint32_t tdb_magic = TDB_MAGIC;
+
+ hash_key.dptr = discard_const_p(unsigned char, TDB_MAGIC_FOOD);
+ hash_key.dsize = sizeof(TDB_MAGIC_FOOD);
+ *magic1_hash = tdb->hash_fn(&hash_key);
+
+ hash_key.dptr = (unsigned char *)CONVERT(tdb_magic);
+ hash_key.dsize = sizeof(tdb_magic);
+ *magic2_hash = tdb->hash_fn(&hash_key);
+
+ /* Make sure at least one hash is non-zero! */
+ if (*magic1_hash == 0 && *magic2_hash == 0)
+ *magic1_hash = 1;
+}
+
+/* initialise a new database with a specified hash size */
+static int tdb_new_database(struct tdb_context *tdb, struct tdb_header *header,
+ int hash_size)
+{
+ struct tdb_header *newdb;
+ size_t size;
+ int ret = -1;
+
+ /* We make it up in memory, then write it out if not internal */
+ size = sizeof(struct tdb_header) + (hash_size+1)*sizeof(tdb_off_t);
+ if (!(newdb = (struct tdb_header *)calloc(size, 1))) {
+ tdb->ecode = TDB_ERR_OOM;
+ return -1;
+ }
+
+ /* Fill in the header */
+ newdb->version = TDB_VERSION;
+ newdb->hash_size = hash_size;
+
+ tdb_header_hash(tdb, &newdb->magic1_hash, &newdb->magic2_hash);
+
+ /* Make sure older tdbs (which don't check the magic hash fields)
+ * will refuse to open this TDB. */
+ if (tdb->flags & TDB_INCOMPATIBLE_HASH)
+ newdb->rwlocks = TDB_HASH_RWLOCK_MAGIC;
+
+	/*
+	 * We create a tdb with TDB_FEATURE_FLAG_MUTEX support;
+	 * the flag combination and runtime feature checks
+	 * have already been done by the caller.
+	 */
+ if (tdb->flags & TDB_MUTEX_LOCKING) {
+ newdb->feature_flags |= TDB_FEATURE_FLAG_MUTEX;
+ }
+
+	/*
+	 * If we have any features, we add the FEATURE_FLAG_MAGIC,
+	 * overwriting the TDB_HASH_RWLOCK_MAGIC above.
+	 */
+ if (newdb->feature_flags != 0) {
+ newdb->rwlocks = TDB_FEATURE_FLAG_MAGIC;
+ }
+
+	/*
+	 * Some of the following code paths require
+	 * the fields on 'tdb' to be up-to-date,
+	 * e.g. tdb_mutex_size() relies on them.
+	 */
+ tdb->feature_flags = newdb->feature_flags;
+ tdb->hash_size = newdb->hash_size;
+
+ if (tdb->flags & TDB_INTERNAL) {
+ tdb->map_size = size;
+ tdb->map_ptr = (char *)newdb;
+ memcpy(header, newdb, sizeof(*header));
+ /* Convert the `ondisk' version if asked. */
+ CONVERT(*newdb);
+ return 0;
+ }
+ if (lseek(tdb->fd, 0, SEEK_SET) == -1)
+ goto fail;
+
+ if (ftruncate(tdb->fd, 0) == -1)
+ goto fail;
+
+ if (newdb->feature_flags & TDB_FEATURE_FLAG_MUTEX) {
+ newdb->mutex_size = tdb_mutex_size(tdb);
+ tdb->hdr_ofs = newdb->mutex_size;
+ }
+
+ /* This creates an endian-converted header, as if read from disk */
+ CONVERT(*newdb);
+ memcpy(header, newdb, sizeof(*header));
+ /* Don't endian-convert the magic food! */
+ memcpy(newdb->magic_food, TDB_MAGIC_FOOD, strlen(TDB_MAGIC_FOOD)+1);
+
+ if (!tdb_write_all(tdb->fd, newdb, size))
+ goto fail;
+
+ if (newdb->feature_flags & TDB_FEATURE_FLAG_MUTEX) {
+
+ /*
+ * Now we init the mutex area
+ * followed by a second header.
+ */
+
+ ret = ftruncate(
+ tdb->fd,
+ newdb->mutex_size + sizeof(struct tdb_header));
+ if (ret == -1) {
+ goto fail;
+ }
+ ret = tdb_mutex_init(tdb);
+ if (ret == -1) {
+ goto fail;
+ }
+
+ /*
+ * Write a second header behind the mutexes. That's the area
+	 * that will be mmapped.
+ */
+ ret = lseek(tdb->fd, newdb->mutex_size, SEEK_SET);
+ if (ret == -1) {
+ goto fail;
+ }
+ if (!tdb_write_all(tdb->fd, newdb, size)) {
+ goto fail;
+ }
+ }
+
+ ret = 0;
+ fail:
+ SAFE_FREE(newdb);
+ return ret;
+}
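
For a tdb created with TDB_FEATURE_FLAG_MUTEX, the writes above produce two header copies. Reconstructed from tdb_new_database(), the resulting file layout is roughly:

    offset 0                        mutex area (initialized by
                                    tdb_mutex_init)
    offset mutex_size               struct tdb_header: the copy that is
                                    actually used; tdb->hdr_ofs points
                                    here and all regular I/O is shifted
                                    by it
    after that header               hash chain heads (the freelist is
                                    chain 0), then records
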
+
+
+
+static int tdb_already_open(dev_t device,
+ ino_t ino)
+{
+ struct tdb_context *i;
+
+ for (i = tdbs; i; i = i->next) {
+ if (i->device == device && i->inode == ino) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* open the database, creating it if necessary
+
+ The open_flags and mode are passed straight to the open call on the
+ database file. A flags value of O_WRONLY is invalid. The hash size
+ is advisory, use zero for a default value.
+
+  The return value is NULL on error, in which case errno is also set.
+  Don't try to call tdb_error or tdb_errname, just do strerror(errno).
+
+ @param name may be NULL for internal databases. */
+_PUBLIC_ struct tdb_context *tdb_open(const char *name, int hash_size, int tdb_flags,
+ int open_flags, mode_t mode)
+{
+ return tdb_open_ex(name, hash_size, tdb_flags, open_flags, mode, NULL, NULL);
+}
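
Since a failed open reports through errno, as the comment above notes, the caller checks it directly. A minimal sketch; "app.tdb" is an illustrative path:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include "tdb.h"

    static struct tdb_context *open_or_complain(void)
    {
    	/* hash_size 0 selects the built-in default */
    	struct tdb_context *db = tdb_open("app.tdb", 0, TDB_DEFAULT,
    					  O_RDWR|O_CREAT, 0600);
    	if (db == NULL) {
    		fprintf(stderr, "tdb_open: %s\n", strerror(errno));
    	}
    	return db;
    }
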
+
+/* a default logging function */
+static void null_log_fn(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...) PRINTF_ATTRIBUTE(3, 4);
+static void null_log_fn(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...)
+{
+}
+
+static bool check_header_hash(struct tdb_context *tdb,
+ struct tdb_header *header,
+ bool default_hash, uint32_t *m1, uint32_t *m2)
+{
+ tdb_header_hash(tdb, m1, m2);
+ if (header->magic1_hash == *m1 &&
+ header->magic2_hash == *m2) {
+ return true;
+ }
+
+ /* If they explicitly set a hash, always respect it. */
+ if (!default_hash)
+ return false;
+
+ /* Otherwise, try the other inbuilt hash. */
+ if (tdb->hash_fn == tdb_old_hash)
+ tdb->hash_fn = tdb_jenkins_hash;
+ else
+ tdb->hash_fn = tdb_old_hash;
+ return check_header_hash(tdb, header, false, m1, m2);
+}
+
+static bool tdb_mutex_open_ok(struct tdb_context *tdb,
+ const struct tdb_header *header)
+{
+ if (tdb->flags & TDB_NOLOCK) {
+ /*
+ * We don't look at locks, so it does not matter to have a
+ * compatible mutex implementation. Allow the open.
+ */
+ return true;
+ }
+
+ if (!(tdb->flags & TDB_MUTEX_LOCKING)) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_mutex_open_ok[%s]: "
+ "Can use mutexes only with "
+ "MUTEX_LOCKING or NOLOCK\n",
+ tdb->name));
+ return false;
+ }
+
+ if (tdb_mutex_size(tdb) != header->mutex_size) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_mutex_open_ok[%s]: "
+			"Mutex size changed from %"PRIu32" to %zu.\n",
+ tdb->name,
+ header->mutex_size,
+ tdb_mutex_size(tdb)));
+ return false;
+ }
+
+ return true;
+}
+
+_PUBLIC_ struct tdb_context *tdb_open_ex(const char *name, int hash_size, int tdb_flags,
+ int open_flags, mode_t mode,
+ const struct tdb_logging_context *log_ctx,
+ tdb_hash_func hash_fn)
+{
+ int orig_errno = errno;
+ struct tdb_header header = {
+ .version = 0,
+ };
+ struct tdb_context *tdb;
+ struct stat st;
+ int rev = 0;
+ bool locked = false;
+ unsigned char *vp;
+ uint32_t vertest;
+ unsigned v;
+ const char *hash_alg;
+ uint32_t magic1, magic2;
+ int ret;
+
+ if (!(tdb = (struct tdb_context *)calloc(1, sizeof *tdb))) {
+ /* Can't log this */
+ errno = ENOMEM;
+ goto fail;
+ }
+ tdb_io_init(tdb);
+
+ if (tdb_flags & TDB_INTERNAL) {
+ tdb_flags |= TDB_INCOMPATIBLE_HASH;
+ }
+ if (tdb_flags & TDB_MUTEX_LOCKING) {
+ tdb_flags |= TDB_INCOMPATIBLE_HASH;
+ }
+
+ tdb->fd = -1;
+#ifdef TDB_TRACE
+ tdb->tracefd = -1;
+#endif
+ tdb->name = NULL;
+ tdb->map_ptr = NULL;
+ tdb->flags = tdb_flags;
+ tdb->open_flags = open_flags;
+ if (log_ctx) {
+ tdb->log = *log_ctx;
+ } else {
+ tdb->log.log_fn = null_log_fn;
+ tdb->log.log_private = NULL;
+ }
+
+ if (name == NULL && (tdb_flags & TDB_INTERNAL)) {
+ name = "__TDB_INTERNAL__";
+ }
+
+ if (name == NULL) {
+ tdb->name = discard_const_p(char, "__NULL__");
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: called with name == NULL\n"));
+ tdb->name = NULL;
+ errno = EINVAL;
+ goto fail;
+ }
+
+ /* now make a copy of the name, as the caller memory might go away */
+ if (!(tdb->name = (char *)strdup(name))) {
+ /*
+ * set the name as the given string, so that tdb_name() will
+ * work in case of an error.
+ */
+ tdb->name = discard_const_p(char, name);
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: can't strdup(%s)\n",
+ name));
+ tdb->name = NULL;
+ errno = ENOMEM;
+ goto fail;
+ }
+
+ if (hash_fn) {
+ tdb->hash_fn = hash_fn;
+ hash_alg = "the user defined";
+ } else {
+ /* This controls what we use when creating a tdb. */
+ if (tdb->flags & TDB_INCOMPATIBLE_HASH) {
+ tdb->hash_fn = tdb_jenkins_hash;
+ } else {
+ tdb->hash_fn = tdb_old_hash;
+ }
+ hash_alg = "either default";
+ }
+
+ /* cache the page size */
+ tdb->page_size = getpagesize();
+ if (tdb->page_size <= 0) {
+ tdb->page_size = 0x2000;
+ }
+
+ tdb->max_dead_records = (tdb_flags & TDB_VOLATILE) ? 5 : 0;
+
+ if ((open_flags & O_ACCMODE) == O_WRONLY) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: can't open tdb %s write-only\n",
+ name));
+ errno = EINVAL;
+ goto fail;
+ }
+
+ if (hash_size == 0)
+ hash_size = DEFAULT_HASH_SIZE;
+ if ((open_flags & O_ACCMODE) == O_RDONLY) {
+ tdb->read_only = 1;
+ /* read only databases don't do locking or clear if first */
+ tdb->flags |= TDB_NOLOCK;
+ tdb->flags &= ~(TDB_CLEAR_IF_FIRST|TDB_MUTEX_LOCKING);
+ }
+
+ if ((tdb->flags & TDB_ALLOW_NESTING) &&
+ (tdb->flags & TDB_DISALLOW_NESTING)) {
+ tdb->ecode = TDB_ERR_NESTING;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: "
+			"allow_nesting and disallow_nesting are not allowed together!\n"));
+ errno = EINVAL;
+ goto fail;
+ }
+
+ if (tdb->flags & TDB_MUTEX_LOCKING) {
+ /*
+ * Here we catch bugs in the callers,
+ * the runtime check for existing tdb's comes later.
+ */
+
+ if (tdb->flags & TDB_INTERNAL) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: "
+ "invalid flags for %s - TDB_MUTEX_LOCKING and "
+ "TDB_INTERNAL are not allowed together\n", name));
+ errno = EINVAL;
+ goto fail;
+ }
+
+ if (tdb->flags & TDB_NOMMAP) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: "
+ "invalid flags for %s - TDB_MUTEX_LOCKING and "
+ "TDB_NOMMAP are not allowed together\n", name));
+ errno = EINVAL;
+ goto fail;
+ }
+
+ if (tdb->read_only) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: "
+ "invalid flags for %s - TDB_MUTEX_LOCKING "
+ "not allowed read only\n", name));
+ errno = EINVAL;
+ goto fail;
+ }
+
+ /*
+ * The callers should have called
+ * tdb_runtime_check_for_robust_mutexes()
+ * before using TDB_MUTEX_LOCKING!
+ *
+ * This makes sure the caller understands
+ * that the locking may behave a bit differently
+ * than with pure fcntl locking. E.g. multiple
+ * read locks are not supported.
+ */
+ if (!tdb_runtime_check_for_robust_mutexes()) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: "
+ "invalid flags for %s - TDB_MUTEX_LOCKING "
+ "requires support for robust_mutexes\n",
+ name));
+ errno = ENOSYS;
+ goto fail;
+ }
+ }
+
+ if (getenv("TDB_NO_FSYNC")) {
+ tdb->flags |= TDB_NOSYNC;
+ }
+
+ /*
+ * TDB_ALLOW_NESTING is the default behavior.
+ * Note: this may change in future versions!
+ */
+ if (!(tdb->flags & TDB_DISALLOW_NESTING)) {
+ tdb->flags |= TDB_ALLOW_NESTING;
+ }
+
+ /* internal databases don't mmap or lock, and start off cleared */
+ if (tdb->flags & TDB_INTERNAL) {
+ tdb->flags |= (TDB_NOLOCK | TDB_NOMMAP);
+ tdb->flags &= ~TDB_CLEAR_IF_FIRST;
+ if (tdb_new_database(tdb, &header, hash_size) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: tdb_new_database failed!"));
+ goto fail;
+ }
+ tdb->hash_size = hash_size;
+ goto internal;
+ }
+
+ if ((tdb->fd = open(name, open_flags, mode)) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_WARNING, "tdb_open_ex: could not open file %s: %s\n",
+ name, strerror(errno)));
+ goto fail; /* errno set by open(2) */
+ }
+
+ /* on exec, don't inherit the fd */
+ v = fcntl(tdb->fd, F_GETFD, 0);
+ fcntl(tdb->fd, F_SETFD, v | FD_CLOEXEC);
+
+ /* ensure there is only one process initialising at once */
+ if (tdb_nest_lock(tdb, OPEN_LOCK, F_WRLCK, TDB_LOCK_WAIT) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: failed to get open lock on %s: %s\n",
+ name, strerror(errno)));
+ goto fail; /* errno set by tdb_brlock */
+ }
+
+ /* we need to zero database if we are the only one with it open */
+ if ((tdb_flags & TDB_CLEAR_IF_FIRST) &&
+ (!tdb->read_only)) {
+ ret = tdb_nest_lock(tdb, ACTIVE_LOCK, F_WRLCK,
+ TDB_LOCK_NOWAIT|TDB_LOCK_PROBE);
+ locked = (ret == 0);
+
+ if (locked) {
+ ret = tdb_brlock(tdb, F_WRLCK, FREELIST_TOP, 0,
+ TDB_LOCK_WAIT);
+ if (ret == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: "
+ "tdb_brlock failed for %s: %s\n",
+ name, strerror(errno)));
+ goto fail;
+ }
+ ret = tdb_new_database(tdb, &header, hash_size);
+ if (ret == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: "
+ "tdb_new_database failed for "
+ "%s: %s\n", name, strerror(errno)));
+ tdb_unlockall(tdb);
+ goto fail;
+ }
+ ret = tdb_brunlock(tdb, F_WRLCK, FREELIST_TOP, 0);
+ if (ret == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: "
+ "tdb_unlockall failed for %s: %s\n",
+ name, strerror(errno)));
+ goto fail;
+ }
+ ret = lseek(tdb->fd, 0, SEEK_SET);
+ if (ret == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: "
+ "lseek failed for %s: %s\n",
+ name, strerror(errno)));
+ goto fail;
+ }
+ }
+ }
+
+ errno = 0;
+ if (read(tdb->fd, &header, sizeof(header)) != sizeof(header)
+ /*
+ * Call strncmp() rather than strcmp() in case header.magic_food is
+	 * not zero-terminated. We're still checking the full string for
+ * equality, as tdb_header::magic_food is larger than
+ * TDB_MAGIC_FOOD.
+ */
+ || strncmp(header.magic_food, TDB_MAGIC_FOOD, sizeof(header.magic_food)) != 0) {
+ if (!(open_flags & O_CREAT) ||
+ tdb_new_database(tdb, &header, hash_size) == -1) {
+ if (errno == 0) {
+ errno = EIO; /* ie bad format or something */
+ }
+ goto fail;
+ }
+ rev = (tdb->flags & TDB_CONVERT);
+ } else if (header.version != TDB_VERSION
+ && !(rev = (header.version==TDB_BYTEREV(TDB_VERSION)))) {
+ /* wrong version */
+ errno = EIO;
+ goto fail;
+ }
+ vp = (unsigned char *)&header.version;
+ vertest = (((uint32_t)vp[0]) << 24) | (((uint32_t)vp[1]) << 16) |
+ (((uint32_t)vp[2]) << 8) | (uint32_t)vp[3];
+ tdb->flags |= (vertest==TDB_VERSION) ? TDB_BIGENDIAN : 0;
+ if (!rev)
+ tdb->flags &= ~TDB_CONVERT;
+ else {
+ tdb->flags |= TDB_CONVERT;
+ tdb_convert(&header, sizeof(header));
+ }
+
+ /*
+ * We only use st.st_dev and st.st_ino from the raw fstat()
+ * call, everything else needs to use tdb_fstat() in order
+ * to skip tdb->hdr_ofs!
+ */
+ if (fstat(tdb->fd, &st) == -1) {
+ goto fail;
+ }
+ tdb->device = st.st_dev;
+ tdb->inode = st.st_ino;
+ ZERO_STRUCT(st);
+
+ if (header.rwlocks != 0 &&
+ header.rwlocks != TDB_FEATURE_FLAG_MAGIC &&
+ header.rwlocks != TDB_HASH_RWLOCK_MAGIC) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: spinlocks no longer supported\n"));
+ errno = ENOSYS;
+ goto fail;
+ }
+
+ if (header.hash_size == 0) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: invalid database: 0 hash_size\n"));
+ errno = ENOSYS;
+ goto fail;
+ }
+
+ tdb->hash_size = header.hash_size;
+
+ if (header.rwlocks == TDB_FEATURE_FLAG_MAGIC) {
+ tdb->feature_flags = header.feature_flags;
+ }
+
+ if (tdb->feature_flags & ~TDB_SUPPORTED_FEATURE_FLAGS) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: unsupported "
+ "features in tdb %s: 0x%08x (supported: 0x%08x)\n",
+ name, (unsigned)tdb->feature_flags,
+ (unsigned)TDB_SUPPORTED_FEATURE_FLAGS));
+ errno = ENOSYS;
+ goto fail;
+ }
+
+ if (tdb->feature_flags & TDB_FEATURE_FLAG_MUTEX) {
+ if (!tdb_mutex_open_ok(tdb, &header)) {
+ errno = EINVAL;
+ goto fail;
+ }
+
+ /*
+ * We need to remember the hdr_ofs
+ * also for the TDB_NOLOCK case
+ * if the current library doesn't support
+ * mutex locking.
+ */
+ tdb->hdr_ofs = header.mutex_size;
+
+ if ((!(tdb_flags & TDB_CLEAR_IF_FIRST)) && (!tdb->read_only)) {
+ /*
+ * Open an existing mutexed tdb, but without
+ * CLEAR_IF_FIRST. We need to initialize the
+ * mutex array and keep the CLEAR_IF_FIRST
+ * lock locked.
+ */
+ ret = tdb_nest_lock(tdb, ACTIVE_LOCK, F_WRLCK,
+ TDB_LOCK_NOWAIT|TDB_LOCK_PROBE);
+ locked = (ret == 0);
+
+ if (locked) {
+ ret = tdb_mutex_init(tdb);
+ if (ret == -1) {
+ TDB_LOG((tdb,
+ TDB_DEBUG_FATAL,
+ "tdb_open_ex: tdb_mutex_init "
+					"failed for %s: %s\n",
+ name, strerror(errno)));
+ goto fail;
+ }
+ }
+ }
+ }
+
+ if ((header.magic1_hash == 0) && (header.magic2_hash == 0)) {
+ /* older TDB without magic hash references */
+ tdb->hash_fn = tdb_old_hash;
+ } else if (!check_header_hash(tdb, &header, !hash_fn,
+ &magic1, &magic2)) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: "
+ "%s was not created with %s hash function we are using\n"
+ "magic1_hash[0x%08X %s 0x%08X] "
+ "magic2_hash[0x%08X %s 0x%08X]\n",
+ name, hash_alg,
+ header.magic1_hash,
+ (header.magic1_hash == magic1) ? "==" : "!=",
+ magic1,
+ header.magic2_hash,
+ (header.magic2_hash == magic2) ? "==" : "!=",
+ magic2));
+ errno = EINVAL;
+ goto fail;
+ }
+
+ /* Is it already in the open list? If so, fail. */
+ if (tdb_already_open(tdb->device, tdb->inode)) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: "
+ "%s (%d,%d) is already open in this process\n",
+ name, (int)tdb->device, (int)tdb->inode));
+ errno = EBUSY;
+ goto fail;
+ }
+
+	/*
+	 * We had tdb_mmap(tdb) here before,
+	 * but we need to use tdb_fstat(),
+	 * which is triggered from tdb_oob() before calling tdb_mmap(),
+	 * as it skips tdb->hdr_ofs.
+	 */
+ tdb->map_size = 0;
+ ret = tdb_oob(tdb, 0, 1, 0);
+ if (ret == -1) {
+ errno = EIO;
+ goto fail;
+ }
+
+ if (tdb->feature_flags & TDB_FEATURE_FLAG_MUTEX) {
+ if (!(tdb->flags & TDB_NOLOCK)) {
+ ret = tdb_mutex_mmap(tdb);
+ if (ret != 0) {
+ goto fail;
+ }
+ }
+ }
+
+ if (tdb->hash_size > UINT32_MAX/4) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: "
+ "hash size %"PRIu32" too large\n", tdb->hash_size));
+ errno = EINVAL;
+ goto fail;
+ }
+
+ ret = tdb_oob(tdb, FREELIST_TOP, 4*tdb->hash_size, 1);
+ if (ret == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: "
+ "hash size %"PRIu32" does not fit\n", tdb->hash_size));
+ errno = EINVAL;
+ goto fail;
+ }
+
+ if (locked) {
+ if (tdb_nest_unlock(tdb, ACTIVE_LOCK, F_WRLCK, false) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: "
+ "failed to release ACTIVE_LOCK on %s: %s\n",
+ name, strerror(errno)));
+ goto fail;
+ }
+	}
+
+ if (locked || (tdb_flags & TDB_CLEAR_IF_FIRST)) {
+ /*
+ * We always need to do this if the CLEAR_IF_FIRST
+ * flag is set, even if we didn't get the initial
+ * exclusive lock as we need to let all other users
+ * know we're using it.
+ */
+
+ ret = tdb_nest_lock(tdb, ACTIVE_LOCK, F_RDLCK, TDB_LOCK_WAIT);
+ if (ret == -1) {
+ goto fail;
+ }
+ }
+
+ /* if needed, run recovery */
+ if (tdb_transaction_recover(tdb) == -1) {
+ goto fail;
+ }
+
+#ifdef TDB_TRACE
+ {
+ char tracefile[strlen(name) + 32];
+
+ snprintf(tracefile, sizeof(tracefile),
+ "%s.trace.%li", name, (long)getpid());
+ tdb->tracefd = open(tracefile, O_WRONLY|O_CREAT|O_EXCL, 0600);
+ if (tdb->tracefd >= 0) {
+ tdb_enable_seqnum(tdb);
+ tdb_trace_open(tdb, "tdb_open", hash_size, tdb_flags,
+ open_flags);
+ } else
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: failed to open trace file %s!\n", tracefile));
+ }
+#endif
+
+ internal:
+ /* Internal (memory-only) databases skip all the code above to
+ * do with disk files, and resume here by releasing their
+ * open lock and hooking into the active list. */
+ if (tdb_nest_unlock(tdb, OPEN_LOCK, F_WRLCK, false) == -1) {
+ goto fail;
+ }
+ tdb->next = tdbs;
+ tdbs = tdb;
+ errno = orig_errno;
+ return tdb;
+
+ fail:
+ { int save_errno = errno;
+
+ if (!tdb)
+ return NULL;
+
+#ifdef TDB_TRACE
+ close(tdb->tracefd);
+#endif
+ if (tdb->map_ptr) {
+ if (tdb->flags & TDB_INTERNAL)
+ SAFE_FREE(tdb->map_ptr);
+ else
+ tdb_munmap(tdb);
+ }
+ if (tdb->fd != -1)
+ if (close(tdb->fd) != 0)
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: failed to close tdb->fd on error!\n"));
+ SAFE_FREE(tdb->lockrecs);
+ SAFE_FREE(tdb->name);
+ SAFE_FREE(tdb);
+ errno = save_errno;
+ return NULL;
+ }
+}
+
+/*
+ * Set the maximum number of dead records per hash chain
+ */
+
+_PUBLIC_ void tdb_set_max_dead(struct tdb_context *tdb, int max_dead)
+{
+ tdb->max_dead_records = max_dead;
+}
+
+/**
+ * Close a database.
+ *
+ * @returns -1 for error; 0 for success.
+ **/
+_PUBLIC_ int tdb_close(struct tdb_context *tdb)
+{
+ struct tdb_context **i;
+ int ret = 0;
+
+ if (tdb->transaction) {
+ tdb_transaction_cancel(tdb);
+ }
+ tdb_trace(tdb, "tdb_close");
+
+ if (tdb->map_ptr) {
+ if (tdb->flags & TDB_INTERNAL)
+ SAFE_FREE(tdb->map_ptr);
+ else
+ tdb_munmap(tdb);
+ }
+
+ tdb_mutex_munmap(tdb);
+
+ SAFE_FREE(tdb->name);
+ if (tdb->fd != -1) {
+ ret = close(tdb->fd);
+ tdb->fd = -1;
+ }
+ SAFE_FREE(tdb->lockrecs);
+
+ /* Remove from contexts list */
+ for (i = &tdbs; *i; i = &(*i)->next) {
+ if (*i == tdb) {
+ *i = tdb->next;
+ break;
+ }
+ }
+
+#ifdef TDB_TRACE
+ close(tdb->tracefd);
+#endif
+ memset(tdb, 0, sizeof(*tdb));
+ SAFE_FREE(tdb);
+
+ return ret;
+}
+
+/* register a logging function */
+_PUBLIC_ void tdb_set_logging_function(struct tdb_context *tdb,
+ const struct tdb_logging_context *log_ctx)
+{
+ tdb->log = *log_ctx;
+}
+
+_PUBLIC_ void *tdb_get_logging_private(struct tdb_context *tdb)
+{
+ return tdb->log.log_private;
+}
+
+static int tdb_reopen_internal(struct tdb_context *tdb, bool active_lock)
+{
+#if !defined(LIBREPLACE_PREAD_NOT_REPLACED) || \
+ !defined(LIBREPLACE_PWRITE_NOT_REPLACED)
+ struct stat st;
+#endif
+
+ if (tdb->flags & TDB_INTERNAL) {
+ return 0; /* Nothing to do. */
+ }
+
+ if (tdb_have_extra_locks(tdb)) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_reopen: reopen not allowed with locks held\n"));
+ goto fail;
+ }
+
+ if (tdb->transaction != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_reopen: reopen not allowed inside a transaction\n"));
+ goto fail;
+ }
+
+/* If we have real pread & pwrite, we can skip reopen. */
+#if !defined(LIBREPLACE_PREAD_NOT_REPLACED) || \
+ !defined(LIBREPLACE_PWRITE_NOT_REPLACED)
+ if (tdb_munmap(tdb) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_reopen: munmap failed (%s)\n", strerror(errno)));
+ goto fail;
+ }
+ if (close(tdb->fd) != 0)
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_reopen: WARNING closing tdb->fd failed!\n"));
+ tdb->fd = open(tdb->name, tdb->open_flags & ~(O_CREAT|O_TRUNC), 0);
+ if (tdb->fd == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_reopen: open failed (%s)\n", strerror(errno)));
+ goto fail;
+ }
+ /*
+ * We only use st.st_dev and st.st_ino from the raw fstat()
+ * call, everything else needs to use tdb_fstat() in order
+ * to skip tdb->hdr_ofs!
+ */
+ if (fstat(tdb->fd, &st) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_reopen: fstat failed (%s)\n", strerror(errno)));
+ goto fail;
+ }
+ if (st.st_ino != tdb->inode || st.st_dev != tdb->device) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_reopen: file dev/inode has changed!\n"));
+ goto fail;
+ }
+ ZERO_STRUCT(st);
+
+	/*
+	 * We had tdb_mmap(tdb) here before,
+	 * but we need to use tdb_fstat(),
+	 * which is triggered from tdb_oob() before calling tdb_mmap(),
+	 * as it skips tdb->hdr_ofs.
+	 */
+ tdb->map_size = 0;
+ if (tdb_oob(tdb, 0, 1, 0) != 0) {
+ goto fail;
+ }
+#endif /* fake pread or pwrite */
+
+ /* We may still think we hold the active lock. */
+ tdb->num_lockrecs = 0;
+ SAFE_FREE(tdb->lockrecs);
+ tdb->lockrecs_array_length = 0;
+
+ if (active_lock && tdb_nest_lock(tdb, ACTIVE_LOCK, F_RDLCK, TDB_LOCK_WAIT) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_reopen: failed to obtain active lock\n"));
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ tdb_close(tdb);
+ return -1;
+}
+
+/* reopen a tdb - this can be used after a fork to ensure that we have an independent
+ seek pointer from our parent and to re-establish locks */
+_PUBLIC_ int tdb_reopen(struct tdb_context *tdb)
+{
+ bool active_lock;
+ active_lock = (tdb->flags & (TDB_CLEAR_IF_FIRST|TDB_MUTEX_LOCKING));
+
+ return tdb_reopen_internal(tdb, active_lock);
+}
+
+/* reopen all tdb's */
+_PUBLIC_ int tdb_reopen_all(int parent_longlived)
+{
+ struct tdb_context *tdb;
+
+ for (tdb=tdbs; tdb; tdb = tdb->next) {
+ bool active_lock;
+
+ active_lock =
+ (tdb->flags & (TDB_CLEAR_IF_FIRST|TDB_MUTEX_LOCKING));
+
+ /*
+ * If the parent is longlived (ie. a
+ * parent daemon architecture), we know
+	 * it will keep its active lock on a
+ * tdb opened with CLEAR_IF_FIRST. Thus
+ * for child processes we don't have to
+ * add an active lock. This is essential
+ * to improve performance on systems that
+ * keep POSIX locks as a non-scalable data
+ * structure in the kernel.
+ */
+ if (parent_longlived) {
+ /* Ensure no clear-if-first. */
+ active_lock = false;
+ }
+
+ if (tdb_reopen_internal(tdb, active_lock) != 0)
+ return -1;
+ }
+
+ return 0;
+}
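
A sketch of the intended call pattern around fork(), assuming a long-lived parent daemon:

    #include <unistd.h>
    #include <stdlib.h>
    #include "tdb.h"

    static pid_t spawn_worker(void)
    {
    	pid_t pid = fork();

    	if (pid == 0) {
    		/* Child: fcntl locks are not inherited across
    		 * fork() and the seek pointer is shared, so
    		 * reopen all tdbs before touching them.
    		 * parent_longlived=1 means the parent keeps its
    		 * CLEAR_IF_FIRST active lock, so the child can
    		 * skip taking one. */
    		if (tdb_reopen_all(1) == -1) {
    			_exit(1);
    		}
    		/* ... child work ... */
    		_exit(0);
    	}
    	return pid;
    }
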
diff --git a/lib/tdb/common/rescue.c b/lib/tdb/common/rescue.c
new file mode 100644
index 0000000..7a85ebc
--- /dev/null
+++ b/lib/tdb/common/rescue.c
@@ -0,0 +1,351 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library, rescue attempt code.
+
+ Copyright (C) Rusty Russell 2012
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+#include "tdb_private.h"
+#include <assert.h>
+
+
+struct found {
+ tdb_off_t head; /* 0 -> invalid. */
+ struct tdb_record rec;
+ TDB_DATA key;
+ bool in_hash;
+ bool in_free;
+};
+
+struct found_table {
+ /* As an ordered array (by head offset). */
+ struct found *arr;
+ unsigned int num, max;
+};
+
+static bool looks_like_valid_record(struct tdb_context *tdb,
+ tdb_off_t off,
+ const struct tdb_record *rec,
+ TDB_DATA *key)
+{
+ unsigned int hval;
+
+ if (rec->magic != TDB_MAGIC)
+ return false;
+
+ if (rec->key_len + rec->data_len > rec->rec_len)
+ return false;
+
+ if (rec->rec_len % TDB_ALIGNMENT)
+ return false;
+
+ /* Next pointer must make some sense. */
+ if (rec->next > 0 && rec->next < TDB_DATA_START(tdb->hash_size))
+ return false;
+
+ if (tdb_oob(tdb, rec->next, sizeof(*rec), 1))
+ return false;
+
+ key->dsize = rec->key_len;
+ key->dptr = tdb_alloc_read(tdb, off + sizeof(*rec), key->dsize);
+ if (!key->dptr)
+ return false;
+
+ hval = tdb->hash_fn(key);
+ if (hval != rec->full_hash) {
+ free(key->dptr);
+ return false;
+ }
+
+ /* Caller frees up key->dptr */
+ return true;
+}
+
+static bool add_to_table(struct found_table *found,
+ tdb_off_t off,
+ struct tdb_record *rec,
+ TDB_DATA key)
+{
+ if (found->num + 1 > found->max) {
+ struct found *new;
+ found->max = (found->max ? found->max * 2 : 128);
+ new = realloc(found->arr, found->max * sizeof(found->arr[0]));
+ if (!new)
+ return false;
+ found->arr = new;
+ }
+
+ found->arr[found->num].head = off;
+ found->arr[found->num].rec = *rec;
+ found->arr[found->num].key = key;
+ found->arr[found->num].in_hash = false;
+ found->arr[found->num].in_free = false;
+
+ found->num++;
+ return true;
+}
+
+static bool walk_record(struct tdb_context *tdb,
+ const struct found *f,
+ void (*walk)(TDB_DATA, TDB_DATA, void *private_data),
+ void *private_data)
+{
+ TDB_DATA data;
+
+ data.dsize = f->rec.data_len;
+ data.dptr = tdb_alloc_read(tdb,
+ f->head + sizeof(f->rec) + f->rec.key_len,
+ data.dsize);
+ if (!data.dptr) {
+ if (tdb->ecode == TDB_ERR_OOM)
+ return false;
+ /* I/O errors are expected. */
+ return true;
+ }
+
+ walk(f->key, data, private_data);
+ free(data.dptr);
+ return true;
+}
+
+/* First entry which has offset >= this one. */
+static unsigned int find_entry(struct found_table *found, tdb_off_t off)
+{
+ unsigned int start = 0, end = found->num;
+
+ while (start < end) {
+ /* We can't overflow here. */
+ unsigned int mid = (start + end) / 2;
+
+ if (off < found->arr[mid].head) {
+ end = mid;
+ } else if (off > found->arr[mid].head) {
+ start = mid + 1;
+ } else {
+ return mid;
+ }
+ }
+
+ assert(start == end);
+ return end;
+}
+
+static void found_in_hashchain(struct found_table *found, tdb_off_t head)
+{
+ unsigned int match;
+
+ match = find_entry(found, head);
+ if (match < found->num && found->arr[match].head == head) {
+ found->arr[match].in_hash = true;
+ }
+}
+
+static void mark_free_area(struct found_table *found, tdb_off_t head,
+ tdb_len_t len)
+{
+ unsigned int match;
+
+ match = find_entry(found, head);
+ /* Mark everything within this free entry. */
+ while (match < found->num) {
+ if (found->arr[match].head >= head + len) {
+ break;
+ }
+ found->arr[match].in_free = true;
+ match++;
+ }
+}
+
+static int cmp_key(const void *a, const void *b)
+{
+ const struct found *fa = a, *fb = b;
+
+ if (fa->key.dsize < fb->key.dsize) {
+ return -1;
+ } else if (fa->key.dsize > fb->key.dsize) {
+ return 1;
+ }
+ return memcmp(fa->key.dptr, fb->key.dptr, fa->key.dsize);
+}
+
+static bool key_eq(TDB_DATA a, TDB_DATA b)
+{
+ return a.dsize == b.dsize
+ && memcmp(a.dptr, b.dptr, a.dsize) == 0;
+}
+
+static void free_table(struct found_table *found)
+{
+ unsigned int i;
+
+ for (i = 0; i < found->num; i++) {
+ free(found->arr[i].key.dptr);
+ }
+ free(found->arr);
+}
+
+static void logging_suppressed(struct tdb_context *tdb,
+ enum tdb_debug_level level, const char *fmt, ...)
+{
+}
+
+_PUBLIC_ int tdb_rescue(struct tdb_context *tdb,
+ void (*walk)(TDB_DATA, TDB_DATA, void *private_data),
+ void *private_data)
+{
+ struct found_table found = { NULL, 0, 0 };
+ tdb_off_t h, off, i;
+ tdb_log_func oldlog = tdb->log.log_fn;
+ struct tdb_record rec;
+ TDB_DATA key;
+ bool locked;
+
+ /* Read-only databases use no locking at all: it's best-effort.
+ * We may have a write lock already, so skip that case too. */
+ if (tdb->read_only || tdb->allrecord_lock.count != 0) {
+ locked = false;
+ } else {
+ if (tdb_lockall_read(tdb) == -1)
+ return -1;
+ locked = true;
+ }
+
+ /* Make sure we know true size of the underlying file. */
+ tdb_oob(tdb, tdb->map_size, 1, 1);
+
+ /* Suppress logging, since we anticipate errors. */
+ tdb->log.log_fn = logging_suppressed;
+
+ /* Now walk entire db looking for records. */
+ for (off = TDB_DATA_START(tdb->hash_size);
+ off < tdb->map_size;
+ off += TDB_ALIGNMENT) {
+ if (tdb->methods->tdb_read(tdb, off, &rec, sizeof(rec),
+ DOCONV()) == -1)
+ continue;
+
+ if (looks_like_valid_record(tdb, off, &rec, &key)) {
+ if (!add_to_table(&found, off, &rec, key)) {
+ goto oom;
+ }
+ }
+ }
+
+	/* Walk hash chains to positively vet the records found above. */
+ for (h = 0; h < 1+tdb->hash_size; h++) {
+ bool slow_chase = false;
+ tdb_off_t slow_off = FREELIST_TOP + h*sizeof(tdb_off_t);
+
+ if (tdb_ofs_read(tdb, FREELIST_TOP + h*sizeof(tdb_off_t),
+ &off) == -1)
+ continue;
+
+ while (off && off != slow_off) {
+ if (tdb->methods->tdb_read(tdb, off, &rec, sizeof(rec),
+ DOCONV()) != 0) {
+ break;
+ }
+
+ /* 0 is the free list, rest are hash chains. */
+ if (h == 0) {
+ /* Don't mark garbage as free. */
+ if (rec.magic != TDB_FREE_MAGIC) {
+ break;
+ }
+ mark_free_area(&found, off,
+ sizeof(rec) + rec.rec_len);
+ } else {
+ found_in_hashchain(&found, off);
+ }
+
+ off = rec.next;
+
+ /* Loop detection using second pointer at half-speed */
+ if (slow_chase) {
+ /* First entry happens to be next ptr */
+ tdb_ofs_read(tdb, slow_off, &slow_off);
+ }
+ slow_chase = !slow_chase;
+ }
+ }
+
+ /* Recovery area: must be marked as free, since it often has old
+ * records in there! */
+ if (tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, &off) == 0 && off != 0) {
+ if (tdb->methods->tdb_read(tdb, off, &rec, sizeof(rec),
+ DOCONV()) == 0) {
+ mark_free_area(&found, off, sizeof(rec) + rec.rec_len);
+ }
+ }
+
+ /* Now sort by key! */
+ if (found.arr != NULL) {
+ qsort(found.arr, found.num, sizeof(found.arr[0]), cmp_key);
+ }
+
+ for (i = 0; (found.arr != NULL) && i < found.num; ) {
+ unsigned int num, num_in_hash = 0;
+
+ /* How many are identical? */
+ for (num = 0; num < found.num - i; num++) {
+ if (!key_eq(found.arr[i].key, found.arr[i+num].key)) {
+ break;
+ }
+ if (found.arr[i+num].in_hash) {
+ if (!walk_record(tdb, &found.arr[i+num],
+ walk, private_data))
+ goto oom;
+ num_in_hash++;
+ }
+ }
+ assert(num);
+
+ /* If none were in the hash, we print any not in free list. */
+ if (num_in_hash == 0) {
+ unsigned int j;
+
+ for (j = i; j < i + num; j++) {
+ if (!found.arr[j].in_free) {
+ if (!walk_record(tdb, &found.arr[j],
+ walk, private_data))
+ goto oom;
+ }
+ }
+ }
+
+ i += num;
+ }
+
+ tdb->log.log_fn = oldlog;
+ if (locked) {
+ tdb_unlockall_read(tdb);
+ }
+ return 0;
+
+oom:
+ tdb->log.log_fn = oldlog;
+ tdb->ecode = TDB_ERR_OOM;
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_rescue: failed allocating\n"));
+ free_table(&found);
+ if (locked) {
+ tdb_unlockall_read(tdb);
+ }
+ return -1;
+}
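
A typical caller funnels whatever tdb_rescue() can salvage into a fresh database. A hedged sketch; with TDB_INSERT, the first version seen of each key wins:

    #include "tdb.h"

    static void rescue_walk(TDB_DATA key, TDB_DATA data, void *private_data)
    {
    	struct tdb_context *out = private_data;

    	/* Keep the first copy seen of each key. */
    	tdb_store(out, key, data, TDB_INSERT);
    }

    static int salvage(struct tdb_context *broken, struct tdb_context *fresh)
    {
    	/* Returns -1 only on allocation failure. */
    	return tdb_rescue(broken, rescue_walk, fresh);
    }
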
diff --git a/lib/tdb/common/summary.c b/lib/tdb/common/summary.c
new file mode 100644
index 0000000..a93eb93
--- /dev/null
+++ b/lib/tdb/common/summary.c
@@ -0,0 +1,219 @@
+ /*
+ Trivial Database: human-readable summary code
+ Copyright (C) Rusty Russell 2010
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+#include "tdb_private.h"
+
+#define SUMMARY_FORMAT \
+ "Size of file/data: %llu/%zu\n" \
+ "Header offset/logical size: %zu/%zu\n" \
+ "Number of records: %zu\n" \
+ "Incompatible hash: %s\n" \
+ "Active/supported feature flags: 0x%08x/0x%08x\n" \
+ "Robust mutexes locking: %s\n" \
+ "Smallest/average/largest keys: %zu/%zu/%zu\n" \
+ "Smallest/average/largest data: %zu/%zu/%zu\n" \
+ "Smallest/average/largest padding: %zu/%zu/%zu\n" \
+ "Number of dead records: %zu\n" \
+ "Smallest/average/largest dead records: %zu/%zu/%zu\n" \
+ "Number of free records: %zu\n" \
+ "Smallest/average/largest free records: %zu/%zu/%zu\n" \
+ "Number of hash chains: %zu\n" \
+ "Smallest/average/largest hash chains: %zu/%zu/%zu\n" \
+ "Number of uncoalesced records: %zu\n" \
+ "Smallest/average/largest uncoalesced runs: %zu/%zu/%zu\n" \
+ "Percentage keys/data/padding/free/dead/rechdrs&tailers/hashes: %.0f/%.0f/%.0f/%.0f/%.0f/%.0f/%.0f\n"
+
+/* We don't use the tally module, to keep upstream happy. */
+struct tally {
+ size_t min, max, total;
+ size_t num;
+};
+
+static void tally_init(struct tally *tally)
+{
+ tally->total = 0;
+ tally->num = 0;
+ tally->min = tally->max = 0;
+}
+
+static void tally_add(struct tally *tally, size_t len)
+{
+ if (tally->num == 0)
+ tally->max = tally->min = len;
+ else if (len > tally->max)
+ tally->max = len;
+ else if (len < tally->min)
+ tally->min = len;
+ tally->num++;
+ tally->total += len;
+}
+
+static size_t tally_mean(const struct tally *tally)
+{
+ if (!tally->num)
+ return 0;
+ return tally->total / tally->num;
+}
+
+static size_t get_hash_length(struct tdb_context *tdb, unsigned int i)
+{
+ tdb_off_t rec_ptr;
+ struct tdb_chainwalk_ctx chainwalk;
+ size_t count = 0;
+
+ if (tdb_ofs_read(tdb, TDB_HASH_TOP(i), &rec_ptr) == -1)
+ return 0;
+
+ tdb_chainwalk_init(&chainwalk, rec_ptr);
+
+ /* keep looking until we find the right record */
+ while (rec_ptr) {
+ struct tdb_record r;
+ bool ok;
+ ++count;
+ if (tdb_rec_read(tdb, rec_ptr, &r) == -1)
+ return 0;
+ rec_ptr = r.next;
+ ok = tdb_chainwalk_check(tdb, &chainwalk, rec_ptr);
+ if (!ok) {
+ return SIZE_MAX;
+ }
+ }
+ return count;
+}
+
+_PUBLIC_ char *tdb_summary(struct tdb_context *tdb)
+{
+ off_t file_size;
+ tdb_off_t off, rec_off;
+ struct tally freet, keys, data, dead, extra, hashval, uncoal;
+ struct tdb_record rec;
+ char *ret = NULL;
+ bool locked;
+ size_t unc = 0;
+ int len;
+ struct tdb_record recovery;
+
+ /* Read-only databases use no locking at all: it's best-effort.
+ * We may have a write lock already, so skip that case too. */
+ if (tdb->read_only || tdb->allrecord_lock.count != 0) {
+ locked = false;
+ } else {
+ if (tdb_lockall_read(tdb) == -1)
+ return NULL;
+ locked = true;
+ }
+
+ if (tdb_recovery_area(tdb, tdb->methods, &rec_off, &recovery) != 0) {
+ goto unlock;
+ }
+
+ tally_init(&freet);
+ tally_init(&keys);
+ tally_init(&data);
+ tally_init(&dead);
+ tally_init(&extra);
+ tally_init(&hashval);
+ tally_init(&uncoal);
+
+ for (off = TDB_DATA_START(tdb->hash_size);
+ off < tdb->map_size - 1;
+ off += sizeof(rec) + rec.rec_len) {
+ if (tdb->methods->tdb_read(tdb, off, &rec, sizeof(rec),
+ DOCONV()) == -1)
+ goto unlock;
+ switch (rec.magic) {
+ case TDB_MAGIC:
+ tally_add(&keys, rec.key_len);
+ tally_add(&data, rec.data_len);
+ tally_add(&extra, rec.rec_len - (rec.key_len
+ + rec.data_len));
+ if (unc > 1)
+ tally_add(&uncoal, unc - 1);
+ unc = 0;
+ break;
+ case TDB_FREE_MAGIC:
+ tally_add(&freet, rec.rec_len);
+ unc++;
+ break;
+ /* If we crash after ftruncate, we can get zeroes or fill. */
+ case TDB_RECOVERY_INVALID_MAGIC:
+ case 0x42424242:
+ unc++;
+ /* If it's a valid recovery, we can trust rec_len. */
+ if (off != rec_off) {
+ rec.rec_len = tdb_dead_space(tdb, off)
+ - sizeof(rec);
+ }
+
+ FALL_THROUGH;
+ case TDB_DEAD_MAGIC:
+ tally_add(&dead, rec.rec_len);
+ break;
+ default:
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Unexpected record magic 0x%x at offset %u\n",
+ rec.magic, off));
+ goto unlock;
+ }
+ }
+ if (unc > 1)
+ tally_add(&uncoal, unc - 1);
+
+ for (off = 0; off < tdb->hash_size; off++)
+ tally_add(&hashval, get_hash_length(tdb, off));
+
+ file_size = tdb->hdr_ofs + tdb->map_size;
+
+ len = asprintf(&ret, SUMMARY_FORMAT,
+ (unsigned long long)file_size, keys.total+data.total,
+ (size_t)tdb->hdr_ofs, (size_t)tdb->map_size,
+ keys.num,
+ (tdb->hash_fn == tdb_jenkins_hash)?"yes":"no",
+ (unsigned)tdb->feature_flags, TDB_SUPPORTED_FEATURE_FLAGS,
+ (tdb->feature_flags & TDB_FEATURE_FLAG_MUTEX)?"yes":"no",
+ keys.min, tally_mean(&keys), keys.max,
+ data.min, tally_mean(&data), data.max,
+ extra.min, tally_mean(&extra), extra.max,
+ dead.num,
+ dead.min, tally_mean(&dead), dead.max,
+ freet.num,
+ freet.min, tally_mean(&freet), freet.max,
+ hashval.num,
+ hashval.min, tally_mean(&hashval), hashval.max,
+ uncoal.total,
+ uncoal.min, tally_mean(&uncoal), uncoal.max,
+ keys.total * 100.0 / file_size,
+ data.total * 100.0 / file_size,
+ extra.total * 100.0 / file_size,
+ freet.total * 100.0 / file_size,
+ dead.total * 100.0 / file_size,
+ (keys.num + freet.num + dead.num)
+ * (sizeof(struct tdb_record) + sizeof(uint32_t))
+ * 100.0 / file_size,
+ tdb->hash_size * sizeof(tdb_off_t)
+ * 100.0 / file_size);
+ if (len == -1) {
+ goto unlock;
+ }
+
+unlock:
+ if (locked) {
+ tdb_unlockall_read(tdb);
+ }
+ return ret;
+}
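
The summary string is allocated with asprintf(), so the caller owns and frees it. A minimal usage sketch:

    #include <stdio.h>
    #include <stdlib.h>
    #include "tdb.h"

    static void print_summary(struct tdb_context *tdb)
    {
    	char *s = tdb_summary(tdb);

    	if (s != NULL) {
    		fputs(s, stdout);
    		free(s);	/* caller owns the string */
    	}
    }
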
diff --git a/lib/tdb/common/tdb.c b/lib/tdb/common/tdb.c
new file mode 100644
index 0000000..de829bb
--- /dev/null
+++ b/lib/tdb/common/tdb.c
@@ -0,0 +1,1348 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Andrew Tridgell 1999-2005
+ Copyright (C) Paul `Rusty' Russell 2000
+ Copyright (C) Jeremy Allison 2000-2003
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "tdb_private.h"
+
+_PUBLIC_ TDB_DATA tdb_null;
+
+/*
+ non-blocking increment of the tdb sequence number if the tdb has been opened using
+ the TDB_SEQNUM flag
+*/
+_PUBLIC_ void tdb_increment_seqnum_nonblock(struct tdb_context *tdb)
+{
+ tdb_off_t seqnum=0;
+
+ if (!(tdb->flags & TDB_SEQNUM)) {
+ return;
+ }
+
+ /* we ignore errors from this, as we have no sane way of
+ dealing with them.
+ */
+ tdb_ofs_read(tdb, TDB_SEQNUM_OFS, &seqnum);
+ seqnum++;
+ tdb_ofs_write(tdb, TDB_SEQNUM_OFS, &seqnum);
+}
+
+/*
+ increment the tdb sequence number if the tdb has been opened using
+ the TDB_SEQNUM flag
+*/
+static void tdb_increment_seqnum(struct tdb_context *tdb)
+{
+ if (!(tdb->flags & TDB_SEQNUM)) {
+ return;
+ }
+
+ if (tdb->transaction != NULL) {
+ tdb_increment_seqnum_nonblock(tdb);
+ return;
+ }
+
+#if defined(HAVE___ATOMIC_ADD_FETCH) && defined(HAVE___ATOMIC_ADD_LOAD)
+ if (tdb->map_ptr != NULL) {
+ uint32_t *pseqnum = (uint32_t *)(
+ TDB_SEQNUM_OFS + (char *)tdb->map_ptr);
+ __atomic_add_fetch(pseqnum, 1, __ATOMIC_SEQ_CST);
+ return;
+ }
+#endif
+
+ if (tdb_nest_lock(tdb, TDB_SEQNUM_OFS, F_WRLCK,
+ TDB_LOCK_WAIT|TDB_LOCK_PROBE) != 0) {
+ return;
+ }
+
+ tdb_increment_seqnum_nonblock(tdb);
+
+ tdb_nest_unlock(tdb, TDB_SEQNUM_OFS, F_WRLCK, false);
+}
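
The sequence number gives readers a cheap "did anything change?" test without fetching data. A sketch, assuming the tdb was opened with TDB_SEQNUM; tdb_get_seqnum() is the public reader:

    #include "tdb.h"

    /* Returns 1 (and updates *cached) when the database has
     * changed since the last call. */
    static int db_changed(struct tdb_context *tdb, int *cached)
    {
    	int now = tdb_get_seqnum(tdb);

    	if (now != *cached) {
    		*cached = now;
    		return 1;
    	}
    	return 0;
    }
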
+
+static int tdb_key_compare(TDB_DATA key, TDB_DATA data, void *private_data)
+{
+ return memcmp(data.dptr, key.dptr, data.dsize);
+}
+
+void tdb_chainwalk_init(struct tdb_chainwalk_ctx *ctx, tdb_off_t ptr)
+{
+ *ctx = (struct tdb_chainwalk_ctx) { .slow_ptr = ptr };
+}
+
+bool tdb_chainwalk_check(struct tdb_context *tdb,
+ struct tdb_chainwalk_ctx *ctx,
+ tdb_off_t next_ptr)
+{
+ int ret;
+
+ if (ctx->slow_chase) {
+ ret = tdb_ofs_read(tdb, ctx->slow_ptr, &ctx->slow_ptr);
+ if (ret == -1) {
+ return false;
+ }
+ }
+ ctx->slow_chase = !ctx->slow_chase;
+
+ if (next_ptr == ctx->slow_ptr) {
+ tdb->ecode = TDB_ERR_CORRUPT;
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "tdb_chainwalk_check: circular chain\n"));
+ return false;
+ }
+
+ return true;
+}
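
tdb_chainwalk is Floyd's two-pointer cycle detection unrolled across loop iterations: the caller advances the fast pointer once per step, while the context advances the slow pointer on every second call, so a circular (corrupted) chain makes the pointers collide instead of looping forever. The same invariant on an in-memory list, as a self-contained sketch:

    #include <stddef.h>

    struct node { struct node *next; };

    /* Returns 1 if the list starting at head contains a cycle. */
    static int has_cycle(const struct node *head)
    {
    	const struct node *fast = head;
    	const struct node *slow = head;
    	int advance_slow = 0;

    	while (fast != NULL) {
    		fast = fast->next;		/* full speed */
    		if (advance_slow) {
    			slow = slow->next;	/* half speed */
    		}
    		advance_slow = !advance_slow;
    		if (fast != NULL && fast == slow) {
    			return 1;	/* fast lapped slow: cycle */
    		}
    	}
    	return 0;
    }
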
+
+/* Returns 0 on fail. On success, return offset of record, and fills
+ in rec */
+static tdb_off_t tdb_find(struct tdb_context *tdb, TDB_DATA key, uint32_t hash,
+ struct tdb_record *r)
+{
+ tdb_off_t rec_ptr;
+ struct tdb_chainwalk_ctx chainwalk;
+
+ /* read in the hash top */
+ if (tdb_ofs_read(tdb, TDB_HASH_TOP(hash), &rec_ptr) == -1)
+ return 0;
+
+ tdb_chainwalk_init(&chainwalk, rec_ptr);
+
+ /* keep looking until we find the right record */
+ while (rec_ptr) {
+ bool ok;
+
+ if (tdb_rec_read(tdb, rec_ptr, r) == -1)
+ return 0;
+
+ if (!TDB_DEAD(r) && hash==r->full_hash
+ && key.dsize==r->key_len
+ && tdb_parse_data(tdb, key, rec_ptr + sizeof(*r),
+ r->key_len, tdb_key_compare,
+ NULL) == 0) {
+ return rec_ptr;
+ }
+ rec_ptr = r->next;
+
+ ok = tdb_chainwalk_check(tdb, &chainwalk, rec_ptr);
+ if (!ok) {
+ return 0;
+ }
+ }
+ tdb->ecode = TDB_ERR_NOEXIST;
+ return 0;
+}
+
+/* As tdb_find, but if you succeed, keep the lock */
+tdb_off_t tdb_find_lock_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash, int locktype,
+ struct tdb_record *rec)
+{
+ uint32_t rec_ptr;
+
+ if (tdb_lock(tdb, BUCKET(hash), locktype) == -1)
+ return 0;
+ if (!(rec_ptr = tdb_find(tdb, key, hash, rec)))
+ tdb_unlock(tdb, BUCKET(hash), locktype);
+ return rec_ptr;
+}
+
+static TDB_DATA _tdb_fetch(struct tdb_context *tdb, TDB_DATA key);
+
+struct tdb_update_hash_state {
+ const TDB_DATA *dbufs;
+ int num_dbufs;
+ tdb_len_t dbufs_len;
+};
+
+static int tdb_update_hash_cmp(TDB_DATA key, TDB_DATA data, void *private_data)
+{
+ struct tdb_update_hash_state *state = private_data;
+ unsigned char *dptr = data.dptr;
+ int i;
+
+ if (state->dbufs_len != data.dsize) {
+ return -1;
+ }
+
+ for (i=0; i<state->num_dbufs; i++) {
+ TDB_DATA dbuf = state->dbufs[i];
+		if (dbuf.dsize > 0) {
+ int ret;
+ ret = memcmp(dptr, dbuf.dptr, dbuf.dsize);
+ if (ret != 0) {
+ return -1;
+ }
+ dptr += dbuf.dsize;
+ }
+ }
+
+ return 0;
+}
+
+/* Update an entry in place - this only works if the new data size
+   is <= the old data size and the key exists.
+   On failure return -1.
+*/
+static int tdb_update_hash(struct tdb_context *tdb, TDB_DATA key,
+ uint32_t hash,
+ const TDB_DATA *dbufs, int num_dbufs,
+ tdb_len_t dbufs_len)
+{
+ struct tdb_record rec;
+ tdb_off_t rec_ptr, ofs;
+ int i;
+
+ /* find entry */
+ if (!(rec_ptr = tdb_find(tdb, key, hash, &rec)))
+ return -1;
+
+ /* it could be an exact duplicate of what is there - this is
+ * surprisingly common (eg. with a ldb re-index). */
+ if (rec.data_len == dbufs_len) {
+ struct tdb_update_hash_state state = {
+ .dbufs = dbufs, .num_dbufs = num_dbufs,
+ .dbufs_len = dbufs_len
+ };
+ int ret;
+
+ ret = tdb_parse_record(tdb, key, tdb_update_hash_cmp, &state);
+ if (ret == 0) {
+ return 0;
+ }
+ }
+
+ /* must be long enough key, data and tailer */
+ if (rec.rec_len < key.dsize + dbufs_len + sizeof(tdb_off_t)) {
+ tdb->ecode = TDB_SUCCESS; /* Not really an error */
+ return -1;
+ }
+
+ ofs = rec_ptr + sizeof(rec) + rec.key_len;
+
+ for (i=0; i<num_dbufs; i++) {
+ TDB_DATA dbuf = dbufs[i];
+ int ret;
+
+ ret = tdb->methods->tdb_write(tdb, ofs, dbuf.dptr, dbuf.dsize);
+ if (ret == -1) {
+ return -1;
+ }
+ ofs += dbuf.dsize;
+ }
+
+ if (dbufs_len != rec.data_len) {
+ /* update size */
+ rec.data_len = dbufs_len;
+ return tdb_rec_write(tdb, rec_ptr, &rec);
+ }
+
+ return 0;
+}
+
+/* find an entry in the database given a key */
+/* If an entry doesn't exist, tdb->ecode will be set to
+ * TDB_ERR_NOEXIST. If a key has no data attached
+ * then the TDB_DATA will have zero length but
+ * a non-zero pointer
+ */
+static TDB_DATA _tdb_fetch(struct tdb_context *tdb, TDB_DATA key)
+{
+ tdb_off_t rec_ptr;
+ struct tdb_record rec;
+ TDB_DATA ret;
+ uint32_t hash;
+
+ /* find which hash bucket it is in */
+ hash = tdb->hash_fn(&key);
+ if (!(rec_ptr = tdb_find_lock_hash(tdb,key,hash,F_RDLCK,&rec)))
+ return tdb_null;
+
+ ret.dptr = tdb_alloc_read(tdb, rec_ptr + sizeof(rec) + rec.key_len,
+ rec.data_len);
+ ret.dsize = rec.data_len;
+ tdb_unlock(tdb, BUCKET(rec.full_hash), F_RDLCK);
+ return ret;
+}
+
+_PUBLIC_ TDB_DATA tdb_fetch(struct tdb_context *tdb, TDB_DATA key)
+{
+ TDB_DATA ret = _tdb_fetch(tdb, key);
+
+ tdb_trace_1rec_retrec(tdb, "tdb_fetch", key, ret);
+ return ret;
+}
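+
+/*
+  Illustrative sketch, not part of the library: typical caller-side use
+  of tdb_fetch(). "db" is an assumed, already-open tdb_context.
+
+    TDB_DATA key = { .dptr = (unsigned char *)"mykey", .dsize = 5 };
+    TDB_DATA val = tdb_fetch(db, key);
+
+    if (val.dptr == NULL) {
+        ... not found, or an I/O error: check tdb_error(db) ...
+    } else {
+        ... use val.dptr / val.dsize ...
+        free(val.dptr);   the caller owns the returned buffer
+    }
+*/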
+
+/*
+ * Find an entry in the database and hand the record's data to a parsing
+ * function. The parsing function is executed under the chain read lock, so it
+ * should be fast and should not block on other syscalls.
+ *
+ * DON'T CALL OTHER TDB CALLS FROM THE PARSER, THIS MIGHT LEAD TO SEGFAULTS.
+ *
+ * For mmapped tdbs that do not have a transaction open, the parsing
+ * function is pointed directly at the mmap area, avoiding the malloc and
+ * memcpy. If a transaction is open or no mmap is available, it has to do
+ * malloc/read/parse/free.
+ *
+ * This is interesting for all readers of potentially large data structures in
+ * the tdb records, ldb indexes being one example.
+ *
+ * Return -1 if the record was not found.
+ */
+
+_PUBLIC_ int tdb_parse_record(struct tdb_context *tdb, TDB_DATA key,
+ int (*parser)(TDB_DATA key, TDB_DATA data,
+ void *private_data),
+ void *private_data)
+{
+ tdb_off_t rec_ptr;
+ struct tdb_record rec;
+ int ret;
+ uint32_t hash;
+
+ /* find which hash bucket it is in */
+ hash = tdb->hash_fn(&key);
+
+ if (!(rec_ptr = tdb_find_lock_hash(tdb,key,hash,F_RDLCK,&rec))) {
+ /* record not found */
+ tdb_trace_1rec_ret(tdb, "tdb_parse_record", key, -1);
+ tdb->ecode = TDB_ERR_NOEXIST;
+ return -1;
+ }
+ tdb_trace_1rec_ret(tdb, "tdb_parse_record", key, 0);
+
+ ret = tdb_parse_data(tdb, key, rec_ptr + sizeof(rec) + rec.key_len,
+ rec.data_len, parser, private_data);
+
+ tdb_unlock(tdb, BUCKET(rec.full_hash), F_RDLCK);
+
+ return ret;
+}
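+
+/*
+  Illustrative sketch, not part of the library: a minimal parser
+  callback. It runs under the chain read lock, so it only copies out
+  what it needs; it must not call back into tdb and must not keep
+  data.dptr after returning. "db" and "key" are assumptions.
+
+    static int fetch_size(TDB_DATA key, TDB_DATA data, void *private_data)
+    {
+        size_t *sz = (size_t *)private_data;
+        *sz = data.dsize;
+        return 0;
+    }
+
+    size_t size;
+    if (tdb_parse_record(db, key, fetch_size, &size) == -1) {
+        ... record not found: see tdb_error(db) ...
+    }
+*/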
+
+/* check if an entry in the database exists
+
+   note that 1 is returned if the key is found and 0 is returned if not
+   found; this doesn't match the conventions in the rest of this module, but is
+ compatible with gdbm
+*/
+static int tdb_exists_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash)
+{
+ struct tdb_record rec;
+
+ if (tdb_find_lock_hash(tdb, key, hash, F_RDLCK, &rec) == 0)
+ return 0;
+ tdb_unlock(tdb, BUCKET(rec.full_hash), F_RDLCK);
+ return 1;
+}
+
+_PUBLIC_ int tdb_exists(struct tdb_context *tdb, TDB_DATA key)
+{
+ uint32_t hash = tdb->hash_fn(&key);
+ int ret;
+
+ ret = tdb_exists_hash(tdb, key, hash);
+ tdb_trace_1rec_ret(tdb, "tdb_exists", key, ret);
+ return ret;
+}
+
+/*
+ * Move a dead record to the freelist. The hash chain and freelist
+ * must be locked.
+ */
+static int tdb_del_dead(struct tdb_context *tdb,
+ uint32_t last_ptr,
+ uint32_t rec_ptr,
+ struct tdb_record *rec,
+ bool *deleted)
+{
+ int ret;
+
+ ret = tdb_write_lock_record(tdb, rec_ptr);
+ if (ret == -1) {
+ /* Someone traversing here: Just leave it dead */
+ return 0;
+ }
+ ret = tdb_write_unlock_record(tdb, rec_ptr);
+ if (ret == -1) {
+ return -1;
+ }
+ ret = tdb_ofs_write(tdb, last_ptr, &rec->next);
+ if (ret == -1) {
+ return -1;
+ }
+
+ *deleted = true;
+
+ ret = tdb_free(tdb, rec_ptr, rec);
+ return ret;
+}
+
+/*
+ * Walk the hash chain and leave tdb->max_dead_records around. Move
+ * the rest of dead records to the freelist.
+ */
+int tdb_trim_dead(struct tdb_context *tdb, uint32_t hash)
+{
+ struct tdb_chainwalk_ctx chainwalk;
+ struct tdb_record rec;
+ tdb_off_t last_ptr, rec_ptr;
+ bool locked_freelist = false;
+ int num_dead = 0;
+ int ret;
+
+ last_ptr = TDB_HASH_TOP(hash);
+
+ /*
+ * Init chainwalk with the pointer to the hash top. It might
+ * be that the very first record in the chain is a dead one
+ * that we have to delete.
+ */
+ tdb_chainwalk_init(&chainwalk, last_ptr);
+
+ ret = tdb_ofs_read(tdb, last_ptr, &rec_ptr);
+ if (ret == -1) {
+ return -1;
+ }
+
+ while (rec_ptr != 0) {
+ bool deleted = false;
+ uint32_t next;
+
+ ret = tdb_rec_read(tdb, rec_ptr, &rec);
+ if (ret == -1) {
+ goto fail;
+ }
+
+ /*
+ * Make a copy of rec.next: Further down we might
+ * delete and put the record on the freelist. Make
+ * sure that modifications in that code path can't
+ * break the chainwalk here.
+ */
+ next = rec.next;
+
+ if (rec.magic == TDB_DEAD_MAGIC) {
+ num_dead += 1;
+
+ if (num_dead > tdb->max_dead_records) {
+
+ if (!locked_freelist) {
+ /*
+ * Lock the freelist only if
+ * it's really required.
+ */
+ ret = tdb_lock(tdb, -1, F_WRLCK);
+ if (ret == -1) {
+ goto fail;
+					}
+ locked_freelist = true;
+ }
+
+ ret = tdb_del_dead(
+ tdb,
+ last_ptr,
+ rec_ptr,
+ &rec,
+ &deleted);
+
+ if (ret == -1) {
+ goto fail;
+ }
+ }
+ }
+
+ /*
+ * Don't do the chainwalk check if "rec_ptr" was
+ * deleted. We reduced the chain, and the chainwalk
+ * check might catch up early. Imagine a valid chain
+ * with just dead records: We never can bump the
+ * "slow" pointer in chainwalk_check, as there isn't
+ * anything left to jump to and compare.
+ */
+ if (!deleted) {
+ bool ok;
+
+ last_ptr = rec_ptr;
+
+ ok = tdb_chainwalk_check(tdb, &chainwalk, next);
+ if (!ok) {
+ ret = -1;
+ goto fail;
+ }
+ }
+ rec_ptr = next;
+ }
+ ret = 0;
+fail:
+ if (locked_freelist) {
+ tdb_unlock(tdb, -1, F_WRLCK);
+ }
+ return ret;
+}
+
+/* delete an entry in the database given a key */
+static int tdb_delete_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash)
+{
+ tdb_off_t rec_ptr;
+ struct tdb_record rec;
+ int ret;
+
+ if (tdb->read_only || tdb->traverse_read) {
+ tdb->ecode = TDB_ERR_RDONLY;
+ return -1;
+ }
+
+ rec_ptr = tdb_find_lock_hash(tdb, key, hash, F_WRLCK, &rec);
+ if (rec_ptr == 0) {
+ return -1;
+ }
+
+ /*
+ * Mark the record dead
+ */
+ rec.magic = TDB_DEAD_MAGIC;
+ ret = tdb_rec_write(tdb, rec_ptr, &rec);
+ if (ret == -1) {
+ goto done;
+ }
+
+ tdb_increment_seqnum(tdb);
+
+ ret = tdb_trim_dead(tdb, hash);
+done:
+ if (tdb_unlock(tdb, BUCKET(hash), F_WRLCK) != 0)
+ TDB_LOG((tdb, TDB_DEBUG_WARNING, "tdb_delete: WARNING tdb_unlock failed!\n"));
+ return ret;
+}
+
+_PUBLIC_ int tdb_delete(struct tdb_context *tdb, TDB_DATA key)
+{
+ uint32_t hash = tdb->hash_fn(&key);
+ int ret;
+
+ ret = tdb_delete_hash(tdb, key, hash);
+ tdb_trace_1rec_ret(tdb, "tdb_delete", key, ret);
+ return ret;
+}
+
+/*
+ * See if we have a dead record around with enough space
+ */
+tdb_off_t tdb_find_dead(struct tdb_context *tdb, uint32_t hash,
+ struct tdb_record *r, tdb_len_t length,
+ tdb_off_t *p_last_ptr)
+{
+ tdb_off_t rec_ptr, last_ptr;
+ struct tdb_chainwalk_ctx chainwalk;
+ tdb_off_t best_rec_ptr = 0;
+ tdb_off_t best_last_ptr = 0;
+ struct tdb_record best = { .rec_len = UINT32_MAX };
+
+ length += sizeof(tdb_off_t); /* tailer */
+
+ last_ptr = TDB_HASH_TOP(hash);
+
+ /* read in the hash top */
+ if (tdb_ofs_read(tdb, last_ptr, &rec_ptr) == -1)
+ return 0;
+
+ tdb_chainwalk_init(&chainwalk, rec_ptr);
+
+ /* keep looking until we find the right record */
+ while (rec_ptr) {
+ bool ok;
+
+ if (tdb_rec_read(tdb, rec_ptr, r) == -1)
+ return 0;
+
+ if (TDB_DEAD(r) && (r->rec_len >= length) &&
+ (r->rec_len < best.rec_len)) {
+ best_rec_ptr = rec_ptr;
+ best_last_ptr = last_ptr;
+ best = *r;
+ }
+ last_ptr = rec_ptr;
+ rec_ptr = r->next;
+
+ ok = tdb_chainwalk_check(tdb, &chainwalk, rec_ptr);
+ if (!ok) {
+ return 0;
+ }
+ }
+
+ if (best.rec_len == UINT32_MAX) {
+ return 0;
+ }
+
+ *r = best;
+ *p_last_ptr = best_last_ptr;
+ return best_rec_ptr;
+}
+
+static int _tdb_storev(struct tdb_context *tdb, TDB_DATA key,
+ const TDB_DATA *dbufs, int num_dbufs,
+ int flag, uint32_t hash)
+{
+ struct tdb_record rec;
+ tdb_off_t rec_ptr, ofs;
+ tdb_len_t rec_len, dbufs_len;
+ int i;
+ int ret = -1;
+
+ dbufs_len = 0;
+
+ for (i=0; i<num_dbufs; i++) {
+ size_t dsize = dbufs[i].dsize;
+
+ if ((dsize != 0) && (dbufs[i].dptr == NULL)) {
+ tdb->ecode = TDB_ERR_EINVAL;
+ goto fail;
+ }
+
+ dbufs_len += dsize;
+ if (dbufs_len < dsize) {
+ tdb->ecode = TDB_ERR_OOM;
+ goto fail;
+ }
+ }
+
+ rec_len = key.dsize + dbufs_len;
+ if ((rec_len < key.dsize) || (rec_len < dbufs_len)) {
+ tdb->ecode = TDB_ERR_OOM;
+ goto fail;
+ }
+
+ /* check for it existing, on insert. */
+ if (flag == TDB_INSERT) {
+ if (tdb_exists_hash(tdb, key, hash)) {
+ tdb->ecode = TDB_ERR_EXISTS;
+ goto fail;
+ }
+ } else {
+ /* first try in-place update, on modify or replace. */
+ if (tdb_update_hash(tdb, key, hash, dbufs, num_dbufs,
+ dbufs_len) == 0) {
+ goto done;
+ }
+ if (tdb->ecode == TDB_ERR_NOEXIST &&
+ flag == TDB_MODIFY) {
+ /* if the record doesn't exist and we are in TDB_MODIFY mode then
+ we should fail the store */
+ goto fail;
+ }
+ }
+	/* reset the error code potentially set by tdb_update_hash() */
+ tdb->ecode = TDB_SUCCESS;
+
+ /* delete any existing record - if it doesn't exist we don't
+ care. Doing this first reduces fragmentation, and avoids
+ coalescing with `allocated' block before it's updated. */
+ if (flag != TDB_INSERT)
+ tdb_delete_hash(tdb, key, hash);
+
+ /* we have to allocate some space */
+ rec_ptr = tdb_allocate(tdb, hash, rec_len, &rec);
+
+ if (rec_ptr == 0) {
+ goto fail;
+ }
+
+ /* Read hash top into next ptr */
+ if (tdb_ofs_read(tdb, TDB_HASH_TOP(hash), &rec.next) == -1)
+ goto fail;
+
+ rec.key_len = key.dsize;
+ rec.data_len = dbufs_len;
+ rec.full_hash = hash;
+ rec.magic = TDB_MAGIC;
+
+ ofs = rec_ptr;
+
+ /* write out and point the top of the hash chain at it */
+ ret = tdb_rec_write(tdb, ofs, &rec);
+ if (ret == -1) {
+ goto fail;
+ }
+ ofs += sizeof(rec);
+
+ ret = tdb->methods->tdb_write(tdb, ofs, key.dptr, key.dsize);
+ if (ret == -1) {
+ goto fail;
+ }
+ ofs += key.dsize;
+
+ for (i=0; i<num_dbufs; i++) {
+ if (dbufs[i].dsize == 0) {
+ continue;
+ }
+
+ ret = tdb->methods->tdb_write(tdb, ofs, dbufs[i].dptr,
+ dbufs[i].dsize);
+ if (ret == -1) {
+ goto fail;
+ }
+ ofs += dbufs[i].dsize;
+ }
+
+ ret = tdb_ofs_write(tdb, TDB_HASH_TOP(hash), &rec_ptr);
+ if (ret == -1) {
+ /* Need to tdb_unallocate() here */
+ goto fail;
+ }
+
+ done:
+ ret = 0;
+ fail:
+ if (ret == 0) {
+ tdb_increment_seqnum(tdb);
+ }
+ return ret;
+}
+
+static int _tdb_store(struct tdb_context *tdb, TDB_DATA key,
+ TDB_DATA dbuf, int flag, uint32_t hash)
+{
+ return _tdb_storev(tdb, key, &dbuf, 1, flag, hash);
+}
+
+/* store an element in the database, replacing any existing element
+ with the same key
+
+ return 0 on success, -1 on failure
+*/
+_PUBLIC_ int tdb_store(struct tdb_context *tdb, TDB_DATA key, TDB_DATA dbuf, int flag)
+{
+ uint32_t hash;
+ int ret;
+
+ if (tdb->read_only || tdb->traverse_read) {
+ tdb->ecode = TDB_ERR_RDONLY;
+ tdb_trace_2rec_flag_ret(tdb, "tdb_store", key, dbuf, flag, -1);
+ return -1;
+ }
+
+ /* find which hash bucket it is in */
+ hash = tdb->hash_fn(&key);
+ if (tdb_lock(tdb, BUCKET(hash), F_WRLCK) == -1)
+ return -1;
+
+ ret = _tdb_store(tdb, key, dbuf, flag, hash);
+ tdb_trace_2rec_flag_ret(tdb, "tdb_store", key, dbuf, flag, ret);
+ tdb_unlock(tdb, BUCKET(hash), F_WRLCK);
+ return ret;
+}
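+
+/*
+  Illustrative sketch, not part of the library, of the three flag modes;
+  "db", "key" and "val" are assumptions.
+
+    TDB_INSERT:  fails with TDB_ERR_EXISTS if the key is present
+    TDB_MODIFY:  fails with TDB_ERR_NOEXIST if the key is absent
+    TDB_REPLACE: inserts or overwrites unconditionally
+
+    if (tdb_store(db, key, val, TDB_REPLACE) == -1) {
+        ... check tdb_error(db) ...
+    }
+*/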
+
+_PUBLIC_ int tdb_storev(struct tdb_context *tdb, TDB_DATA key,
+ const TDB_DATA *dbufs, int num_dbufs, int flag)
+{
+ uint32_t hash;
+ int ret;
+
+ if (tdb->read_only || tdb->traverse_read) {
+ tdb->ecode = TDB_ERR_RDONLY;
+ tdb_trace_1plusn_rec_flag_ret(tdb, "tdb_storev", key,
+ dbufs, num_dbufs, flag, -1);
+ return -1;
+ }
+
+ /* find which hash bucket it is in */
+ hash = tdb->hash_fn(&key);
+ if (tdb_lock(tdb, BUCKET(hash), F_WRLCK) == -1)
+ return -1;
+
+ ret = _tdb_storev(tdb, key, dbufs, num_dbufs, flag, hash);
+ tdb_trace_1plusn_rec_flag_ret(tdb, "tdb_storev", key,
+ dbufs, num_dbufs, flag, -1);
+ tdb_unlock(tdb, BUCKET(hash), F_WRLCK);
+ return ret;
+}
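+
+/*
+  Illustrative sketch, not part of the library: storing one record from
+  two fragments (say a fixed header plus a variable payload) without
+  first concatenating them; "db", "key", "hdr" and "payload" are
+  assumptions.
+
+    TDB_DATA parts[2] = {
+        { .dptr = (unsigned char *)&hdr, .dsize = sizeof(hdr) },
+        { .dptr = payload.dptr, .dsize = payload.dsize },
+    };
+    if (tdb_storev(db, key, parts, 2, TDB_REPLACE) == -1) {
+        ... check tdb_error(db) ...
+    }
+*/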
+
+/* Append to an entry. Create if not exist. */
+_PUBLIC_ int tdb_append(struct tdb_context *tdb, TDB_DATA key, TDB_DATA new_dbuf)
+{
+ uint32_t hash;
+ TDB_DATA dbufs[2];
+ int ret = -1;
+
+ /* find which hash bucket it is in */
+ hash = tdb->hash_fn(&key);
+ if (tdb_lock(tdb, BUCKET(hash), F_WRLCK) == -1)
+ return -1;
+
+ dbufs[0] = _tdb_fetch(tdb, key);
+ dbufs[1] = new_dbuf;
+
+ ret = _tdb_storev(tdb, key, dbufs, 2, 0, hash);
+ tdb_trace_2rec_retrec(tdb, "tdb_append", key, dbufs[0], dbufs[1]);
+
+ tdb_unlock(tdb, BUCKET(hash), F_WRLCK);
+ SAFE_FREE(dbufs[0].dptr);
+ return ret;
+}
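+
+/*
+  Illustrative sketch, not part of the library: growing a record one
+  chunk at a time; the record is created on the first call. "db", "key",
+  "buf" and "n" are assumptions.
+
+    TDB_DATA chunk = { .dptr = buf, .dsize = n };
+    if (tdb_append(db, key, chunk) == -1) {
+        ... check tdb_error(db) ...
+    }
+*/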
+
+
+/*
+ return the name of the current tdb file
+ useful for external logging functions
+*/
+_PUBLIC_ const char *tdb_name(struct tdb_context *tdb)
+{
+ return tdb->name;
+}
+
+/*
+ return the underlying file descriptor being used by tdb, or -1
+ useful for external routines that want to check the device/inode
+ of the fd
+*/
+_PUBLIC_ int tdb_fd(struct tdb_context *tdb)
+{
+ return tdb->fd;
+}
+
+/*
+ return the current logging function
+ useful for external tdb routines that wish to log tdb errors
+*/
+_PUBLIC_ tdb_log_func tdb_log_fn(struct tdb_context *tdb)
+{
+ return tdb->log.log_fn;
+}
+
+
+/*
+ get the tdb sequence number. Only makes sense if the writers opened
+ with TDB_SEQNUM set. Note that this sequence number will wrap quite
+ quickly, so it should only be used for a 'has something changed'
+ test, not for code that relies on the count of the number of changes
+ made. If you want a counter then use a tdb record.
+
+ The aim of this sequence number is to allow for a very lightweight
+ test of a possible tdb change.
+*/
+_PUBLIC_ int tdb_get_seqnum(struct tdb_context *tdb)
+{
+ tdb_off_t seqnum=0;
+
+ if (tdb->transaction != NULL) {
+ tdb_ofs_read(tdb, TDB_SEQNUM_OFS, &seqnum);
+ return seqnum;
+ }
+
+#if defined(HAVE___ATOMIC_ADD_FETCH) && defined(HAVE___ATOMIC_ADD_LOAD)
+ if (tdb->map_ptr != NULL) {
+ uint32_t *pseqnum = (uint32_t *)(
+ TDB_SEQNUM_OFS + (char *)tdb->map_ptr);
+ uint32_t ret;
+		__atomic_load(pseqnum, &ret, __ATOMIC_SEQ_CST);
+ return ret;
+ }
+#endif
+
+ tdb_ofs_read(tdb, TDB_SEQNUM_OFS, &seqnum);
+ return seqnum;
+}
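+
+/*
+  Illustrative sketch, not part of the library: the intended "has
+  anything changed" polling pattern. The counter wraps, so compare only
+  for inequality, never for ordering; "db" and the cached value are
+  assumptions.
+
+    static int cached_seqnum;
+
+    int now = tdb_get_seqnum(db);
+    if (now != cached_seqnum) {
+        cached_seqnum = now;
+        ... invalidate caches / re-read the records of interest ...
+    }
+*/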
+
+_PUBLIC_ int tdb_hash_size(struct tdb_context *tdb)
+{
+ return tdb->hash_size;
+}
+
+_PUBLIC_ size_t tdb_map_size(struct tdb_context *tdb)
+{
+ return tdb->map_size;
+}
+
+_PUBLIC_ int tdb_get_flags(struct tdb_context *tdb)
+{
+ return tdb->flags;
+}
+
+_PUBLIC_ void tdb_add_flags(struct tdb_context *tdb, unsigned flags)
+{
+ if ((flags & TDB_ALLOW_NESTING) &&
+ (flags & TDB_DISALLOW_NESTING)) {
+ tdb->ecode = TDB_ERR_NESTING;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_add_flags: "
+ "allow_nesting and disallow_nesting are not allowed together!"));
+ return;
+ }
+
+ if (flags & TDB_ALLOW_NESTING) {
+ tdb->flags &= ~TDB_DISALLOW_NESTING;
+ }
+ if (flags & TDB_DISALLOW_NESTING) {
+ tdb->flags &= ~TDB_ALLOW_NESTING;
+ }
+
+ tdb->flags |= flags;
+}
+
+_PUBLIC_ void tdb_remove_flags(struct tdb_context *tdb, unsigned flags)
+{
+ if ((flags & TDB_ALLOW_NESTING) &&
+ (flags & TDB_DISALLOW_NESTING)) {
+ tdb->ecode = TDB_ERR_NESTING;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_remove_flags: "
+ "allow_nesting and disallow_nesting are not allowed together!"));
+ return;
+ }
+
+ if ((flags & TDB_NOLOCK) &&
+ (tdb->feature_flags & TDB_FEATURE_FLAG_MUTEX) &&
+ (tdb->mutexes == NULL)) {
+ tdb->ecode = TDB_ERR_LOCK;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_remove_flags: "
+ "Can not remove NOLOCK flag on mutexed databases"));
+ return;
+ }
+
+ if (flags & TDB_ALLOW_NESTING) {
+ tdb->flags |= TDB_DISALLOW_NESTING;
+ }
+ if (flags & TDB_DISALLOW_NESTING) {
+ tdb->flags |= TDB_ALLOW_NESTING;
+ }
+
+ tdb->flags &= ~flags;
+}
+
+
+/*
+ enable sequence number handling on an open tdb
+*/
+_PUBLIC_ void tdb_enable_seqnum(struct tdb_context *tdb)
+{
+ tdb->flags |= TDB_SEQNUM;
+}
+
+
+/*
+ add a region of the file to the freelist. Length is the size of the region in bytes,
+ which includes the free list header that needs to be added
+ */
+static int tdb_free_region(struct tdb_context *tdb, tdb_off_t offset, ssize_t length)
+{
+ struct tdb_record rec;
+ if (length <= sizeof(rec)) {
+ /* the region is not worth adding */
+ return 0;
+ }
+ if (length + offset > tdb->map_size) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_free_region: adding region beyond end of file\n"));
+ return -1;
+ }
+ memset(&rec,'\0',sizeof(rec));
+ rec.rec_len = length - sizeof(rec);
+ if (tdb_free(tdb, offset, &rec) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_free_region: failed to add free record\n"));
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ wipe the entire database, deleting all records. This can be done
+  very fast by using an allrecord lock. The entire data portion of the
+ file becomes a single entry in the freelist.
+
+ This code carefully steps around the recovery area, leaving it alone
+ */
+_PUBLIC_ int tdb_wipe_all(struct tdb_context *tdb)
+{
+ uint32_t i;
+ tdb_off_t offset = 0;
+ ssize_t data_len;
+ tdb_off_t recovery_head;
+ tdb_len_t recovery_size = 0;
+
+ if (tdb_lockall(tdb) != 0) {
+ return -1;
+ }
+
+ tdb_trace(tdb, "tdb_wipe_all");
+
+ /* see if the tdb has a recovery area, and remember its size
+ if so. We don't want to lose this as otherwise each
+ tdb_wipe_all() in a transaction will increase the size of
+ the tdb by the size of the recovery area */
+ if (tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, &recovery_head) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_wipe_all: failed to read recovery head\n"));
+ goto failed;
+ }
+
+ if (recovery_head != 0) {
+ struct tdb_record rec;
+ if (tdb->methods->tdb_read(tdb, recovery_head, &rec, sizeof(rec), DOCONV()) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_wipe_all: failed to read recovery record\n"));
+ return -1;
+ }
+ recovery_size = rec.rec_len + sizeof(rec);
+ }
+
+ /* wipe the hashes */
+ for (i=0;i<tdb->hash_size;i++) {
+ if (tdb_ofs_write(tdb, TDB_HASH_TOP(i), &offset) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_wipe_all: failed to write hash %d\n", i));
+ goto failed;
+ }
+ }
+
+ /* wipe the freelist */
+ if (tdb_ofs_write(tdb, FREELIST_TOP, &offset) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_wipe_all: failed to write freelist\n"));
+ goto failed;
+ }
+
+ /* add all the rest of the file to the freelist, possibly leaving a gap
+ for the recovery area */
+ if (recovery_size == 0) {
+ /* the simple case - the whole file can be used as a freelist */
+ data_len = (tdb->map_size - TDB_DATA_START(tdb->hash_size));
+ if (tdb_free_region(tdb, TDB_DATA_START(tdb->hash_size), data_len) != 0) {
+ goto failed;
+ }
+ } else {
+ /* we need to add two freelist entries - one on either
+ side of the recovery area
+
+ Note that we cannot shift the recovery area during
+ this operation. Only the transaction.c code may
+ move the recovery area or we risk subtle data
+ corruption
+ */
+ data_len = (recovery_head - TDB_DATA_START(tdb->hash_size));
+ if (tdb_free_region(tdb, TDB_DATA_START(tdb->hash_size), data_len) != 0) {
+ goto failed;
+ }
+ /* and the 2nd free list entry after the recovery area - if any */
+ data_len = tdb->map_size - (recovery_head+recovery_size);
+ if (tdb_free_region(tdb, recovery_head+recovery_size, data_len) != 0) {
+ goto failed;
+ }
+ }
+
+ tdb_increment_seqnum_nonblock(tdb);
+
+ if (tdb_unlockall(tdb) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_wipe_all: failed to unlock\n"));
+ goto failed;
+ }
+
+ return 0;
+
+failed:
+ tdb_unlockall(tdb);
+ return -1;
+}
+
+struct traverse_state {
+ bool error;
+ struct tdb_context *dest_db;
+};
+
+/*
+ traverse function for repacking
+ */
+static int repack_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data, void *private_data)
+{
+ struct traverse_state *state = (struct traverse_state *)private_data;
+ if (tdb_store(state->dest_db, key, data, TDB_INSERT) != 0) {
+ state->error = true;
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ repack a tdb
+ */
+_PUBLIC_ int tdb_repack(struct tdb_context *tdb)
+{
+ struct tdb_context *tmp_db;
+ struct traverse_state state;
+
+ tdb_trace(tdb, "tdb_repack");
+
+ if (tdb_transaction_start(tdb) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Failed to start transaction\n"));
+ return -1;
+ }
+
+ tmp_db = tdb_open("tmpdb", tdb_hash_size(tdb), TDB_INTERNAL, O_RDWR|O_CREAT, 0);
+ if (tmp_db == NULL) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Failed to create tmp_db\n"));
+ tdb_transaction_cancel(tdb);
+ return -1;
+ }
+
+ state.error = false;
+ state.dest_db = tmp_db;
+
+ if (tdb_traverse_read(tdb, repack_traverse, &state) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Failed to traverse copying out\n"));
+ tdb_transaction_cancel(tdb);
+ tdb_close(tmp_db);
+ return -1;
+ }
+
+ if (state.error) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Error during traversal\n"));
+ tdb_transaction_cancel(tdb);
+ tdb_close(tmp_db);
+ return -1;
+ }
+
+ if (tdb_wipe_all(tdb) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Failed to wipe database\n"));
+ tdb_transaction_cancel(tdb);
+ tdb_close(tmp_db);
+ return -1;
+ }
+
+ state.error = false;
+ state.dest_db = tdb;
+
+ if (tdb_traverse_read(tmp_db, repack_traverse, &state) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Failed to traverse copying back\n"));
+ tdb_transaction_cancel(tdb);
+ tdb_close(tmp_db);
+ return -1;
+ }
+
+ if (state.error) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Error during second traversal\n"));
+ tdb_transaction_cancel(tdb);
+ tdb_close(tmp_db);
+ return -1;
+ }
+
+ tdb_close(tmp_db);
+
+ if (tdb_transaction_commit(tdb) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Failed to commit\n"));
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Even on files, we can get partial writes due to signals. */
+bool tdb_write_all(int fd, const void *buf, size_t count)
+{
+ while (count) {
+ ssize_t ret;
+ ret = write(fd, buf, count);
+ if (ret < 0)
+ return false;
+ buf = (const char *)buf + ret;
+ count -= ret;
+ }
+ return true;
+}
+
+bool tdb_add_off_t(tdb_off_t a, tdb_off_t b, tdb_off_t *pret)
+{
+ tdb_off_t ret = a + b;
+
+ if ((ret < a) || (ret < b)) {
+ return false;
+ }
+ *pret = ret;
+ return true;
+}
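+
+/*
+  Illustrative sketch, not part of the library: how internal callers are
+  expected to guard an offset sum before trusting it; "rec_ptr" and
+  "rec_len" are assumptions.
+
+    tdb_off_t end;
+    if (!tdb_add_off_t(rec_ptr, rec_len, &end)) {
+        tdb->ecode = TDB_ERR_CORRUPT;   the 32-bit sum wrapped
+        return -1;
+    }
+*/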
+
+#ifdef TDB_TRACE
+static void tdb_trace_write(struct tdb_context *tdb, const char *str)
+{
+ if (!tdb_write_all(tdb->tracefd, str, strlen(str))) {
+ close(tdb->tracefd);
+ tdb->tracefd = -1;
+ }
+}
+
+static void tdb_trace_start(struct tdb_context *tdb)
+{
+ tdb_off_t seqnum=0;
+ char msg[sizeof(tdb_off_t) * 4 + 1];
+
+ tdb_ofs_read(tdb, TDB_SEQNUM_OFS, &seqnum);
+ snprintf(msg, sizeof(msg), "%u ", seqnum);
+ tdb_trace_write(tdb, msg);
+}
+
+static void tdb_trace_end(struct tdb_context *tdb)
+{
+ tdb_trace_write(tdb, "\n");
+}
+
+static void tdb_trace_end_ret(struct tdb_context *tdb, int ret)
+{
+ char msg[sizeof(ret) * 4 + 4];
+ snprintf(msg, sizeof(msg), " = %i\n", ret);
+ tdb_trace_write(tdb, msg);
+}
+
+static void tdb_trace_record(struct tdb_context *tdb, TDB_DATA rec)
+{
+ char msg[20 + rec.dsize*2], *p;
+ unsigned int i;
+
+ /* We differentiate zero-length records from non-existent ones. */
+ if (rec.dptr == NULL) {
+ tdb_trace_write(tdb, " NULL");
+ return;
+ }
+
+	/* each byte needs two hex digits; size 3 leaves room for the
+	 * terminating NUL, which the next iteration overwrites */
+	p = msg;
+	p += snprintf(p, sizeof(msg), " %zu:", rec.dsize);
+	for (i = 0; i < rec.dsize; i++)
+		p += snprintf(p, 3, "%02x", rec.dptr[i]);
+
+ tdb_trace_write(tdb, msg);
+}
+
+void tdb_trace(struct tdb_context *tdb, const char *op)
+{
+ tdb_trace_start(tdb);
+ tdb_trace_write(tdb, op);
+ tdb_trace_end(tdb);
+}
+
+void tdb_trace_seqnum(struct tdb_context *tdb, uint32_t seqnum, const char *op)
+{
+ char msg[sizeof(tdb_off_t) * 4 + 1];
+
+ snprintf(msg, sizeof(msg), "%u ", seqnum);
+ tdb_trace_write(tdb, msg);
+ tdb_trace_write(tdb, op);
+ tdb_trace_end(tdb);
+}
+
+void tdb_trace_open(struct tdb_context *tdb, const char *op,
+ unsigned hash_size, unsigned tdb_flags, unsigned open_flags)
+{
+ char msg[128];
+
+ snprintf(msg, sizeof(msg),
+ "%s %u 0x%x 0x%x", op, hash_size, tdb_flags, open_flags);
+ tdb_trace_start(tdb);
+ tdb_trace_write(tdb, msg);
+ tdb_trace_end(tdb);
+}
+
+void tdb_trace_ret(struct tdb_context *tdb, const char *op, int ret)
+{
+ tdb_trace_start(tdb);
+ tdb_trace_write(tdb, op);
+ tdb_trace_end_ret(tdb, ret);
+}
+
+void tdb_trace_retrec(struct tdb_context *tdb, const char *op, TDB_DATA ret)
+{
+ tdb_trace_start(tdb);
+ tdb_trace_write(tdb, op);
+ tdb_trace_write(tdb, " =");
+ tdb_trace_record(tdb, ret);
+ tdb_trace_end(tdb);
+}
+
+void tdb_trace_1rec(struct tdb_context *tdb, const char *op,
+ TDB_DATA rec)
+{
+ tdb_trace_start(tdb);
+ tdb_trace_write(tdb, op);
+ tdb_trace_record(tdb, rec);
+ tdb_trace_end(tdb);
+}
+
+void tdb_trace_1rec_ret(struct tdb_context *tdb, const char *op,
+ TDB_DATA rec, int ret)
+{
+ tdb_trace_start(tdb);
+ tdb_trace_write(tdb, op);
+ tdb_trace_record(tdb, rec);
+ tdb_trace_end_ret(tdb, ret);
+}
+
+void tdb_trace_1rec_retrec(struct tdb_context *tdb, const char *op,
+ TDB_DATA rec, TDB_DATA ret)
+{
+ tdb_trace_start(tdb);
+ tdb_trace_write(tdb, op);
+ tdb_trace_record(tdb, rec);
+ tdb_trace_write(tdb, " =");
+ tdb_trace_record(tdb, ret);
+ tdb_trace_end(tdb);
+}
+
+void tdb_trace_2rec_flag_ret(struct tdb_context *tdb, const char *op,
+ TDB_DATA rec1, TDB_DATA rec2, unsigned flag,
+ int ret)
+{
+ char msg[1 + sizeof(ret) * 4];
+
+ snprintf(msg, sizeof(msg), " %#x", flag);
+ tdb_trace_start(tdb);
+ tdb_trace_write(tdb, op);
+ tdb_trace_record(tdb, rec1);
+ tdb_trace_record(tdb, rec2);
+ tdb_trace_write(tdb, msg);
+ tdb_trace_end_ret(tdb, ret);
+}
+
+void tdb_trace_1plusn_rec_flag_ret(struct tdb_context *tdb, const char *op,
+ TDB_DATA rec,
+ const TDB_DATA *recs, int num_recs,
+ unsigned flag, int ret)
+{
+ char msg[1 + sizeof(ret) * 4];
+ int i;
+
+ snprintf(msg, sizeof(msg), " %#x", flag);
+ tdb_trace_start(tdb);
+ tdb_trace_write(tdb, op);
+ tdb_trace_record(tdb, rec);
+ for (i=0; i<num_recs; i++) {
+ tdb_trace_record(tdb, recs[i]);
+ }
+ tdb_trace_write(tdb, msg);
+ tdb_trace_end_ret(tdb, ret);
+}
+
+void tdb_trace_2rec_retrec(struct tdb_context *tdb, const char *op,
+ TDB_DATA rec1, TDB_DATA rec2, TDB_DATA ret)
+{
+ tdb_trace_start(tdb);
+ tdb_trace_write(tdb, op);
+ tdb_trace_record(tdb, rec1);
+ tdb_trace_record(tdb, rec2);
+ tdb_trace_write(tdb, " =");
+ tdb_trace_record(tdb, ret);
+ tdb_trace_end(tdb);
+}
+#endif
diff --git a/lib/tdb/common/tdb_private.h b/lib/tdb/common/tdb_private.h
new file mode 100644
index 0000000..2979043
--- /dev/null
+++ b/lib/tdb/common/tdb_private.h
@@ -0,0 +1,370 @@
+#ifndef TDB_PRIVATE_H
+#define TDB_PRIVATE_H
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library - private includes
+
+ Copyright (C) Andrew Tridgell 2005
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "replace.h"
+#include "system/filesys.h"
+#include "system/time.h"
+#include "system/shmem.h"
+#include "system/select.h"
+#include "system/wait.h"
+#include "tdb.h"
+
+/* #define TDB_TRACE 1 */
+#ifndef HAVE_GETPAGESIZE
+#define getpagesize() 0x2000
+#endif
+
+typedef uint32_t tdb_len_t;
+typedef uint32_t tdb_off_t;
+
+#ifndef offsetof
+#define offsetof(t,f) ((unsigned int)&((t *)0)->f)
+#endif
+
+#define TDB_MAGIC_FOOD "TDB file\n"
+#define TDB_VERSION (0x26011967 + 6)
+#define TDB_MAGIC (0x26011999U)
+#define TDB_FREE_MAGIC (~TDB_MAGIC)
+#define TDB_DEAD_MAGIC (0xFEE1DEAD)
+#define TDB_RECOVERY_MAGIC (0xf53bc0e7U)
+#define TDB_RECOVERY_INVALID_MAGIC (0x0)
+#define TDB_HASH_RWLOCK_MAGIC (0xbad1a51U)
+#define TDB_FEATURE_FLAG_MAGIC (0xbad1a52U)
+#define TDB_ALIGNMENT 4
+#define DEFAULT_HASH_SIZE 131
+#define FREELIST_TOP (sizeof(struct tdb_header))
+#define TDB_ALIGN(x,a) (((x) + (a)-1) & ~((a)-1))
+#define TDB_BYTEREV(x) (((((x)&0xff)<<24)|((x)&0xFF00)<<8)|(((x)>>8)&0xFF00)|((x)>>24))
+#define TDB_DEAD(r) ((r)->magic == TDB_DEAD_MAGIC)
+#define TDB_BAD_MAGIC(r) ((r)->magic != TDB_MAGIC && !TDB_DEAD(r))
+#define TDB_HASH_TOP(hash) (FREELIST_TOP + (BUCKET(hash)+1)*sizeof(tdb_off_t))
+#define TDB_HASHTABLE_SIZE(tdb) ((tdb->hash_size+1)*sizeof(tdb_off_t))
+#define TDB_DATA_START(hash_size) (TDB_HASH_TOP(hash_size-1) + sizeof(tdb_off_t))
+#define TDB_RECOVERY_HEAD offsetof(struct tdb_header, recovery_start)
+#define TDB_SEQNUM_OFS offsetof(struct tdb_header, sequence_number)
+#define TDB_PAD_BYTE 0x42
+#define TDB_PAD_U32 0x42424242
+
+#define TDB_FEATURE_FLAG_MUTEX 0x00000001
+
+#define TDB_SUPPORTED_FEATURE_FLAGS ( \
+ TDB_FEATURE_FLAG_MUTEX | \
+ 0)
+
+/* NB assumes there is a local variable called "tdb" that is the
+ * current context, also takes doubly-parenthesized print-style
+ * argument. */
+#define TDB_LOG(x) tdb->log.log_fn x
+
+#ifdef TDB_TRACE
+void tdb_trace(struct tdb_context *tdb, const char *op);
+void tdb_trace_seqnum(struct tdb_context *tdb, uint32_t seqnum, const char *op);
+void tdb_trace_open(struct tdb_context *tdb, const char *op,
+ unsigned hash_size, unsigned tdb_flags, unsigned open_flags);
+void tdb_trace_ret(struct tdb_context *tdb, const char *op, int ret);
+void tdb_trace_retrec(struct tdb_context *tdb, const char *op, TDB_DATA ret);
+void tdb_trace_1rec(struct tdb_context *tdb, const char *op,
+ TDB_DATA rec);
+void tdb_trace_1rec_ret(struct tdb_context *tdb, const char *op,
+ TDB_DATA rec, int ret);
+void tdb_trace_1rec_retrec(struct tdb_context *tdb, const char *op,
+ TDB_DATA rec, TDB_DATA ret);
+void tdb_trace_2rec_flag_ret(struct tdb_context *tdb, const char *op,
+ TDB_DATA rec1, TDB_DATA rec2, unsigned flag,
+ int ret);
+void tdb_trace_1plusn_rec_flag_ret(struct tdb_context *tdb, const char *op,
+ TDB_DATA rec,
+ const TDB_DATA *recs, int num_recs,
+ unsigned flag, int ret);
+void tdb_trace_2rec_retrec(struct tdb_context *tdb, const char *op,
+ TDB_DATA rec1, TDB_DATA rec2, TDB_DATA ret);
+#else
+#define tdb_trace(tdb, op)
+#define tdb_trace_seqnum(tdb, seqnum, op)
+#define tdb_trace_open(tdb, op, hash_size, tdb_flags, open_flags)
+#define tdb_trace_ret(tdb, op, ret)
+#define tdb_trace_retrec(tdb, op, ret)
+#define tdb_trace_1rec(tdb, op, rec)
+#define tdb_trace_1rec_ret(tdb, op, rec, ret)
+#define tdb_trace_1rec_retrec(tdb, op, rec, ret)
+#define tdb_trace_2rec_flag_ret(tdb, op, rec1, rec2, flag, ret)
+#define tdb_trace_1plusn_rec_flag_ret(tdb, op, rec, recs, num_recs, flag, ret)
+#define tdb_trace_2rec_retrec(tdb, op, rec1, rec2, ret)
+#endif /* !TDB_TRACE */
+
+/* lock offsets */
+#define OPEN_LOCK 0
+#define ACTIVE_LOCK 4
+#define TRANSACTION_LOCK 8
+
+/* free memory if the pointer is valid and zero the pointer */
+#ifndef SAFE_FREE
+#define SAFE_FREE(x) do { if ((x) != NULL) {free(x); (x)=NULL;} } while(0)
+#endif
+
+/*
+ * Note: the BUCKET macro is broken as it returns an unexpected result when
+ * called as BUCKET(-1) for the freelist:
+ *
+ * -1 is sign converted to an unsigned int 4294967295 and then the modulo
+ * tdb->hashtable_size is computed. So with a hashtable_size of 10 the result
+ * is
+ *
+ * 4294967295 % hashtable_size = 5.
+ *
+ * where it should be -1 (C uses symmetric modulo).
+ *
+ * As all callers will lock the same wrong list consistently locking is still
+ * consistent. We can not change this without an incompatible on-disk format
+ * change, otherwise different tdb versions would use incompatible locking.
+ */
+#define BUCKET(hash) ((hash) % tdb->hash_size)
+
+#define DOCONV() (tdb->flags & TDB_CONVERT)
+#define CONVERT(x) (DOCONV() ? tdb_convert(&x, sizeof(x)) : &x)
+
+
+/* the body of the database is made of one tdb_record for the free space
+ plus a separate data list for each hash value */
+struct tdb_record {
+ tdb_off_t next; /* offset of the next record in the list */
+ tdb_len_t rec_len; /* total byte length of record */
+ tdb_len_t key_len; /* byte length of key */
+ tdb_len_t data_len; /* byte length of data */
+ uint32_t full_hash; /* the full 32 bit hash of the key */
+ uint32_t magic; /* try to catch errors */
+ /* the following union is implied:
+ union {
+ char record[rec_len];
+ struct {
+ char key[key_len];
+ char data[data_len];
+ }
+ uint32_t totalsize; (tailer)
+ }
+ */
+};
+
+
+/* this is stored at the front of every database */
+struct tdb_header {
+ char magic_food[32]; /* for /etc/magic */
+ uint32_t version; /* version of the code */
+ uint32_t hash_size; /* number of hash entries */
+ tdb_off_t rwlocks; /* obsolete - kept to detect old formats */
+ tdb_off_t recovery_start; /* offset of transaction recovery region */
+ tdb_off_t sequence_number; /* used when TDB_SEQNUM is set */
+ uint32_t magic1_hash; /* hash of TDB_MAGIC_FOOD. */
+ uint32_t magic2_hash; /* hash of TDB_MAGIC. */
+ uint32_t feature_flags;
+ tdb_len_t mutex_size; /* set if TDB_FEATURE_FLAG_MUTEX is set */
+ tdb_off_t reserved[25];
+};
+
+struct tdb_lock_type {
+ uint32_t off;
+ uint32_t count;
+ uint32_t ltype;
+};
+
+struct tdb_chainwalk_ctx {
+ tdb_off_t slow_ptr;
+ bool slow_chase;
+};
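+
+/*
+ * The chainwalk context (above) and tdb_chainwalk_check() (declared
+ * below) appear to implement a classic slow/fast two-pointer cycle
+ * check: the caller walks a hash chain one record at a time and feeds
+ * every next pointer to the check, which advances its slow pointer only
+ * on every second call. A corrupted chain that loops back on itself
+ * therefore makes the walker meet the slow pointer, and the check fails
+ * instead of spinning forever.
+ */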
+
+struct tdb_traverse_lock {
+ struct tdb_traverse_lock *next;
+ uint32_t off;
+ uint32_t list;
+ int lock_rw;
+};
+
+void tdb_chainwalk_init(struct tdb_chainwalk_ctx *ctx, tdb_off_t ptr);
+bool tdb_chainwalk_check(struct tdb_context *tdb,
+ struct tdb_chainwalk_ctx *ctx,
+ tdb_off_t next_ptr);
+
+enum tdb_lock_flags {
+ /* WAIT == F_SETLKW, NOWAIT == F_SETLK */
+ TDB_LOCK_NOWAIT = 0,
+ TDB_LOCK_WAIT = 1,
+ /* If set, don't log an error on failure. */
+ TDB_LOCK_PROBE = 2,
+ /* If set, don't actually lock at all. */
+ TDB_LOCK_MARK_ONLY = 4,
+};
+
+struct tdb_methods {
+ int (*tdb_read)(struct tdb_context *, tdb_off_t , void *, tdb_len_t , int );
+ int (*tdb_write)(struct tdb_context *, tdb_off_t, const void *, tdb_len_t);
+ void (*next_hash_chain)(struct tdb_context *, uint32_t *);
+ int (*tdb_oob)(struct tdb_context *, tdb_off_t , tdb_len_t, int );
+ int (*tdb_expand_file)(struct tdb_context *, tdb_off_t , tdb_off_t );
+};
+
+struct tdb_mutexes;
+
+struct tdb_context {
+ char *name; /* the name of the database */
+ void *map_ptr; /* where it is currently mapped */
+ int fd; /* open file descriptor for the database */
+ tdb_len_t map_size; /* how much space has been mapped */
+ int read_only; /* opened read-only */
+ int traverse_read; /* read-only traversal */
+ int traverse_write; /* read-write traversal */
+ struct tdb_lock_type allrecord_lock; /* .offset == upgradable */
+ int num_lockrecs;
+ struct tdb_lock_type *lockrecs; /* only real locks, all with count>0 */
+ int lockrecs_array_length;
+
+ tdb_off_t hdr_ofs; /* this is 0 or header.mutex_size */
+ struct tdb_mutexes *mutexes; /* mmap of the mutex area */
+
+ enum TDB_ERROR ecode; /* error code for last tdb error */
+ uint32_t hash_size;
+ uint32_t feature_flags;
+ uint32_t flags; /* the flags passed to tdb_open */
+ struct tdb_traverse_lock travlocks; /* current traversal locks */
+ struct tdb_context *next; /* all tdbs to avoid multiple opens */
+ dev_t device; /* uniquely identifies this tdb */
+ ino_t inode; /* uniquely identifies this tdb */
+ struct tdb_logging_context log;
+ unsigned int (*hash_fn)(TDB_DATA *key);
+ int open_flags; /* flags used in the open - needed by reopen */
+ const struct tdb_methods *methods;
+ struct tdb_transaction *transaction;
+ int page_size;
+ int max_dead_records;
+#ifdef TDB_TRACE
+ int tracefd;
+#endif
+ volatile sig_atomic_t *interrupt_sig_ptr;
+};
+
+
+/*
+ internal prototypes
+*/
+int tdb_munmap(struct tdb_context *tdb);
+int tdb_mmap(struct tdb_context *tdb);
+int tdb_lock(struct tdb_context *tdb, int list, int ltype);
+int tdb_lock_nonblock(struct tdb_context *tdb, int list, int ltype);
+int tdb_nest_lock(struct tdb_context *tdb, uint32_t offset, int ltype,
+ enum tdb_lock_flags flags);
+int tdb_nest_unlock(struct tdb_context *tdb, uint32_t offset, int ltype,
+ bool mark_lock);
+int tdb_unlock(struct tdb_context *tdb, int list, int ltype);
+int tdb_brlock(struct tdb_context *tdb,
+ int rw_type, tdb_off_t offset, size_t len,
+ enum tdb_lock_flags flags);
+int tdb_brunlock(struct tdb_context *tdb,
+ int rw_type, tdb_off_t offset, size_t len);
+bool tdb_have_extra_locks(struct tdb_context *tdb);
+void tdb_release_transaction_locks(struct tdb_context *tdb);
+int tdb_transaction_lock(struct tdb_context *tdb, int ltype,
+ enum tdb_lock_flags lockflags);
+int tdb_transaction_unlock(struct tdb_context *tdb, int ltype);
+int tdb_recovery_area(struct tdb_context *tdb,
+ const struct tdb_methods *methods,
+ tdb_off_t *recovery_offset,
+ struct tdb_record *rec);
+int tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
+ enum tdb_lock_flags flags, bool upgradable);
+int tdb_allrecord_unlock(struct tdb_context *tdb, int ltype, bool mark_lock);
+int tdb_allrecord_upgrade(struct tdb_context *tdb);
+int tdb_write_lock_record(struct tdb_context *tdb, tdb_off_t off);
+int tdb_write_unlock_record(struct tdb_context *tdb, tdb_off_t off);
+int tdb_ofs_read(struct tdb_context *tdb, tdb_off_t offset, tdb_off_t *d);
+int tdb_ofs_write(struct tdb_context *tdb, tdb_off_t offset, tdb_off_t *d);
+void *tdb_convert(void *buf, uint32_t size);
+int tdb_free(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec);
+tdb_off_t tdb_allocate(struct tdb_context *tdb, int hash, tdb_len_t length,
+ struct tdb_record *rec);
+
+int _tdb_oob(struct tdb_context *tdb, tdb_off_t off, tdb_len_t len, int probe);
+
+static inline int tdb_oob(
+ struct tdb_context *tdb, tdb_off_t off, tdb_len_t len, int probe)
+{
+ if (likely((off + len >= off) && (off + len <= tdb->map_size))) {
+ return 0;
+ }
+ return _tdb_oob(tdb, off, len, probe);
+}
+
+
+int tdb_ofs_read(struct tdb_context *tdb, tdb_off_t offset, tdb_off_t *d);
+int tdb_ofs_write(struct tdb_context *tdb, tdb_off_t offset, tdb_off_t *d);
+int tdb_lock_record(struct tdb_context *tdb, tdb_off_t off);
+int tdb_unlock_record(struct tdb_context *tdb, tdb_off_t off);
+bool tdb_needs_recovery(struct tdb_context *tdb);
+int tdb_rec_read(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec);
+int tdb_rec_write(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec);
+unsigned char *tdb_alloc_read(struct tdb_context *tdb, tdb_off_t offset, tdb_len_t len);
+int tdb_parse_data(struct tdb_context *tdb, TDB_DATA key,
+ tdb_off_t offset, tdb_len_t len,
+ int (*parser)(TDB_DATA key, TDB_DATA data,
+ void *private_data),
+ void *private_data);
+tdb_off_t tdb_find_lock_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash, int locktype,
+ struct tdb_record *rec);
+tdb_off_t tdb_find_dead(struct tdb_context *tdb, uint32_t hash,
+ struct tdb_record *r, tdb_len_t length,
+ tdb_off_t *p_last_ptr);
+int tdb_trim_dead(struct tdb_context *tdb, uint32_t hash);
+void tdb_io_init(struct tdb_context *tdb);
+int tdb_expand(struct tdb_context *tdb, tdb_off_t size);
+tdb_off_t tdb_expand_adjust(tdb_off_t map_size, tdb_off_t size, int page_size);
+int tdb_rec_free_read(struct tdb_context *tdb, tdb_off_t off,
+ struct tdb_record *rec);
+bool tdb_write_all(int fd, const void *buf, size_t count);
+int tdb_transaction_recover(struct tdb_context *tdb);
+void tdb_header_hash(struct tdb_context *tdb,
+ uint32_t *magic1_hash, uint32_t *magic2_hash);
+unsigned int tdb_old_hash(TDB_DATA *key);
+size_t tdb_dead_space(struct tdb_context *tdb, tdb_off_t off);
+bool tdb_add_off_t(tdb_off_t a, tdb_off_t b, tdb_off_t *pret);
+
+/* tdb_off_t and tdb_len_t right now are both uint32_t */
+#define tdb_add_len_t tdb_add_off_t
+
+size_t tdb_mutex_size(struct tdb_context *tdb);
+bool tdb_have_mutexes(struct tdb_context *tdb);
+int tdb_mutex_init(struct tdb_context *tdb);
+int tdb_mutex_mmap(struct tdb_context *tdb);
+int tdb_mutex_munmap(struct tdb_context *tdb);
+bool tdb_mutex_lock(struct tdb_context *tdb, int rw, off_t off, off_t len,
+ bool waitflag, int *pret);
+bool tdb_mutex_unlock(struct tdb_context *tdb, int rw, off_t off, off_t len,
+ int *pret);
+int tdb_mutex_allrecord_lock(struct tdb_context *tdb, int ltype,
+ enum tdb_lock_flags flags);
+int tdb_mutex_allrecord_unlock(struct tdb_context *tdb);
+int tdb_mutex_allrecord_upgrade(struct tdb_context *tdb);
+void tdb_mutex_allrecord_downgrade(struct tdb_context *tdb);
+
+#endif /* TDB_PRIVATE_H */
diff --git a/lib/tdb/common/transaction.c b/lib/tdb/common/transaction.c
new file mode 100644
index 0000000..78bbd7a
--- /dev/null
+++ b/lib/tdb/common/transaction.c
@@ -0,0 +1,1388 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Andrew Tridgell 2005
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "tdb_private.h"
+
+/*
+ transaction design:
+
+ - only allow a single transaction at a time per database. This makes
+ using the transaction API simpler, as otherwise the caller would
+ have to cope with temporary failures in transactions that conflict
+ with other current transactions
+
+ - keep the transaction recovery information in the same file as the
+ database, using a special 'transaction recovery' record pointed at
+ by the header. This removes the need for extra journal files as
+ used by some other databases
+
+  - dynamically allocate the transaction recovery record, re-using it
+ for subsequent transactions. If a larger record is needed then
+ tdb_free() the old record to place it on the normal tdb freelist
+ before allocating the new record
+
+ - during transactions, keep a linked list of all writes that have
+ been performed by intercepting all tdb_write() calls. The hooked
+ transaction versions of tdb_read() and tdb_write() check this
+ linked list and try to use the elements of the list in preference
+ to the real database.
+
+ - don't allow any locks to be held when a transaction starts,
+ otherwise we can end up with deadlock (plus lack of lock nesting
+ in posix locks would mean the lock is lost)
+
+ - if the caller gains a lock during the transaction but doesn't
+ release it then fail the commit
+
+ - allow for nested calls to tdb_transaction_start(), re-using the
+ existing transaction record. If the inner transaction is cancelled
+ then a subsequent commit will fail
+
+ - keep a mirrored copy of the tdb hash chain heads to allow for the
+ fast hash heads scan on traverse, updating the mirrored copy in
+ the transaction version of tdb_write
+
+ - allow callers to mix transaction and non-transaction use of tdb,
+ although once a transaction is started then an exclusive lock is
+ gained until the transaction is committed or cancelled
+
+ - the commit strategy involves first saving away all modified data
+ into a linearised buffer in the transaction recovery area, then
+ marking the transaction recovery area with a magic value to
+ indicate a valid recovery record. In total 4 fsync/msync calls are
+ needed per commit to prevent race conditions. It might be possible
+ to reduce this to 3 or even 2 with some more work.
+
+ - check for a valid recovery record on open of the tdb, while the
+ open lock is held. Automatically recover from the transaction
+ recovery area if needed, then continue with the open as
+ usual. This allows for smooth crash recovery with no administrator
+ intervention.
+
+ - if TDB_NOSYNC is passed to flags in tdb_open then transactions are
+ still available, but no fsync/msync calls are made. This means we
+ are still proof against a process dying during transaction commit,
+ but not against machine reboot.
+
+  - if TDB_ALLOW_NESTING is passed to flags in tdb_open, or added using
+    tdb_add_flags(), transaction nesting is enabled.
+ It resets the TDB_DISALLOW_NESTING flag, as both cannot be used together.
+ The default is that transaction nesting is allowed.
+ Note: this default may change in future versions of tdb.
+
+    Beware: when transactions are nested, a transaction successfully
+ completed with tdb_transaction_commit() can be silently unrolled later.
+
+  - if TDB_DISALLOW_NESTING is passed to flags in tdb_open, or added using
+    tdb_add_flags(), transaction nesting is disabled.
+ It resets the TDB_ALLOW_NESTING flag, as both cannot be used together.
+    An attempt to create a nested transaction will fail with TDB_ERR_NESTING.
+ The default is that transaction nesting is allowed.
+ Note: this default may change in future versions of tdb.
+*/
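+
+/*
+  Illustrative caller-side sketch, not part of the library, of the
+  commit/cancel pattern described above; "db", the keys and the values
+  are assumptions.
+
+    if (tdb_transaction_start(db) == -1) {
+        return -1;
+    }
+    if (tdb_store(db, key1, val1, TDB_REPLACE) == -1 ||
+        tdb_store(db, key2, val2, TDB_REPLACE) == -1) {
+        tdb_transaction_cancel(db);       rolls back both stores
+        return -1;
+    }
+    return tdb_transaction_commit(db);    both stores or neither
+*/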
+
+
+/*
+ hold the context of any current transaction
+*/
+struct tdb_transaction {
+ /* we keep a mirrored copy of the tdb hash heads here so
+ tdb_next_hash_chain() can operate efficiently */
+ uint32_t *hash_heads;
+
+ /* the original io methods - used to do IOs to the real db */
+ const struct tdb_methods *io_methods;
+
+ /* the list of transaction blocks. When a block is first
+ written to, it gets created in this list */
+ uint8_t **blocks;
+ uint32_t num_blocks;
+ uint32_t block_size; /* bytes in each block */
+ uint32_t last_block_size; /* number of valid bytes in the last block */
+
+ /* non-zero when an internal transaction error has
+ occurred. All write operations will then fail until the
+ transaction is ended */
+ int transaction_error;
+
+ /* when inside a transaction we need to keep track of any
+ nested tdb_transaction_start() calls, as these are allowed,
+ but don't create a new transaction */
+ int nesting;
+
+ /* set when a prepare has already occurred */
+ bool prepared;
+ tdb_off_t magic_offset;
+
+ /* old file size before transaction */
+ tdb_len_t old_map_size;
+
+ /* did we expand in this transaction */
+ bool expanded;
+};
+
+
+/*
+ read while in a transaction. We need to check first if the data is in our list
+ of transaction elements, then if not do a real read
+*/
+static int transaction_read(struct tdb_context *tdb, tdb_off_t off, void *buf,
+ tdb_len_t len, int cv)
+{
+ uint32_t blk;
+
+ /* break it down into block sized ops */
+ while (len + (off % tdb->transaction->block_size) > tdb->transaction->block_size) {
+ tdb_len_t len2 = tdb->transaction->block_size - (off % tdb->transaction->block_size);
+ if (transaction_read(tdb, off, buf, len2, cv) != 0) {
+ return -1;
+ }
+ len -= len2;
+ off += len2;
+ buf = (void *)(len2 + (char *)buf);
+ }
+
+ if (len == 0) {
+ return 0;
+ }
+
+ blk = off / tdb->transaction->block_size;
+
+ /* see if we have it in the block list */
+ if (tdb->transaction->num_blocks <= blk ||
+ tdb->transaction->blocks[blk] == NULL) {
+ /* nope, do a real read */
+ if (tdb->transaction->io_methods->tdb_read(tdb, off, buf, len, cv) != 0) {
+ goto fail;
+ }
+ return 0;
+ }
+
+ /* it is in the block list. Now check for the last block */
+ if (blk == tdb->transaction->num_blocks-1) {
+ if (len > tdb->transaction->last_block_size) {
+ goto fail;
+ }
+ }
+
+ /* now copy it out of this block */
+ memcpy(buf, tdb->transaction->blocks[blk] + (off % tdb->transaction->block_size), len);
+ if (cv) {
+ tdb_convert(buf, len);
+ }
+ return 0;
+
+fail:
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "transaction_read: failed at off=%u len=%u\n", off, len));
+ tdb->ecode = TDB_ERR_IO;
+ tdb->transaction->transaction_error = 1;
+ return -1;
+}
+
+
+/*
+ write while in a transaction
+*/
+static int transaction_write(struct tdb_context *tdb, tdb_off_t off,
+ const void *buf, tdb_len_t len)
+{
+ uint32_t blk;
+
+ if (buf == NULL) {
+ return -1;
+ }
+
+ /* Only a commit is allowed on a prepared transaction */
+ if (tdb->transaction->prepared) {
+ tdb->ecode = TDB_ERR_EINVAL;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "transaction_write: transaction already prepared, write not allowed\n"));
+ tdb->transaction->transaction_error = 1;
+ return -1;
+ }
+
+ /* if the write is to a hash head, then update the transaction
+ hash heads */
+ if (len == sizeof(tdb_off_t) && off >= FREELIST_TOP &&
+ off < FREELIST_TOP+TDB_HASHTABLE_SIZE(tdb)) {
+ uint32_t chain = (off-FREELIST_TOP) / sizeof(tdb_off_t);
+ memcpy(&tdb->transaction->hash_heads[chain], buf, len);
+ }
+
+ /* break it up into block sized chunks */
+ while (len + (off % tdb->transaction->block_size) > tdb->transaction->block_size) {
+ tdb_len_t len2 = tdb->transaction->block_size - (off % tdb->transaction->block_size);
+ if (transaction_write(tdb, off, buf, len2) != 0) {
+ return -1;
+ }
+ len -= len2;
+ off += len2;
+ buf = (const void *)(len2 + (const char *)buf);
+ }
+
+ if (len == 0) {
+ return 0;
+ }
+
+ blk = off / tdb->transaction->block_size;
+ off = off % tdb->transaction->block_size;
+
+ if (tdb->transaction->num_blocks <= blk) {
+ uint8_t **new_blocks;
+ /* expand the blocks array */
+ new_blocks = (uint8_t **)realloc(tdb->transaction->blocks,
+ (blk+1)*sizeof(uint8_t *));
+ if (new_blocks == NULL) {
+ tdb->ecode = TDB_ERR_OOM;
+ goto fail;
+ }
+ memset(&new_blocks[tdb->transaction->num_blocks], 0,
+ (1+(blk - tdb->transaction->num_blocks))*sizeof(uint8_t *));
+ tdb->transaction->blocks = new_blocks;
+ tdb->transaction->num_blocks = blk+1;
+ tdb->transaction->last_block_size = 0;
+ }
+
+ /* allocate and fill a block? */
+ if (tdb->transaction->blocks[blk] == NULL) {
+ tdb->transaction->blocks[blk] = (uint8_t *)calloc(tdb->transaction->block_size, 1);
+ if (tdb->transaction->blocks[blk] == NULL) {
+ tdb->ecode = TDB_ERR_OOM;
+ tdb->transaction->transaction_error = 1;
+ return -1;
+ }
+ if (tdb->transaction->old_map_size > blk * tdb->transaction->block_size) {
+ tdb_len_t len2 = tdb->transaction->block_size;
+ if (len2 + (blk * tdb->transaction->block_size) > tdb->transaction->old_map_size) {
+ len2 = tdb->transaction->old_map_size - (blk * tdb->transaction->block_size);
+ }
+ if (tdb->transaction->io_methods->tdb_read(tdb, blk * tdb->transaction->block_size,
+ tdb->transaction->blocks[blk],
+ len2, 0) != 0) {
+ SAFE_FREE(tdb->transaction->blocks[blk]);
+ tdb->ecode = TDB_ERR_IO;
+ goto fail;
+ }
+ if (blk == tdb->transaction->num_blocks-1) {
+ tdb->transaction->last_block_size = len2;
+ }
+ }
+ }
+
+ /* overwrite part of an existing block */
+ memcpy(tdb->transaction->blocks[blk] + off, buf, len);
+ if (blk == tdb->transaction->num_blocks-1) {
+ if (len + off > tdb->transaction->last_block_size) {
+ tdb->transaction->last_block_size = len + off;
+ }
+ }
+
+ return 0;
+
+fail:
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "transaction_write: failed at off=%u len=%u\n",
+ (blk*tdb->transaction->block_size) + off, len));
+ tdb->transaction->transaction_error = 1;
+ return -1;
+}
+
+
+/*
+ write while in a transaction - this variant never expands the transaction blocks, it only
+ updates existing blocks. This means it cannot change the recovery size
+*/
+static int transaction_write_existing(struct tdb_context *tdb, tdb_off_t off,
+ const void *buf, tdb_len_t len)
+{
+ uint32_t blk;
+
+ /* break it up into block sized chunks */
+ while (len + (off % tdb->transaction->block_size) > tdb->transaction->block_size) {
+ tdb_len_t len2 = tdb->transaction->block_size - (off % tdb->transaction->block_size);
+ if (transaction_write_existing(tdb, off, buf, len2) != 0) {
+ return -1;
+ }
+ len -= len2;
+ off += len2;
+ if (buf != NULL) {
+ buf = (const void *)(len2 + (const char *)buf);
+ }
+ }
+
+ if (len == 0 || buf == NULL) {
+ return 0;
+ }
+
+ blk = off / tdb->transaction->block_size;
+ off = off % tdb->transaction->block_size;
+
+ if (tdb->transaction->num_blocks <= blk ||
+ tdb->transaction->blocks[blk] == NULL) {
+ return 0;
+ }
+
+ if (blk == tdb->transaction->num_blocks-1 &&
+ off + len > tdb->transaction->last_block_size) {
+ if (off >= tdb->transaction->last_block_size) {
+ return 0;
+ }
+ len = tdb->transaction->last_block_size - off;
+ }
+
+ /* overwrite part of an existing block */
+ memcpy(tdb->transaction->blocks[blk] + off, buf, len);
+
+ return 0;
+}
+
+
+/*
+ accelerated hash chain head search, using the cached hash heads
+*/
+static void transaction_next_hash_chain(struct tdb_context *tdb, uint32_t *chain)
+{
+ uint32_t h = *chain;
+ for (;h < tdb->hash_size;h++) {
+ /* the +1 takes account of the freelist */
+ if (0 != tdb->transaction->hash_heads[h+1]) {
+ break;
+ }
+ }
+ (*chain) = h;
+}
+
+/*
+ out of bounds check during a transaction
+*/
+static int transaction_oob(struct tdb_context *tdb, tdb_off_t off,
+ tdb_len_t len, int probe)
+{
+ /*
+ * This duplicates functionality from tdb_oob(). Don't remove:
+ * we still have direct callers of tdb->methods->tdb_oob()
+ * inside transaction.c.
+ */
+ if (off + len >= off && off + len <= tdb->map_size) {
+ return 0;
+ }
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+}
+
+/*
+ transaction version of tdb_expand().
+*/
+static int transaction_expand_file(struct tdb_context *tdb, tdb_off_t size,
+ tdb_off_t addition)
+{
+ const char buf_zero[8192] = {0};
+ size_t buf_len = sizeof(buf_zero);
+
+ while (addition > 0) {
+ size_t n = MIN(addition, buf_len);
+ int ret;
+
+ ret = transaction_write(tdb, size, buf_zero, n);
+ if (ret != 0) {
+ return ret;
+ }
+
+ addition -= n;
+ size += n;
+ }
+
+ tdb->transaction->expanded = true;
+
+ return 0;
+}
+
+static const struct tdb_methods transaction_methods = {
+ transaction_read,
+ transaction_write,
+ transaction_next_hash_chain,
+ transaction_oob,
+ transaction_expand_file,
+};
+
+/*
+ * Is a transaction currently active on this context?
+ *
+ */
+_PUBLIC_ bool tdb_transaction_active(struct tdb_context *tdb)
+{
+ return (tdb->transaction != NULL);
+}
+
+/*
+ start a tdb transaction. No token is returned, as only a single
+ transaction is allowed to be pending per tdb_context
+*/
+static int _tdb_transaction_start(struct tdb_context *tdb,
+ enum tdb_lock_flags lockflags)
+{
+ /* some sanity checks */
+ if (tdb->read_only || (tdb->flags & TDB_INTERNAL)
+ || tdb->traverse_read) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_start: cannot start a transaction on a read-only or internal db\n"));
+ tdb->ecode = TDB_ERR_EINVAL;
+ return -1;
+ }
+
+ /* cope with nested tdb_transaction_start() calls */
+ if (tdb->transaction != NULL) {
+ if (!(tdb->flags & TDB_ALLOW_NESTING)) {
+ tdb->ecode = TDB_ERR_NESTING;
+ return -1;
+ }
+ tdb->transaction->nesting++;
+ TDB_LOG((tdb, TDB_DEBUG_TRACE, "tdb_transaction_start: nesting %d\n",
+ tdb->transaction->nesting));
+ return 0;
+ }
+
+ if (tdb_have_extra_locks(tdb)) {
+ /* the caller must not have any locks when starting a
+ transaction as otherwise we'll be screwed by lack
+ of nested locks in posix */
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_start: cannot start a transaction with locks held\n"));
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ if (tdb->travlocks.next != NULL) {
+ /* you cannot use transactions inside a traverse (although you can use
+ traverse inside a transaction) as otherwise you can end up with
+ deadlock */
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_start: cannot start a transaction within a traverse\n"));
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ tdb->transaction = (struct tdb_transaction *)
+ calloc(sizeof(struct tdb_transaction), 1);
+ if (tdb->transaction == NULL) {
+ tdb->ecode = TDB_ERR_OOM;
+ return -1;
+ }
+
+ /* a page at a time seems like a reasonable compromise between compactness and efficiency */
+ tdb->transaction->block_size = tdb->page_size;
+
+ /* get the transaction write lock. This is a blocking lock. As
+ discussed with Volker, there are a number of ways we could
+ make this async, which we will probably do in the future */
+ if (tdb_transaction_lock(tdb, F_WRLCK, lockflags) == -1) {
+ SAFE_FREE(tdb->transaction->blocks);
+ SAFE_FREE(tdb->transaction);
+ if ((lockflags & TDB_LOCK_WAIT) == 0) {
+ tdb->ecode = TDB_ERR_NOLOCK;
+ } else {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "tdb_transaction_start: "
+ "failed to get transaction lock\n"));
+ }
+ return -1;
+ }
+
+ /* get a read lock from the freelist to the end of file. This
+ is upgraded to a write lock during the commit */
+ if (tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, true) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_start: failed to get hash locks\n"));
+ goto fail_allrecord_lock;
+ }
+
+ /* setup a copy of the hash table heads so the hash scan in
+ traverse can be fast */
+ tdb->transaction->hash_heads = (uint32_t *)
+ calloc(tdb->hash_size+1, sizeof(uint32_t));
+ if (tdb->transaction->hash_heads == NULL) {
+ tdb->ecode = TDB_ERR_OOM;
+ goto fail;
+ }
+ if (tdb->methods->tdb_read(tdb, FREELIST_TOP, tdb->transaction->hash_heads,
+ TDB_HASHTABLE_SIZE(tdb), 0) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_start: failed to read hash heads\n"));
+ tdb->ecode = TDB_ERR_IO;
+ goto fail;
+ }
+
+ /* make sure we know about any file expansions already done by
+ anyone else */
+ tdb_oob(tdb, tdb->map_size, 1, 1);
+ tdb->transaction->old_map_size = tdb->map_size;
+
+ /* finally hook the io methods, replacing them with
+ transaction specific methods */
+ tdb->transaction->io_methods = tdb->methods;
+ tdb->methods = &transaction_methods;
+
+ /* Trace at the end, so we get the sequence number correct. */
+ tdb_trace(tdb, "tdb_transaction_start");
+ return 0;
+
+fail:
+ tdb_allrecord_unlock(tdb, F_RDLCK, false);
+fail_allrecord_lock:
+ tdb_transaction_unlock(tdb, F_WRLCK);
+ SAFE_FREE(tdb->transaction->blocks);
+ SAFE_FREE(tdb->transaction->hash_heads);
+ SAFE_FREE(tdb->transaction);
+ return -1;
+}
+
+_PUBLIC_ int tdb_transaction_start(struct tdb_context *tdb)
+{
+ return _tdb_transaction_start(tdb, TDB_LOCK_WAIT);
+}
+
+_PUBLIC_ int tdb_transaction_start_nonblock(struct tdb_context *tdb)
+{
+ return _tdb_transaction_start(tdb, TDB_LOCK_NOWAIT|TDB_LOCK_PROBE);
+}
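+
+/* A minimal usage sketch of the transaction API above (illustrative
+ only, not part of the library: it assumes an open tdb context and a
+ key/data pair prepared by the caller; real code would also consult
+ tdb_error() on failure):
+
+ if (tdb_transaction_start(tdb) == -1)
+ return -1;
+ if (tdb_store(tdb, key, data, TDB_REPLACE) == -1) {
+ tdb_transaction_cancel(tdb);
+ return -1;
+ }
+ return tdb_transaction_commit(tdb);
+*/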
+
+/*
+ sync to disk
+*/
+static int transaction_sync(struct tdb_context *tdb, tdb_off_t offset, tdb_len_t length)
+{
+ if (tdb->flags & TDB_NOSYNC) {
+ return 0;
+ }
+
+#ifdef HAVE_FDATASYNC
+ if (fdatasync(tdb->fd) != 0) {
+#else
+ if (fsync(tdb->fd) != 0) {
+#endif
+ tdb->ecode = TDB_ERR_IO;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction: fsync failed\n"));
+ return -1;
+ }
+#ifdef HAVE_MMAP
+ if (tdb->map_ptr) {
+ tdb_off_t moffset = offset & ~(tdb->page_size-1);
+ if (msync(moffset + (char *)tdb->map_ptr,
+ length + (offset - moffset), MS_SYNC) != 0) {
+ tdb->ecode = TDB_ERR_IO;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction: msync failed - %s\n",
+ strerror(errno)));
+ return -1;
+ }
+ }
+#endif
+ return 0;
+}
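+
+/* Note on the sync above: fdatasync() is preferred where available as
+ only the file data (not metadata such as timestamps) has to reach
+ the disk, and the msync() start address is rounded down because
+ msync() requires a page-aligned address. */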
+
+
+static int _tdb_transaction_cancel(struct tdb_context *tdb)
+{
+ uint32_t i;
+ int ret = 0;
+
+ if (tdb->transaction == NULL) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_cancel: no transaction\n"));
+ return -1;
+ }
+
+ if (tdb->transaction->nesting != 0) {
+ tdb->transaction->transaction_error = 1;
+ tdb->transaction->nesting--;
+ return 0;
+ }
+
+ tdb->map_size = tdb->transaction->old_map_size;
+
+ /* free all the transaction blocks */
+ if (tdb->transaction->blocks != NULL) {
+ for (i=0;i<tdb->transaction->num_blocks;i++) {
+ free(tdb->transaction->blocks[i]);
+ }
+ }
+ SAFE_FREE(tdb->transaction->blocks);
+
+ if (tdb->transaction->magic_offset) {
+ const struct tdb_methods *methods = tdb->transaction->io_methods;
+ const uint32_t invalid = TDB_RECOVERY_INVALID_MAGIC;
+
+ /* remove the recovery marker */
+ if (methods->tdb_write(tdb, tdb->transaction->magic_offset, &invalid, 4) == -1 ||
+ transaction_sync(tdb, tdb->transaction->magic_offset, 4) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_cancel: failed to remove recovery magic\n"));
+ ret = -1;
+ }
+ }
+
+ /* This also removes the OPEN_LOCK, if we have it. */
+ tdb_release_transaction_locks(tdb);
+
+ /* restore the normal io methods */
+ tdb->methods = tdb->transaction->io_methods;
+
+ SAFE_FREE(tdb->transaction->hash_heads);
+ SAFE_FREE(tdb->transaction);
+
+ return ret;
+}
+
+/*
+ cancel the current transaction
+*/
+_PUBLIC_ int tdb_transaction_cancel(struct tdb_context *tdb)
+{
+ tdb_trace(tdb, "tdb_transaction_cancel");
+ return _tdb_transaction_cancel(tdb);
+}
+
+/*
+ work out how much space the linearised recovery data will consume
+*/
+static bool tdb_recovery_size(struct tdb_context *tdb, tdb_len_t *result)
+{
+ tdb_len_t recovery_size = 0;
+ uint32_t i;
+
+ recovery_size = sizeof(uint32_t);
+ for (i=0;i<tdb->transaction->num_blocks;i++) {
+ tdb_len_t block_size;
+ if (i * tdb->transaction->block_size >= tdb->transaction->old_map_size) {
+ break;
+ }
+ if (tdb->transaction->blocks[i] == NULL) {
+ continue;
+ }
+ if (!tdb_add_len_t(recovery_size, 2*sizeof(tdb_off_t),
+ &recovery_size)) {
+ return false;
+ }
+ if (i == tdb->transaction->num_blocks-1) {
+ block_size = tdb->transaction->last_block_size;
+ } else {
+ block_size = tdb->transaction->block_size;
+ }
+ if (!tdb_add_len_t(recovery_size, block_size,
+ &recovery_size)) {
+ return false;
+ }
+ }
+
+ *result = recovery_size;
+ return true;
+}
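+
+/* The linearised recovery blob sized above consists, after its record
+ header, of one (offset, length, old data) triple per cached block,
+ followed by a single tailer word:
+
+ [offset0][length0][length0 bytes] ... [offsetN][lengthN][...][tailer]
+
+ hence the initial sizeof(uint32_t) for the tailer and the
+ 2*sizeof(tdb_off_t) added per block in the loop. */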
+
+int tdb_recovery_area(struct tdb_context *tdb,
+ const struct tdb_methods *methods,
+ tdb_off_t *recovery_offset,
+ struct tdb_record *rec)
+{
+ int ret;
+
+ if (tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, recovery_offset) == -1) {
+ return -1;
+ }
+
+ if (*recovery_offset == 0) {
+ rec->rec_len = 0;
+ return 0;
+ }
+
+ if (methods->tdb_read(tdb, *recovery_offset, rec, sizeof(*rec),
+ DOCONV()) == -1) {
+ return -1;
+ }
+
+ /* ignore invalid recovery regions: can happen after a crash */
+ if (rec->magic != TDB_RECOVERY_MAGIC &&
+ rec->magic != TDB_RECOVERY_INVALID_MAGIC) {
+ *recovery_offset = 0;
+ rec->rec_len = 0;
+ }
+
+ ret = methods->tdb_oob(tdb, *recovery_offset, rec->rec_len, 1);
+ if (ret == -1) {
+ *recovery_offset = 0;
+ rec->rec_len = 0;
+ }
+
+ return 0;
+}
+
+/*
+ allocate the recovery area, or use an existing recovery area if it is
+ large enough
+*/
+static int tdb_recovery_allocate(struct tdb_context *tdb,
+ tdb_len_t *recovery_size,
+ tdb_off_t *recovery_offset,
+ tdb_len_t *recovery_max_size)
+{
+ struct tdb_record rec;
+ const struct tdb_methods *methods = tdb->transaction->io_methods;
+ tdb_off_t recovery_head, new_end;
+
+ if (tdb_recovery_area(tdb, methods, &recovery_head, &rec) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to read recovery head\n"));
+ return -1;
+ }
+
+ if (!tdb_recovery_size(tdb, recovery_size)) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: "
+ "overflow recovery size\n"));
+ return -1;
+ }
+
+ /* Existing recovery area? */
+ if (recovery_head != 0 && *recovery_size <= rec.rec_len) {
+ /* it fits in the existing area */
+ *recovery_max_size = rec.rec_len;
+ *recovery_offset = recovery_head;
+ return 0;
+ }
+
+ /* If the recovery area is missing or mid-file, we need a new one. */
+ if (recovery_head == 0
+ || recovery_head + sizeof(rec) + rec.rec_len != tdb->map_size) {
+ /* we need to free up the old recovery area, then allocate a
+ new one at the end of the file. Note that we cannot use
+ tdb_allocate() to allocate the new one as that might return
+ us an area that is being currently used (as of the start of
+ the transaction) */
+ if (recovery_head) {
+ if (tdb_free(tdb, recovery_head, &rec) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,
+ "tdb_recovery_allocate: failed to"
+ " free previous recovery area\n"));
+ return -1;
+ }
+
+ /* the tdb_free() call might have increased
+ * the recovery size */
+ if (!tdb_recovery_size(tdb, recovery_size)) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,
+ "tdb_recovery_allocate: "
+ "overflow recovery size\n"));
+ return -1;
+ }
+ }
+
+ /* New head will be at end of file. */
+ recovery_head = tdb->map_size;
+ }
+
+ /* Now we know where it will be. */
+ *recovery_offset = recovery_head;
+
+ /* Expand by more than we need, so we don't do it often. */
+ *recovery_max_size = tdb_expand_adjust(tdb->map_size,
+ *recovery_size,
+ tdb->page_size)
+ - sizeof(rec);
+
+ if (!tdb_add_off_t(recovery_head, sizeof(rec), &new_end) ||
+ !tdb_add_off_t(new_end, *recovery_max_size, &new_end)) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: "
+ "overflow recovery area\n"));
+ return -1;
+ }
+
+ if (methods->tdb_expand_file(tdb, tdb->transaction->old_map_size,
+ new_end - tdb->transaction->old_map_size)
+ == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to create recovery area\n"));
+ return -1;
+ }
+
+ /* remap the file (if using mmap) */
+ methods->tdb_oob(tdb, tdb->map_size, 1, 1);
+
+ /* we have to reset the old map size so that we don't try to expand the file
+ again in the transaction commit, which would destroy the recovery area */
+ tdb->transaction->old_map_size = tdb->map_size;
+
+ /* write the recovery header offset and sync - we can sync without a race here
+ as the magic ptr in the recovery record has not been set */
+ CONVERT(recovery_head);
+ if (methods->tdb_write(tdb, TDB_RECOVERY_HEAD,
+ &recovery_head, sizeof(tdb_off_t)) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to write recovery head\n"));
+ return -1;
+ }
+ if (transaction_write_existing(tdb, TDB_RECOVERY_HEAD, &recovery_head, sizeof(tdb_off_t)) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to write recovery head\n"));
+ return -1;
+ }
+
+ return 0;
+}
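+
+/* In short: an existing recovery area is reused in place whenever it
+ is large enough; otherwise it is freed (if present) and a fresh,
+ deliberately over-sized area is created at the end of the file,
+ bypassing tdb_allocate() for the reason given above. */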
+
+
+/*
+ setup the recovery data that will be used on a crash during commit
+*/
+static int transaction_setup_recovery(struct tdb_context *tdb,
+ tdb_off_t *magic_offset)
+{
+ tdb_len_t recovery_size;
+ unsigned char *data, *p;
+ const struct tdb_methods *methods = tdb->transaction->io_methods;
+ struct tdb_record *rec;
+ tdb_off_t recovery_offset, recovery_max_size;
+ tdb_off_t old_map_size = tdb->transaction->old_map_size;
+ uint32_t magic, tailer;
+ uint32_t i;
+
+ /*
+ check that the recovery area has enough space
+ */
+ if (tdb_recovery_allocate(tdb, &recovery_size,
+ &recovery_offset, &recovery_max_size) == -1) {
+ return -1;
+ }
+
+ rec = malloc(recovery_size + sizeof(*rec));
+ if (rec == NULL) {
+ tdb->ecode = TDB_ERR_OOM;
+ return -1;
+ }
+
+ memset(rec, 0, sizeof(*rec));
+
+ rec->magic = TDB_RECOVERY_INVALID_MAGIC;
+ rec->data_len = recovery_size;
+ rec->rec_len = recovery_max_size;
+ rec->key_len = old_map_size;
+ CONVERT(*rec);
+
+ data = (unsigned char *)rec;
+
+ /* build the recovery data into a single blob to allow us to do a single
+ large write, which should be more efficient */
+ p = data + sizeof(*rec);
+ for (i=0;i<tdb->transaction->num_blocks;i++) {
+ tdb_off_t offset;
+ tdb_len_t length;
+
+ if (tdb->transaction->blocks[i] == NULL) {
+ continue;
+ }
+
+ offset = i * tdb->transaction->block_size;
+ length = tdb->transaction->block_size;
+ if (i == tdb->transaction->num_blocks-1) {
+ length = tdb->transaction->last_block_size;
+ }
+
+ if (offset >= old_map_size) {
+ continue;
+ }
+ if (offset + length > tdb->transaction->old_map_size) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_setup_recovery: transaction data over new region boundary\n"));
+ free(data);
+ tdb->ecode = TDB_ERR_CORRUPT;
+ return -1;
+ }
+ memcpy(p, &offset, 4);
+ memcpy(p+4, &length, 4);
+ if (DOCONV()) {
+ tdb_convert(p, 8);
+ }
+ /* the recovery area contains the old data, not the
+ new data, so we have to call the original tdb_read
+ method to get it */
+ if (methods->tdb_read(tdb, offset, p + 8, length, 0) != 0) {
+ free(data);
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+ p += 8 + length;
+ }
+
+ /* and the tailer */
+ tailer = sizeof(*rec) + recovery_max_size;
+ memcpy(p, &tailer, 4);
+ if (DOCONV()) {
+ tdb_convert(p, 4);
+ }
+
+ /* write the recovery data to the recovery area */
+ if (methods->tdb_write(tdb, recovery_offset, data, sizeof(*rec) + recovery_size) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_setup_recovery: failed to write recovery data\n"));
+ free(data);
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+ if (transaction_write_existing(tdb, recovery_offset, data, sizeof(*rec) + recovery_size) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_setup_recovery: failed to write secondary recovery data\n"));
+ free(data);
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+
+ /* as we don't have ordered writes, we have to sync the recovery
+ data before we update the magic to indicate that the recovery
+ data is present */
+ if (transaction_sync(tdb, recovery_offset, sizeof(*rec) + recovery_size) == -1) {
+ free(data);
+ return -1;
+ }
+
+ free(data);
+
+ magic = TDB_RECOVERY_MAGIC;
+ CONVERT(magic);
+
+ *magic_offset = recovery_offset + offsetof(struct tdb_record, magic);
+
+ if (methods->tdb_write(tdb, *magic_offset, &magic, sizeof(magic)) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_setup_recovery: failed to write recovery magic\n"));
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+ if (transaction_write_existing(tdb, *magic_offset, &magic, sizeof(magic)) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_setup_recovery: failed to write secondary recovery magic\n"));
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+
+ /* ensure the recovery magic marker is on disk */
+ if (transaction_sync(tdb, *magic_offset, sizeof(magic)) == -1) {
+ return -1;
+ }
+
+ return 0;
+}
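+
+/* The ordering above is what makes a crash recoverable: the old data
+ is written and synced first, and only then is the recovery magic
+ written and synced. A crash before the magic reaches disk leaves a
+ record with TDB_RECOVERY_INVALID_MAGIC, which
+ tdb_transaction_recover() ignores; a crash after it leaves a valid
+ record that will be replayed. */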
+
+static int _tdb_transaction_prepare_commit(struct tdb_context *tdb)
+{
+ const struct tdb_methods *methods;
+
+ if (tdb->transaction == NULL) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_prepare_commit: no transaction\n"));
+ return -1;
+ }
+
+ if (tdb->transaction->prepared) {
+ tdb->ecode = TDB_ERR_EINVAL;
+ _tdb_transaction_cancel(tdb);
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_prepare_commit: transaction already prepared\n"));
+ return -1;
+ }
+
+ if (tdb->transaction->transaction_error) {
+ tdb->ecode = TDB_ERR_IO;
+ _tdb_transaction_cancel(tdb);
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_prepare_commit: transaction error pending\n"));
+ return -1;
+ }
+
+ if (tdb->transaction->nesting != 0) {
+ return 0;
+ }
+
+ /* check for a null transaction */
+ if (tdb->transaction->blocks == NULL) {
+ return 0;
+ }
+
+ methods = tdb->transaction->io_methods;
+
+ /* if there are any locks pending then the caller has not
+ nested their locks properly, so fail the transaction */
+ if (tdb_have_extra_locks(tdb)) {
+ tdb->ecode = TDB_ERR_LOCK;
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_prepare_commit: locks pending on commit\n"));
+ _tdb_transaction_cancel(tdb);
+ return -1;
+ }
+
+ /* upgrade the main transaction lock region to a write lock */
+ if (tdb_allrecord_upgrade(tdb) == -1) {
+ if (tdb->ecode == TDB_ERR_RDONLY && tdb->read_only) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "tdb_transaction_prepare_commit: "
+ "failed to upgrade hash locks: "
+ "database is read only\n"));
+ } else if (tdb->ecode == TDB_ERR_RDONLY
+ && tdb->traverse_read) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "tdb_transaction_prepare_commit: "
+ "failed to upgrade hash locks: "
+ "a database traverse is in progress\n"));
+ } else {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "tdb_transaction_prepare_commit: "
+ "failed to upgrade hash locks: %s\n",
+ tdb_errorstr(tdb)));
+ }
+ _tdb_transaction_cancel(tdb);
+ return -1;
+ }
+
+ /* get the open lock - this prevents new users attaching to the database
+ during the commit */
+ if (tdb_nest_lock(tdb, OPEN_LOCK, F_WRLCK, TDB_LOCK_WAIT) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_prepare_commit: failed to get open lock\n"));
+ _tdb_transaction_cancel(tdb);
+ return -1;
+ }
+
+ /* write the recovery data to the end of the file */
+ if (transaction_setup_recovery(tdb, &tdb->transaction->magic_offset) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_prepare_commit: failed to setup recovery data\n"));
+ _tdb_transaction_cancel(tdb);
+ return -1;
+ }
+
+ tdb->transaction->prepared = true;
+
+ /* expand the file to the new size if needed */
+ if (tdb->map_size != tdb->transaction->old_map_size) {
+ if (methods->tdb_expand_file(tdb, tdb->transaction->old_map_size,
+ tdb->map_size -
+ tdb->transaction->old_map_size) == -1) {
+ tdb->ecode = TDB_ERR_IO;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_prepare_commit: expansion failed\n"));
+ _tdb_transaction_cancel(tdb);
+ return -1;
+ }
+ tdb->map_size = tdb->transaction->old_map_size;
+ methods->tdb_oob(tdb, tdb->map_size, 1, 1);
+ }
+
+ /* Keep the open lock until the actual commit */
+
+ return 0;
+}
+
+/*
+ prepare to commit the current transaction
+*/
+_PUBLIC_ int tdb_transaction_prepare_commit(struct tdb_context *tdb)
+{
+ tdb_trace(tdb, "tdb_transaction_prepare_commit");
+ return _tdb_transaction_prepare_commit(tdb);
+}
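+
+/* One use of the separate prepare step is a simple two-phase commit
+ across more than one database. A sketch, assuming two open contexts
+ tdb1 and tdb2 with transactions in progress (note that a failed
+ prepare cancels its own transaction):
+
+ if (tdb_transaction_prepare_commit(tdb1) == -1)
+ return -1;
+ if (tdb_transaction_prepare_commit(tdb2) == -1) {
+ tdb_transaction_cancel(tdb1);
+ return -1;
+ }
+ if (tdb_transaction_commit(tdb1) == -1 ||
+ tdb_transaction_commit(tdb2) == -1)
+ return -1;
+*/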
+
+/* A repack is worthwhile if the largest free record is less than half the total free space. */
+static bool repack_worthwhile(struct tdb_context *tdb)
+{
+ tdb_off_t ptr;
+ struct tdb_record rec;
+ tdb_len_t total = 0, largest = 0;
+
+ if (tdb_ofs_read(tdb, FREELIST_TOP, &ptr) == -1) {
+ return false;
+ }
+
+ while (ptr != 0 && tdb_rec_free_read(tdb, ptr, &rec) == 0) {
+ total += rec.rec_len;
+ if (rec.rec_len > largest) {
+ largest = rec.rec_len;
+ }
+ ptr = rec.next;
+ }
+
+ return total > largest * 2;
+}
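+
+/* For example: free records of 4k, 3k and 3k give total = 10k and
+ largest = 4k, so total > largest * 2 holds and a repack is considered
+ worthwhile; a single 10k free record does not trigger one. */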
+
+/*
+ commit the current transaction
+*/
+_PUBLIC_ int tdb_transaction_commit(struct tdb_context *tdb)
+{
+ const struct tdb_methods *methods;
+ uint32_t i;
+ bool need_repack = false;
+
+ if (tdb->transaction == NULL) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_commit: no transaction\n"));
+ return -1;
+ }
+
+ tdb_trace(tdb, "tdb_transaction_commit");
+
+ if (tdb->transaction->transaction_error) {
+ tdb->ecode = TDB_ERR_IO;
+ _tdb_transaction_cancel(tdb);
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_commit: transaction error pending\n"));
+ return -1;
+ }
+
+ if (tdb->transaction->nesting != 0) {
+ tdb->transaction->nesting--;
+ return 0;
+ }
+
+ /* check for a null transaction */
+ if (tdb->transaction->blocks == NULL) {
+ _tdb_transaction_cancel(tdb);
+ return 0;
+ }
+
+ if (!tdb->transaction->prepared) {
+ int ret = _tdb_transaction_prepare_commit(tdb);
+ if (ret)
+ return ret;
+ }
+
+ methods = tdb->transaction->io_methods;
+
+ /* perform all the writes */
+ for (i=0;i<tdb->transaction->num_blocks;i++) {
+ tdb_off_t offset;
+ tdb_len_t length;
+
+ if (tdb->transaction->blocks[i] == NULL) {
+ continue;
+ }
+
+ offset = i * tdb->transaction->block_size;
+ length = tdb->transaction->block_size;
+ if (i == tdb->transaction->num_blocks-1) {
+ length = tdb->transaction->last_block_size;
+ }
+
+ if (methods->tdb_write(tdb, offset, tdb->transaction->blocks[i], length) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_commit: write failed during commit\n"));
+
+ /* we've overwritten part of the data and
+ possibly expanded the file, so we need to
+ run the crash recovery code */
+ tdb->methods = methods;
+ tdb_transaction_recover(tdb);
+
+ _tdb_transaction_cancel(tdb);
+
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_commit: write failed\n"));
+ return -1;
+ }
+ SAFE_FREE(tdb->transaction->blocks[i]);
+ }
+
+ /* Do this before we drop lock or blocks. */
+ if (tdb->transaction->expanded) {
+ need_repack = repack_worthwhile(tdb);
+ }
+
+ SAFE_FREE(tdb->transaction->blocks);
+ tdb->transaction->num_blocks = 0;
+
+ /* ensure the new data is on disk */
+ if (transaction_sync(tdb, 0, tdb->map_size) == -1) {
+ return -1;
+ }
+
+ /*
+ TODO: maybe write to some dummy hdr field, or write to magic
+ offset without mmap, before the last sync, instead of the
+ utime() call
+ */
+
+ /* on some systems (like Linux 2.6.x) changes via mmap/msync
+ don't change the mtime of the file. This means the file may
+ not be backed up (as tdb's rounding to block sizes means that
+ file size changes are quite rare too). The following forces
+ an mtime change when a transaction completes */
+#ifdef HAVE_UTIME
+ utime(tdb->name, NULL);
+#endif
+
+ /* use a transaction cancel to free memory and remove the
+ transaction locks */
+ _tdb_transaction_cancel(tdb);
+
+ if (need_repack) {
+ int ret = tdb_repack(tdb);
+ if (ret != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,
+ __location__ " Failed to repack database (not fatal)\n"));
+ }
+ /*
+ * Ignore the error.
+ *
+ * Why?
+ *
+ * We just committed to the DB above, so anything
+ * written during the transaction is committed, the
+ * caller needs to know that the long-term state was
+ * successfully modified.
+ *
+ * tdb_repack is an optimization that can fail for
+ * reasons like lock ordering and we cannot recover
+ * the transaction lock at this point, having released
+ * it above.
+ *
+ * If we return a failure the caller thinks the
+ * transaction was rolled back.
+ */
+ }
+
+ return 0;
+}
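+
+/* The overall commit sequence is therefore: prepare (recovery area
+ written and synced, locks upgraded), replay the cached blocks into
+ the file, sync, then reuse the cancel path purely to free memory and
+ drop the transaction locks, and finally repack if it looks
+ worthwhile. */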
+
+
+/*
+ recover from an aborted transaction. Must be called with exclusive
+ database write access already established (including the open
+ lock to prevent new processes attaching)
+*/
+int tdb_transaction_recover(struct tdb_context *tdb)
+{
+ tdb_off_t recovery_head, recovery_eof;
+ unsigned char *data, *p;
+ uint32_t zero = 0;
+ struct tdb_record rec;
+
+ /* find the recovery area */
+ if (tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, &recovery_head) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to read recovery head\n"));
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+
+ if (recovery_head == 0) {
+ /* we have never allocated a recovery record */
+ return 0;
+ }
+
+ /* read the recovery record */
+ if (tdb->methods->tdb_read(tdb, recovery_head, &rec,
+ sizeof(rec), DOCONV()) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to read recovery record\n"));
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+
+ if (rec.magic != TDB_RECOVERY_MAGIC) {
+ /* there is no valid recovery data */
+ return 0;
+ }
+
+ if (tdb->read_only) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: attempt to recover read only database\n"));
+ tdb->ecode = TDB_ERR_CORRUPT;
+ return -1;
+ }
+
+ recovery_eof = rec.key_len;
+
+ data = (unsigned char *)malloc(rec.data_len);
+ if (data == NULL) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to allocate recovery data\n"));
+ tdb->ecode = TDB_ERR_OOM;
+ return -1;
+ }
+
+ /* read the full recovery data */
+ if (tdb->methods->tdb_read(tdb, recovery_head + sizeof(rec), data,
+ rec.data_len, 0) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to read recovery data\n"));
+ tdb->ecode = TDB_ERR_IO;
+ free(data);
+ return -1;
+ }
+
+ /* recover the file data */
+ p = data;
+ while (p+8 < data + rec.data_len) {
+ uint32_t ofs, len;
+ if (DOCONV()) {
+ tdb_convert(p, 8);
+ }
+ memcpy(&ofs, p, 4);
+ memcpy(&len, p+4, 4);
+
+ if (tdb->methods->tdb_write(tdb, ofs, p+8, len) == -1) {
+ free(data);
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to recover %u bytes at offset %u\n", len, ofs));
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+ p += 8 + len;
+ }
+
+ free(data);
+
+ if (transaction_sync(tdb, 0, tdb->map_size) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to sync recovery\n"));
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+
+ /* if the recovery area is at or after the recovered eof then remove it */
+ if (recovery_eof <= recovery_head) {
+ if (tdb_ofs_write(tdb, TDB_RECOVERY_HEAD, &zero) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to remove recovery head\n"));
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+ }
+
+ /* remove the recovery magic */
+ if (tdb_ofs_write(tdb, recovery_head + offsetof(struct tdb_record, magic),
+ &zero) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to remove recovery magic\n"));
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+
+ if (transaction_sync(tdb, 0, recovery_eof) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to sync2 recovery\n"));
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+
+ TDB_LOG((tdb, TDB_DEBUG_TRACE, "tdb_transaction_recover: recovered %u byte database\n",
+ recovery_eof));
+
+ /* all done */
+ return 0;
+}
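+
+/* Applications rarely call this directly: the open and locking paths
+ check tdb_needs_recovery() below and run the recovery with the
+ required exclusive locks already held. */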
+
+/* On any I/O failure we say "needs recovery". */
+bool tdb_needs_recovery(struct tdb_context *tdb)
+{
+ tdb_off_t recovery_head;
+ struct tdb_record rec;
+
+ /* find the recovery area */
+ if (tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, &recovery_head) == -1) {
+ return true;
+ }
+
+ if (recovery_head == 0) {
+ /* we have never allocated a recovery record */
+ return false;
+ }
+
+ /* read the recovery record */
+ if (tdb->methods->tdb_read(tdb, recovery_head, &rec,
+ sizeof(rec), DOCONV()) == -1) {
+ return true;
+ }
+
+ return (rec.magic == TDB_RECOVERY_MAGIC);
+}
diff --git a/lib/tdb/common/traverse.c b/lib/tdb/common/traverse.c
new file mode 100644
index 0000000..fcd2e00
--- /dev/null
+++ b/lib/tdb/common/traverse.c
@@ -0,0 +1,510 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Andrew Tridgell 1999-2005
+ Copyright (C) Paul `Rusty' Russell 2000
+ Copyright (C) Jeremy Allison 2000-2003
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "tdb_private.h"
+
+#define TDB_NEXT_LOCK_ERR ((tdb_off_t)-1)
+
+/* Uses traverse lock: 0 = finish, TDB_NEXT_LOCK_ERR = error,
+ other = record offset */
+static tdb_off_t tdb_next_lock(struct tdb_context *tdb, struct tdb_traverse_lock *tlock,
+ struct tdb_record *rec)
+{
+ int want_next = (tlock->off != 0);
+
+ /* Lock each chain from the start one. */
+ for (; tlock->list < tdb->hash_size; tlock->list++) {
+ if (!tlock->off && tlock->list != 0) {
+ /* this is an optimisation for the common case where
+ the hash chain is empty, which is particularly
+ common for the use of tdb with ldb, where large
+ hashes are used. In that case we spend most of our
+ time in tdb_brlock(), locking empty hash chains.
+
+ To avoid this, we do an unlocked pre-check to see
+ if the hash chain is empty before starting to look
+ inside it. If it is empty then we can avoid that
+ hash chain. If it isn't empty then we can't believe
+ the value we get back, as we read it without a
+ lock, so instead we get the lock and re-fetch the
+ value below.
+
+ Notice that not doing this optimisation on the
+ first hash chain is critical. We must guarantee
+ that we have done at least one fcntl lock at the
+ start of a search to guarantee that memory is
+ coherent on SMP systems. If records are added by
+ others during the search then that's OK, and we
+ could possibly miss those with this trick, but we
+ could miss them anyway without this trick, so the
+ semantics don't change.
+
+ With a non-indexed ldb search this trick gains us a
+ factor of around 80 in speed on a linux 2.6.x
+ system (testing using ldbtest).
+ */
+ tdb->methods->next_hash_chain(tdb, &tlock->list);
+ if (tlock->list == tdb->hash_size) {
+ continue;
+ }
+ }
+
+ if (tdb_lock(tdb, tlock->list, tlock->lock_rw) == -1)
+ return TDB_NEXT_LOCK_ERR;
+
+ /* No previous record? Start at top of chain. */
+ if (!tlock->off) {
+ if (tdb_ofs_read(tdb, TDB_HASH_TOP(tlock->list),
+ &tlock->off) == -1)
+ goto fail;
+ } else {
+ /* Otherwise unlock the previous record. */
+ if (tdb_unlock_record(tdb, tlock->off) != 0)
+ goto fail;
+ }
+
+ if (want_next) {
+ /* We have offset of old record: grab next */
+ if (tdb_rec_read(tdb, tlock->off, rec) == -1)
+ goto fail;
+ tlock->off = rec->next;
+ }
+
+ /* Iterate through chain */
+ while (tlock->off) {
+ if (tdb_rec_read(tdb, tlock->off, rec) == -1)
+ goto fail;
+
+ /* Detect infinite loops. From "Shlomi Yaakobovich" <Shlomi@exanet.com>. */
+ if (tlock->off == rec->next) {
+ tdb->ecode = TDB_ERR_CORRUPT;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_next_lock: loop detected.\n"));
+ goto fail;
+ }
+
+ if (!TDB_DEAD(rec)) {
+ /* Woohoo: we found one! */
+ if (tdb_lock_record(tdb, tlock->off) != 0)
+ goto fail;
+ return tlock->off;
+ }
+
+ tlock->off = rec->next;
+ }
+ tdb_unlock(tdb, tlock->list, tlock->lock_rw);
+ want_next = 0;
+ }
+ /* We finished iteration without finding anything */
+ tdb->ecode = TDB_SUCCESS;
+ return 0;
+
+ fail:
+ tlock->off = 0;
+ if (tdb_unlock(tdb, tlock->list, tlock->lock_rw) != 0)
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_next_lock: On error unlock failed!\n"));
+ return TDB_NEXT_LOCK_ERR;
+}
+
+/* traverse the entire database - calling fn(tdb, key, data) on each element.
+ returns -1 on error, otherwise the number of records traversed.
+ if fn is NULL then it is not called.
+ a non-zero return value from fn() indicates that the traversal should stop.
+ */
+static int tdb_traverse_internal(struct tdb_context *tdb,
+ tdb_traverse_func fn, void *private_data,
+ struct tdb_traverse_lock *tl)
+{
+ TDB_DATA key, dbuf;
+ struct tdb_record rec;
+ int ret = 0, count = 0;
+ tdb_off_t off;
+ size_t recbuf_len;
+
+ recbuf_len = 4096;
+ key.dptr = malloc(recbuf_len);
+ if (key.dptr == NULL) {
+ return -1;
+ }
+
+ /* This was in the initialization, above, but the IRIX compiler
+ * did not like it. crh
+ */
+ tl->next = tdb->travlocks.next;
+
+ /* fcntl locks don't stack: beware traverse inside traverse */
+ tdb->travlocks.next = tl;
+
+ /* tdb_next_lock places locks on the record returned, and its chain */
+ while ((off = tdb_next_lock(tdb, tl, &rec)) != 0) {
+ tdb_len_t full_len;
+ int nread;
+
+ if (off == TDB_NEXT_LOCK_ERR) {
+ ret = -1;
+ goto out;
+ }
+
+ full_len = rec.key_len + rec.data_len;
+
+ if (full_len > recbuf_len) {
+ recbuf_len = full_len;
+
+ /*
+ * No realloc, we don't need the old data and thus can
+ * do without the memcpy
+ */
+ free(key.dptr);
+ key.dptr = malloc(recbuf_len);
+
+ if (key.dptr == NULL) {
+ ret = -1;
+ if (tdb_unlock(tdb, tl->list, tl->lock_rw)
+ != 0) {
+ goto out;
+ }
+ if (tdb_unlock_record(tdb, tl->off) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,
+ "tdb_traverse: malloc "
+ "failed and unlock_record "
+ "failed!\n"));
+ }
+ goto out;
+ }
+ }
+
+ count++;
+ /* now read the full record */
+ nread = tdb->methods->tdb_read(tdb, tl->off + sizeof(rec),
+ key.dptr, full_len, 0);
+ if (nread == -1) {
+ ret = -1;
+ if (tdb_unlock(tdb, tl->list, tl->lock_rw) != 0)
+ goto out;
+ if (tdb_unlock_record(tdb, tl->off) != 0)
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_traverse: key.dptr == NULL and unlock_record failed!\n"));
+ goto out;
+ }
+ key.dsize = rec.key_len;
+ dbuf.dptr = key.dptr + rec.key_len;
+ dbuf.dsize = rec.data_len;
+
+ tdb_trace_1rec_retrec(tdb, "traverse", key, dbuf);
+
+ /* Drop chain lock, call out */
+ if (tdb_unlock(tdb, tl->list, tl->lock_rw) != 0) {
+ ret = -1;
+ goto out;
+ }
+ if (fn && fn(tdb, key, dbuf, private_data)) {
+ /* They want us to terminate traversal */
+ tdb_trace_ret(tdb, "tdb_traverse_end", count);
+ if (tdb_unlock_record(tdb, tl->off) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_traverse: unlock_record failed!\n"));;
+ ret = -1;
+ }
+ goto out;
+ }
+ }
+ tdb_trace(tdb, "tdb_traverse_end");
+out:
+ SAFE_FREE(key.dptr);
+ tdb->travlocks.next = tl->next;
+ if (ret < 0)
+ return -1;
+ else
+ return count;
+}
+
+
+/*
+ a read style traverse - temporarily marks each record read only
+*/
+_PUBLIC_ int tdb_traverse_read(struct tdb_context *tdb,
+ tdb_traverse_func fn, void *private_data)
+{
+ struct tdb_traverse_lock tl = { NULL, 0, 0, F_RDLCK };
+ int ret;
+
+ tdb->traverse_read++;
+ tdb_trace(tdb, "tdb_traverse_read_start");
+ ret = tdb_traverse_internal(tdb, fn, private_data, &tl);
+ tdb->traverse_read--;
+
+ return ret;
+}
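+
+/* A minimal callback sketch for the traverse functions (illustrative
+ only; count_fn is not part of the library). Returning non-zero from
+ the callback would stop the traversal early:
+
+ static int count_fn(struct tdb_context *tdb, TDB_DATA key,
+ TDB_DATA data, void *private_data)
+ {
+ unsigned int *count = (unsigned int *)private_data;
+ (*count)++;
+ return 0;
+ }
+
+ unsigned int count = 0;
+ if (tdb_traverse_read(tdb, count_fn, &count) == -1)
+ return -1;
+*/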
+
+/*
+ a write style traverse - needs to get the transaction lock to
+ prevent deadlocks
+
+ WARNING: The data buffer given to the callback fn does NOT meet the
+ alignment guarantees malloc gives you.
+*/
+_PUBLIC_ int tdb_traverse(struct tdb_context *tdb,
+ tdb_traverse_func fn, void *private_data)
+{
+ struct tdb_traverse_lock tl = { NULL, 0, 0, F_WRLCK };
+ enum tdb_lock_flags lock_flags;
+ int ret;
+
+ if (tdb->read_only || tdb->traverse_read) {
+ return tdb_traverse_read(tdb, fn, private_data);
+ }
+
+ lock_flags = TDB_LOCK_WAIT;
+
+ if (tdb->allrecord_lock.count != 0) {
+ /*
+ * This avoids a deadlock between tdb_lockall() and
+ * tdb_traverse(). See
+ * https://bugzilla.samba.org/show_bug.cgi?id=11381
+ */
+ lock_flags = TDB_LOCK_NOWAIT;
+ }
+
+ if (tdb_transaction_lock(tdb, F_WRLCK, lock_flags)) {
+ return -1;
+ }
+
+ tdb->traverse_write++;
+ tdb_trace(tdb, "tdb_traverse_start");
+ ret = tdb_traverse_internal(tdb, fn, private_data, &tl);
+ tdb->traverse_write--;
+
+ tdb_transaction_unlock(tdb, F_WRLCK);
+
+ return ret;
+}
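+
+/* Because of the alignment caveat above, a callback that wants to
+ reinterpret data.dptr as a structure should memcpy() it into an
+ aligned copy first, e.g. (struct my_rec being whatever record layout
+ the caller happens to store, not a tdb type):
+
+ struct my_rec r;
+ if (data.dsize < sizeof(r))
+ return -1;
+ memcpy(&r, data.dptr, sizeof(r));
+*/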
+
+
+/* find the first entry in the database and return its key */
+_PUBLIC_ TDB_DATA tdb_firstkey(struct tdb_context *tdb)
+{
+ TDB_DATA key;
+ struct tdb_record rec;
+ tdb_off_t off;
+
+ /* release any old lock */
+ if (tdb_unlock_record(tdb, tdb->travlocks.off) != 0)
+ return tdb_null;
+ tdb->travlocks.off = tdb->travlocks.list = 0;
+ tdb->travlocks.lock_rw = F_RDLCK;
+
+ /* Grab first record: locks chain and returned record. */
+ off = tdb_next_lock(tdb, &tdb->travlocks, &rec);
+ if (off == 0 || off == TDB_NEXT_LOCK_ERR) {
+ tdb_trace_retrec(tdb, "tdb_firstkey", tdb_null);
+ return tdb_null;
+ }
+ /* now read the key */
+ key.dsize = rec.key_len;
+ key.dptr = tdb_alloc_read(tdb, tdb->travlocks.off + sizeof(rec), key.dsize);
+
+ tdb_trace_retrec(tdb, "tdb_firstkey", key);
+
+ /* Unlock the hash chain of the record we just read. */
+ if (tdb_unlock(tdb, tdb->travlocks.list, tdb->travlocks.lock_rw) != 0)
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_firstkey: error occurred while tdb_unlocking!\n"));
+ return key;
+}
+
+/* find the next entry in the database, returning its key */
+_PUBLIC_ TDB_DATA tdb_nextkey(struct tdb_context *tdb, TDB_DATA oldkey)
+{
+ uint32_t oldlist;
+ TDB_DATA key = tdb_null;
+ struct tdb_record rec;
+ unsigned char *k = NULL;
+ tdb_off_t off;
+
+ /* Is locked key the old key? If so, traverse will be reliable. */
+ if (tdb->travlocks.off) {
+ if (tdb_lock(tdb, tdb->travlocks.list, tdb->travlocks.lock_rw))
+ return tdb_null;
+ if (tdb_rec_read(tdb, tdb->travlocks.off, &rec) == -1
+ || !(k = tdb_alloc_read(tdb,tdb->travlocks.off+sizeof(rec),
+ rec.key_len))
+ || memcmp(k, oldkey.dptr, oldkey.dsize) != 0) {
+ /* No, it wasn't: unlock it and start from scratch */
+ if (tdb_unlock_record(tdb, tdb->travlocks.off) != 0) {
+ tdb_trace_1rec_retrec(tdb, "tdb_nextkey",
+ oldkey, tdb_null);
+ SAFE_FREE(k);
+ return tdb_null;
+ }
+ if (tdb_unlock(tdb, tdb->travlocks.list, tdb->travlocks.lock_rw) != 0) {
+ SAFE_FREE(k);
+ return tdb_null;
+ }
+ tdb->travlocks.off = 0;
+ }
+
+ SAFE_FREE(k);
+ }
+
+ if (!tdb->travlocks.off) {
+ /* No previous element: do normal find, and lock record */
+ tdb->travlocks.off = tdb_find_lock_hash(tdb, oldkey, tdb->hash_fn(&oldkey), tdb->travlocks.lock_rw, &rec);
+ if (!tdb->travlocks.off) {
+ tdb_trace_1rec_retrec(tdb, "tdb_nextkey", oldkey, tdb_null);
+ return tdb_null;
+ }
+ tdb->travlocks.list = BUCKET(rec.full_hash);
+ if (tdb_lock_record(tdb, tdb->travlocks.off) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_nextkey: lock_record failed (%s)!\n", strerror(errno)));
+ return tdb_null;
+ }
+ }
+ oldlist = tdb->travlocks.list;
+
+ /* Grab next record: locks chain and returned record,
+ unlocks old record */
+ off = tdb_next_lock(tdb, &tdb->travlocks, &rec);
+ if (off != TDB_NEXT_LOCK_ERR && off != 0) {
+ key.dsize = rec.key_len;
+ key.dptr = tdb_alloc_read(tdb, tdb->travlocks.off+sizeof(rec),
+ key.dsize);
+ /* Unlock the chain of this new record */
+ if (tdb_unlock(tdb, tdb->travlocks.list, tdb->travlocks.lock_rw) != 0)
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_nextkey: WARNING tdb_unlock failed!\n"));
+ }
+ /* Unlock the chain of old record */
+ if (tdb_unlock(tdb, oldlist, tdb->travlocks.lock_rw) != 0)
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_nextkey: WARNING tdb_unlock failed!\n"));
+ tdb_trace_1rec_retrec(tdb, "tdb_nextkey", oldkey, key);
+ return key;
+}
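+
+/* The usual iteration pattern built on the two calls above (a sketch;
+ each returned dptr is malloc'ed and owned by the caller):
+
+ TDB_DATA key, next;
+
+ for (key = tdb_firstkey(tdb); key.dptr != NULL; key = next) {
+ ... use key ...
+ next = tdb_nextkey(tdb, key);
+ free(key.dptr);
+ }
+*/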
+
+_PUBLIC_ int tdb_traverse_chain(struct tdb_context *tdb,
+ unsigned chain,
+ tdb_traverse_func fn,
+ void *private_data)
+{
+ tdb_off_t rec_ptr;
+ struct tdb_chainwalk_ctx chainwalk;
+ int count = 0;
+ int ret;
+
+ if (chain >= tdb->hash_size) {
+ tdb->ecode = TDB_ERR_EINVAL;
+ return -1;
+ }
+
+ if (tdb->traverse_read != 0) {
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ ret = tdb_lock(tdb, chain, F_RDLCK);
+ if (ret == -1) {
+ return -1;
+ }
+
+ tdb->traverse_read += 1;
+
+ ret = tdb_ofs_read(tdb, TDB_HASH_TOP(chain), &rec_ptr);
+ if (ret == -1) {
+ goto fail;
+ }
+
+ tdb_chainwalk_init(&chainwalk, rec_ptr);
+
+ while (rec_ptr != 0) {
+ struct tdb_record rec;
+ bool ok;
+
+ ret = tdb_rec_read(tdb, rec_ptr, &rec);
+ if (ret == -1) {
+ goto fail;
+ }
+
+ if (!TDB_DEAD(&rec)) {
+ /* no overflow checks, tdb_rec_read checked it */
+ tdb_off_t key_ofs = rec_ptr + sizeof(rec);
+ size_t full_len = rec.key_len + rec.data_len;
+ uint8_t *buf = NULL;
+
+ TDB_DATA key = { .dsize = rec.key_len };
+ TDB_DATA data = { .dsize = rec.data_len };
+
+ if ((tdb->transaction == NULL) &&
+ (tdb->map_ptr != NULL)) {
+ ret = tdb_oob(tdb, key_ofs, full_len, 0);
+ if (ret == -1) {
+ goto fail;
+ }
+ key.dptr = (uint8_t *)tdb->map_ptr + key_ofs;
+ } else {
+ buf = tdb_alloc_read(tdb, key_ofs, full_len);
+ if (buf == NULL) {
+ goto fail;
+ }
+ key.dptr = buf;
+ }
+ data.dptr = key.dptr + key.dsize;
+
+ ret = fn(tdb, key, data, private_data);
+ free(buf);
+
+ count += 1;
+
+ if (ret != 0) {
+ break;
+ }
+ }
+
+ rec_ptr = rec.next;
+
+ ok = tdb_chainwalk_check(tdb, &chainwalk, rec_ptr);
+ if (!ok) {
+ goto fail;
+ }
+ }
+ tdb->traverse_read -= 1;
+ tdb_unlock(tdb, chain, F_RDLCK);
+ return count;
+
+fail:
+ tdb->traverse_read -= 1;
+ tdb_unlock(tdb, chain, F_RDLCK);
+ return -1;
+}
+
+_PUBLIC_ int tdb_traverse_key_chain(struct tdb_context *tdb,
+ TDB_DATA key,
+ tdb_traverse_func fn,
+ void *private_data)
+{
+ uint32_t hash, chain;
+ int ret;
+
+ hash = tdb->hash_fn(&key);
+ chain = BUCKET(hash);
+ ret = tdb_traverse_chain(tdb, chain, fn, private_data);
+
+ return ret;
+}