summaryrefslogtreecommitdiffstats
path: root/source3/locking
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-05 17:47:29 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-05 17:47:29 +0000
commit4f5791ebd03eaec1c7da0865a383175b05102712 (patch)
tree8ce7b00f7a76baa386372422adebbe64510812d4 /source3/locking
parentInitial commit. (diff)
downloadsamba-4f5791ebd03eaec1c7da0865a383175b05102712.tar.xz
samba-4f5791ebd03eaec1c7da0865a383175b05102712.zip
Adding upstream version 2:4.17.12+dfsg.upstream/2%4.17.12+dfsgupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'source3/locking')
-rw-r--r--source3/locking/brlock.c2019
-rw-r--r--source3/locking/leases_db.c726
-rw-r--r--source3/locking/leases_db.h80
-rw-r--r--source3/locking/leases_util.c77
-rw-r--r--source3/locking/locking.c1186
-rw-r--r--source3/locking/posix.c1374
-rw-r--r--source3/locking/proto.h215
-rw-r--r--source3/locking/share_mode_lock.c2517
-rw-r--r--source3/locking/share_mode_lock.h140
-rw-r--r--source3/locking/share_mode_lock_private.h24
10 files changed, 8358 insertions, 0 deletions
diff --git a/source3/locking/brlock.c b/source3/locking/brlock.c
new file mode 100644
index 0000000..68ade6b
--- /dev/null
+++ b/source3/locking/brlock.c
@@ -0,0 +1,2019 @@
+/*
+ Unix SMB/CIFS implementation.
+ byte range locking code
+ Updated to handle range splits/merges.
+
+ Copyright (C) Andrew Tridgell 1992-2000
+ Copyright (C) Jeremy Allison 1992-2000
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/* This module implements a tdb based byte range locking service,
+ replacing the fcntl() based byte range locking previously
+ used. This allows us to provide the same semantics as NT */
+
+#include "includes.h"
+#include "system/filesys.h"
+#include "lib/util/server_id.h"
+#include "locking/proto.h"
+#include "smbd/globals.h"
+#include "dbwrap/dbwrap.h"
+#include "dbwrap/dbwrap_open.h"
+#include "serverid.h"
+#include "messages.h"
+#include "util_tdb.h"
+
+#undef DBGC_CLASS
+#define DBGC_CLASS DBGC_LOCKING
+
+#define ZERO_ZERO 0
+
+/* The open brlock.tdb database. */
+
+static struct db_context *brlock_db;
+
/* In-memory view of one file's byte-range locks (one brlock.tdb record). */
struct byte_range_lock {
	struct files_struct *fsp;	/* File this lock record belongs to. */
	TALLOC_CTX *req_mem_ctx;	/* Optional per-request talloc context;
					 * NULL -> brl itself (brl_req_mem_ctx). */
	const struct GUID *req_guid;	/* Optional request GUID; NULL -> all-zero
					 * GUID is returned (brl_req_guid). */
	unsigned int num_locks;		/* Number of entries in lock_data. */
	bool modified;			/* Set whenever lock_data was changed. */
	struct lock_struct *lock_data;	/* talloc'ed array of locks. */
	struct db_record *record;	/* Underlying brlock.tdb record. */
};
+
+/****************************************************************************
+ Debug info at level 10 for lock struct.
+****************************************************************************/
+
static void print_lock_struct(unsigned int i, const struct lock_struct *pls)
{
	/* Scratch buffer for server_id_str_buf(). */
	struct server_id_buf tmp;

	/* i is only a label (typically the index in the lock array). */
	DBG_DEBUG("[%u]: smblctx = %"PRIu64", tid = %"PRIu32", pid = %s, "
		  "start = %"PRIu64", size = %"PRIu64", fnum = %"PRIu64", "
		  "%s %s\n",
		  i,
		  pls->context.smblctx,
		  pls->context.tid,
		  server_id_str_buf(pls->context.pid, &tmp),
		  pls->start,
		  pls->size,
		  pls->fnum,
		  lock_type_name(pls->lock_type),
		  lock_flav_name(pls->lock_flav));
}
+
/* Number of byte-range locks currently held in this record. */
unsigned int brl_num_locks(const struct byte_range_lock *brl)
{
	return brl->num_locks;
}
+
/* The files_struct this byte-range lock record belongs to. */
struct files_struct *brl_fsp(struct byte_range_lock *brl)
{
	return brl->fsp;
}
+
+TALLOC_CTX *brl_req_mem_ctx(const struct byte_range_lock *brl)
+{
+ if (brl->req_mem_ctx == NULL) {
+ return talloc_get_type_abort(brl, struct byte_range_lock);
+ }
+
+ return brl->req_mem_ctx;
+}
+
+const struct GUID *brl_req_guid(const struct byte_range_lock *brl)
+{
+ if (brl->req_guid == NULL) {
+ static const struct GUID brl_zero_req_guid;
+ return &brl_zero_req_guid;
+ }
+
+ return brl->req_guid;
+}
+
+/****************************************************************************
+ See if two locking contexts are equal.
+****************************************************************************/
+
+static bool brl_same_context(const struct lock_context *ctx1,
+ const struct lock_context *ctx2)
+{
+ return (server_id_equal(&ctx1->pid, &ctx2->pid) &&
+ (ctx1->smblctx == ctx2->smblctx) &&
+ (ctx1->tid == ctx2->tid));
+}
+
/*
 * Validate a byte range per [MS-FSA]:
 *
 *   If (((FileOffset + Length - 1) < FileOffset) && Length != 0) {
 *       return STATUS_INVALID_LOCK_RANGE
 *   }
 *
 * Rearranged so the check itself cannot wrap: for a non-empty range
 * the last byte (ofs + len - 1) stays representable exactly when
 * len - 1 <= UINT64_MAX - ofs.
 */
bool byte_range_valid(uint64_t ofs, uint64_t len)
{
	if (len == 0) {
		/* Empty ranges are always valid. */
		return true;
	}

	return ((len - 1) <= (UINT64_MAX - ofs));
}
+
/*
 * Do two byte ranges overlap?  Based on [MS-FSA] 2.1.4.10
 * "Algorithm for Determining If a Range Access Conflicts with
 * Byte-Range Locks".
 *
 * The {0, 0} range never conflicts with anything.  Ranges whose end
 * would wrap past UINT64_MAX (invalid per byte_range_valid(), but
 * possible for overflowing read/write probes) are clamped so their
 * last byte is UINT64_MAX.
 */
bool byte_range_overlap(uint64_t ofs1,
			uint64_t len1,
			uint64_t ofs2,
			uint64_t len2)
{
	uint64_t last1;
	uint64_t last2;

	if (ofs1 == 0 && len1 == 0) {
		return false;
	}
	if (ofs2 == 0 && len2 == 0) {
		return false;
	}

	/* Last byte of each range, clamped on wrap. */
	if (len1 != 0 && (len1 - 1) > (UINT64_MAX - ofs1)) {
		last1 = UINT64_MAX;
	} else {
		last1 = ofs1 + len1 - 1;
	}
	if (len2 != 0 && (len2 - 1) > (UINT64_MAX - ofs2)) {
		last2 = UINT64_MAX;
	} else {
		last2 = ofs2 + len2 - 1;
	}

	/*
	 * No conflict when one range starts after the last byte of
	 * the other; otherwise they overlap.
	 */
	return (ofs1 <= last2) && (ofs2 <= last1);
}
+
+/****************************************************************************
+ See if lck1 and lck2 overlap.
+****************************************************************************/
+
/* Convenience wrapper: do the byte ranges of two lock_structs overlap? */
static bool brl_overlap(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	return byte_range_overlap(lck1->start,
				  lck1->size,
				  lck2->start,
				  lck2->size);
}
+
+/****************************************************************************
+ See if lock2 can be added when lock1 is in place.
+****************************************************************************/
+
+static bool brl_conflict(const struct lock_struct *lck1,
+ const struct lock_struct *lck2)
+{
+ /* Read locks never conflict. */
+ if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
+ return False;
+ }
+
+ /* A READ lock can stack on top of a WRITE lock if they have the same
+ * context & fnum. */
+ if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
+ brl_same_context(&lck1->context, &lck2->context) &&
+ lck1->fnum == lck2->fnum) {
+ return False;
+ }
+
+ return brl_overlap(lck1, lck2);
+}
+
+/****************************************************************************
+ See if lock2 can be added when lock1 is in place - when both locks are POSIX
+ flavour. POSIX locks ignore fnum - they only care about dev/ino which we
+ know already match.
+****************************************************************************/
+
+static bool brl_conflict_posix(const struct lock_struct *lck1,
+ const struct lock_struct *lck2)
+{
+#if defined(DEVELOPER)
+ SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
+ SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
+#endif
+
+ /* Read locks never conflict. */
+ if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
+ return False;
+ }
+
+ /* Locks on the same context don't conflict. Ignore fnum. */
+ if (brl_same_context(&lck1->context, &lck2->context)) {
+ return False;
+ }
+
+ /* One is read, the other write, or the context is different,
+ do they overlap ? */
+ return brl_overlap(lck1, lck2);
+}
+
+#if ZERO_ZERO
+static bool brl_conflict1(const struct lock_struct *lck1,
+ const struct lock_struct *lck2)
+{
+ if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
+ return False;
+ }
+
+ if (brl_same_context(&lck1->context, &lck2->context) &&
+ lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
+ return False;
+ }
+
+ if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
+ return True;
+ }
+
+ if (lck1->start >= (lck2->start + lck2->size) ||
+ lck2->start >= (lck1->start + lck1->size)) {
+ return False;
+ }
+
+ return True;
+}
+#endif
+
+/****************************************************************************
+ Check to see if this lock conflicts, but ignore our own locks on the
+ same fnum only. This is the read/write lock check code path.
+ This is never used in the POSIX lock case.
+****************************************************************************/
+
+static bool brl_conflict_other(const struct lock_struct *lock,
+ const struct lock_struct *rw_probe)
+{
+ if (lock->lock_type == READ_LOCK && rw_probe->lock_type == READ_LOCK) {
+ return False;
+ }
+
+ if (lock->lock_flav == POSIX_LOCK &&
+ rw_probe->lock_flav == POSIX_LOCK) {
+ /*
+ * POSIX flavour locks never conflict here - this is only called
+ * in the read/write path.
+ */
+ return False;
+ }
+
+ if (!brl_overlap(lock, rw_probe)) {
+ /*
+ * I/O can only conflict when overlapping a lock, thus let it
+ * pass
+ */
+ return false;
+ }
+
+ if (!brl_same_context(&lock->context, &rw_probe->context)) {
+ /*
+ * Different process, conflict
+ */
+ return true;
+ }
+
+ if (lock->fnum != rw_probe->fnum) {
+ /*
+ * Different file handle, conflict
+ */
+ return true;
+ }
+
+ if ((lock->lock_type == READ_LOCK) &&
+ (rw_probe->lock_type == WRITE_LOCK)) {
+ /*
+ * Incoming WRITE locks conflict with existing READ locks even
+ * if the context is the same. JRA. See LOCKTEST7 in
+ * smbtorture.
+ */
+ return true;
+ }
+
+ /*
+ * I/O request compatible with existing lock, let it pass without
+ * conflict
+ */
+
+ return false;
+}
+
+/****************************************************************************
+ Open up the brlock.tdb database.
+****************************************************************************/
+
void brl_init(bool read_only)
{
	int tdb_flags;
	char *db_path;

	/* Idempotent: the database is opened at most once per process. */
	if (brlock_db) {
		return;
	}

	/*
	 * NOTE(review): TDB_VOLATILE/TDB_CLEAR_IF_FIRST mean brlock
	 * state does not survive a fresh start of the daemons;
	 * TDB_SEQNUM presumably lets watchers detect changes cheaply
	 * - confirm against dbwrap/tdb docs.
	 */
	tdb_flags =
		TDB_DEFAULT|
		TDB_VOLATILE|
		TDB_CLEAR_IF_FIRST|
		TDB_INCOMPATIBLE_HASH|
		TDB_SEQNUM;

	db_path = lock_path(talloc_tos(), "brlock.tdb");
	if (db_path == NULL) {
		DEBUG(0, ("out of memory!\n"));
		return;
	}

	/* Failure leaves brlock_db NULL; callers get no lock database. */
	brlock_db = db_open(NULL, db_path,
			    SMB_OPEN_DATABASE_TDB_HASH_SIZE, tdb_flags,
			    read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644,
			    DBWRAP_LOCK_ORDER_2, DBWRAP_FLAG_NONE);
	if (!brlock_db) {
		DEBUG(0,("Failed to open byte range locking database %s\n",
			 db_path));
		TALLOC_FREE(db_path);
		return;
	}
	TALLOC_FREE(db_path);
}
+
+/****************************************************************************
+ Close down the brlock.tdb database.
+****************************************************************************/
+
/* Close the brlock.tdb database (safe to call when never opened). */
void brl_shutdown(void)
{
	TALLOC_FREE(brlock_db);
}
+
+#if ZERO_ZERO
+/****************************************************************************
+ Compare two locks for sorting.
+****************************************************************************/
+
+static int lock_compare(const struct lock_struct *lck1,
+ const struct lock_struct *lck2)
+{
+ if (lck1->start != lck2->start) {
+ return (lck1->start - lck2->start);
+ }
+ if (lck2->size != lck1->size) {
+ return ((int)lck1->size - (int)lck2->size);
+ }
+ return 0;
+}
+#endif
+
+/****************************************************************************
+ Lock a range of bytes - Windows lock semantics.
+****************************************************************************/
+
/*
 * Grant plock under Windows lock semantics.  On success the lock is
 * appended to the in-memory array and br_lck is marked modified; on
 * conflict NT_STATUS_LOCK_NOT_GRANTED is returned with the blocker's
 * smblctx written into plock->context.smblctx.
 */
NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
				  struct lock_struct *plock)
{
	unsigned int i;
	files_struct *fsp = br_lck->fsp;
	struct lock_struct *locks = br_lck->lock_data;
	NTSTATUS status;
	bool valid;

	SMB_ASSERT(plock->lock_type != UNLOCK_LOCK);

	valid = byte_range_valid(plock->start, plock->size);
	if (!valid) {
		return NT_STATUS_INVALID_LOCK_RANGE;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		/* Do any Windows or POSIX locks conflict ? */
		if (brl_conflict(&locks[i], plock)) {
			if (!serverid_exists(&locks[i].context.pid)) {
				/* The owning process is gone: mark the
				 * stale entry (pid 0) and keep scanning. */
				locks[i].context.pid.pid = 0;
				br_lck->modified = true;
				continue;
			}
			/* Remember who blocked us. */
			plock->context.smblctx = locks[i].context.smblctx;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
#if ZERO_ZERO
		if (plock->start == 0 && plock->size == 0 &&
		    locks[i].size == 0) {
			break;
		}
#endif
	}

	contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);

	/* We can get the Windows lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (lp_posix_locking(fsp->conn->params)) {
		int errno_ret;
		if (!set_posix_lock_windows_flavour(fsp,
						    plock->start,
						    plock->size,
						    plock->lock_type,
						    &plock->context,
						    locks,
						    br_lck->num_locks,
						    &errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				status = NT_STATUS_LOCK_NOT_GRANTED;
				goto fail;
			} else {
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* no conflicts - add it to the list of locks */
	locks = talloc_realloc(br_lck, locks, struct lock_struct,
			       (br_lck->num_locks + 1));
	if (!locks) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
	br_lck->num_locks += 1;
	br_lck->lock_data = locks;
	br_lck->modified = True;

	return NT_STATUS_OK;
 fail:
	/* Undo the level2 oplock contention taken above. */
	contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	return status;
}
+
+/****************************************************************************
+ Cope with POSIX range splits and merges.
+****************************************************************************/
+
/*
 * Core POSIX range split/merge helper.
 *
 * Given an existing lock (ex) and the proposed lock (plock), writes
 * 0, 1 or 2 lock_structs derived from ex into lck_arr (the caller
 * must provide room for 2) and returns that count:
 *   0 - ex was absorbed/replaced; nothing to keep.
 *   1 - ex kept, possibly with adjusted start/size.
 *   2 - ex was split in two around plock.
 * plock itself may be grown in place when same-type adjacent or
 * overlapping ranges merge, so later calls in the caller's scan must
 * use the updated plock.
 */
static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr, /* Output array. */
					     struct lock_struct *ex, /* existing lock. */
					     struct lock_struct *plock) /* proposed lock. */
{
	bool lock_types_differ = (ex->lock_type != plock->lock_type);

	/* We can't merge non-conflicting locks on different context - ignore fnum. */

	if (!brl_same_context(&ex->context, &plock->context)) {
		/* Just copy. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

	/* We now know we have the same context. */

	/* Did we overlap ? */

/*********************************************
                                        +---------+
                                        | ex      |
                                        +---------+
                         +-------+
                         | plock |
                         +-------+
OR....
        +---------+
        |  ex     |
        +---------+
**********************************************/

	if ( (ex->start > (plock->start + plock->size)) ||
	     (plock->start > (ex->start + ex->size))) {

		/* No overlap with this lock - copy existing. */

		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

/*********************************************
        +---------------------------+
        |          ex               |
        +---------------------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+
OR
             +---------------+
             |       ex      |
             +---------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+

**********************************************/

	if ( (ex->start >= plock->start) &&
	     (ex->start + ex->size <= plock->start + plock->size) ) {

		/* Replace - discard existing lock. */

		return 0;
	}

/*********************************************
Adjacent after.
                        +-------+
                        |  ex   |
                        +-------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        |   plock       | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |   plock               | - same lock type.
        +-----------------------+
**********************************************/

	if (plock->start + plock->size == ex->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Adjacent before.
        +-------+
        |  ex   |
        +-------+
                +---------------+
                |   plock       |
                +---------------+
BECOMES....
        +-------+---------------+
        | ex    |   plock       | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        |      plock            | - same lock type.
        +-----------------------+

**********************************************/

	if (ex->start + ex->size == plock->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the existing lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->start = ex->start;
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Overlap after.
        +-----------------------+
        |          ex           |
        +-----------------------+
        +---------------+
        |   plock       |
        +---------------+
OR
               +----------------+
               |       ex       |
               +----------------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        |   plock       | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |   plock               | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
	     (ex->start <= plock->start + plock->size) &&
	     (ex->start + ex->size > plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add remaining existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing start and size. */
			lck_arr[0].start = plock->start + plock->size;
			lck_arr[0].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += (ex->start + ex->size) - (plock->start + plock->size);
			return 0;
		}
	}

/*********************************************
Overlap before.
        +-----------------------+
        |  ex                   |
        +-----------------------+
                +---------------+
                |   plock       |
                +---------------+
OR
        +-------------+
        |  ex         |
        +-------------+
                +---------------+
                |   plock       |
                +---------------+

BECOMES....
        +-------+---------------+
        | ex    |   plock       | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        |      plock            | - same lock type.
        +-----------------------+

**********************************************/

	if ( (ex->start < plock->start) &&
	     (ex->start + ex->size >= plock->start) &&
	     (ex->start + ex->size <= plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the truncated old lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = plock->start - ex->start;
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. MUST ADJUST plock SIZE FIRST ! */
			plock->size += (plock->start - ex->start);
			plock->start = ex->start;
			return 0;
		}
	}

/*********************************************
Complete overlap.
        +---------------------------+
        |        ex                 |
        +---------------------------+
                +---------+
                |  plock  |
                +---------+
BECOMES.....
        +-------+---------+---------+
        | ex    |  plock  | ex      | - different lock types.
        +-------+---------+---------+
OR
        +---------------------------+
        |        plock              | - same lock type.
        +---------------------------+
**********************************************/

	if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {

		if (lock_types_differ) {

			/* We have to split ex into two locks here. */

			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));

			/* Adjust first existing size. */
			lck_arr[0].size = plock->start - ex->start;

			/* Adjust second existing start and size. */
			lck_arr[1].start = plock->start + plock->size;
			lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 2;
		} else {
			/* Just eat the existing locks, merge them into plock. */
			plock->start = ex->start;
			plock->size = ex->size;
			return 0;
		}
	}

	/* Never get here. */
	smb_panic("brlock_posix_split_merge");
	/* Notreached. */

	/* Keep some compilers happy. */
	return 0;
}
+
+/****************************************************************************
+ Lock a range of bytes - POSIX lock semantics.
+ We must cope with range splits and merges.
+****************************************************************************/
+
+static NTSTATUS brl_lock_posix(struct byte_range_lock *br_lck,
+ struct lock_struct *plock)
+{
+ unsigned int i, count, posix_count;
+ struct lock_struct *locks = br_lck->lock_data;
+ struct lock_struct *tp;
+ bool break_oplocks = false;
+ NTSTATUS status;
+
+ /* No zero-zero locks for POSIX. */
+ if (plock->start == 0 && plock->size == 0) {
+ return NT_STATUS_INVALID_PARAMETER;
+ }
+
+ /* Don't allow 64-bit lock wrap. */
+ if (plock->start + plock->size - 1 < plock->start) {
+ return NT_STATUS_INVALID_PARAMETER;
+ }
+
+ /* The worst case scenario here is we have to split an
+ existing POSIX lock range into two, and add our lock,
+ so we need at most 2 more entries. */
+
+ tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 2);
+ if (!tp) {
+ return NT_STATUS_NO_MEMORY;
+ }
+
+ count = posix_count = 0;
+
+ for (i=0; i < br_lck->num_locks; i++) {
+ struct lock_struct *curr_lock = &locks[i];
+
+ if (curr_lock->lock_flav == WINDOWS_LOCK) {
+ /* Do any Windows flavour locks conflict ? */
+ if (brl_conflict(curr_lock, plock)) {
+ if (!serverid_exists(&curr_lock->context.pid)) {
+ curr_lock->context.pid.pid = 0;
+ br_lck->modified = true;
+ continue;
+ }
+ /* No games with error messages. */
+ TALLOC_FREE(tp);
+ /* Remember who blocked us. */
+ plock->context.smblctx = curr_lock->context.smblctx;
+ return NT_STATUS_LOCK_NOT_GRANTED;
+ }
+ /* Just copy the Windows lock into the new array. */
+ memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
+ count++;
+ } else {
+ unsigned int tmp_count = 0;
+
+ /* POSIX conflict semantics are different. */
+ if (brl_conflict_posix(curr_lock, plock)) {
+ if (!serverid_exists(&curr_lock->context.pid)) {
+ curr_lock->context.pid.pid = 0;
+ br_lck->modified = true;
+ continue;
+ }
+ /* Can't block ourselves with POSIX locks. */
+ /* No games with error messages. */
+ TALLOC_FREE(tp);
+ /* Remember who blocked us. */
+ plock->context.smblctx = curr_lock->context.smblctx;
+ return NT_STATUS_LOCK_NOT_GRANTED;
+ }
+
+ /* Work out overlaps. */
+ tmp_count += brlock_posix_split_merge(&tp[count], curr_lock, plock);
+ posix_count += tmp_count;
+ count += tmp_count;
+ }
+ }
+
+ /*
+ * Break oplocks while we hold a brl. Since lock() and unlock() calls
+ * are not symetric with POSIX semantics, we cannot guarantee our
+ * contend_level2_oplocks_begin/end calls will be acquired and
+ * released one-for-one as with Windows semantics. Therefore we only
+ * call contend_level2_oplocks_begin if this is the first POSIX brl on
+ * the file.
+ */
+ break_oplocks = (posix_count == 0);
+ if (break_oplocks) {
+ contend_level2_oplocks_begin(br_lck->fsp,
+ LEVEL2_CONTEND_POSIX_BRL);
+ }
+
+ /* Try and add the lock in order, sorted by lock start. */
+ for (i=0; i < count; i++) {
+ struct lock_struct *curr_lock = &tp[i];
+
+ if (curr_lock->start <= plock->start) {
+ continue;
+ }
+ }
+
+ if (i < count) {
+ memmove(&tp[i+1], &tp[i],
+ (count - i)*sizeof(struct lock_struct));
+ }
+ memcpy(&tp[i], plock, sizeof(struct lock_struct));
+ count++;
+
+ /* We can get the POSIX lock, now see if it needs to
+ be mapped into a lower level POSIX one, and if so can
+ we get it ? */
+
+ if (lp_posix_locking(br_lck->fsp->conn->params)) {
+ int errno_ret;
+
+ /* The lower layer just needs to attempt to
+ get the system POSIX lock. We've weeded out
+ any conflicts above. */
+
+ if (!set_posix_lock_posix_flavour(br_lck->fsp,
+ plock->start,
+ plock->size,
+ plock->lock_type,
+ &plock->context,
+ &errno_ret)) {
+
+ /* We don't know who blocked us. */
+ plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;
+
+ if (errno_ret == EACCES || errno_ret == EAGAIN) {
+ TALLOC_FREE(tp);
+ status = NT_STATUS_LOCK_NOT_GRANTED;
+ goto fail;
+ } else {
+ TALLOC_FREE(tp);
+ status = map_nt_error_from_unix(errno);
+ goto fail;
+ }
+ }
+ }
+
+ /* If we didn't use all the allocated size,
+ * Realloc so we don't leak entries per lock call. */
+ if (count < br_lck->num_locks + 2) {
+ tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
+ if (!tp) {
+ status = NT_STATUS_NO_MEMORY;
+ goto fail;
+ }
+ }
+
+ br_lck->num_locks = count;
+ TALLOC_FREE(br_lck->lock_data);
+ br_lck->lock_data = tp;
+ locks = tp;
+ br_lck->modified = True;
+
+ /* A successful downgrade from write to read lock can trigger a lock
+ re-evalutation where waiting readers can now proceed. */
+
+ return NT_STATUS_OK;
+ fail:
+ if (break_oplocks) {
+ contend_level2_oplocks_end(br_lck->fsp,
+ LEVEL2_CONTEND_POSIX_BRL);
+ }
+ return status;
+}
+
/* VFS dispatcher: forward the Windows-flavour lock to the handler module. */
NTSTATUS smb_vfs_call_brl_lock_windows(struct vfs_handle_struct *handle,
				       struct byte_range_lock *br_lck,
				       struct lock_struct *plock)
{
	VFS_FIND(brl_lock_windows);
	return handle->fns->brl_lock_windows_fn(handle, br_lck, plock);
}
+
+/****************************************************************************
+ Lock a range of bytes.
+****************************************************************************/
+
/*
 * Lock a range of bytes, dispatching on flavour: WINDOWS_LOCK goes
 * through the VFS, POSIX flavour is handled locally.  On failure the
 * blocker's pid/smblctx are written to *blocker_pid/*psmblctx (only
 * when psmblctx is non-NULL).
 */
NTSTATUS brl_lock(
	struct byte_range_lock *br_lck,
	uint64_t smblctx,
	struct server_id pid,
	br_off start,
	br_off size,
	enum brl_type lock_type,
	enum brl_flavour lock_flav,
	struct server_id *blocker_pid,
	uint64_t *psmblctx)
{
	NTSTATUS ret;
	struct lock_struct lock;

	ZERO_STRUCT(lock);

#if !ZERO_ZERO
	/* 0/0 locks only get special handling with ZERO_ZERO builds. */
	if (start == 0 && size == 0) {
		DEBUG(0,("client sent 0/0 lock - please report this\n"));
	}
#endif

	lock = (struct lock_struct) {
		.context.smblctx = smblctx,
		.context.pid = pid,
		.context.tid = br_lck->fsp->conn->cnum,
		.start = start,
		.size = size,
		.fnum = br_lck->fsp->fnum,
		.lock_type = lock_type,
		.lock_flav = lock_flav
	};

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_LOCK_WINDOWS(
			br_lck->fsp->conn, br_lck, &lock);
	} else {
		ret = brl_lock_posix(br_lck, &lock);
	}

#if ZERO_ZERO
	/* sort the lock list */
	TYPESAFE_QSORT(br_lck->lock_data, (size_t)br_lck->num_locks, lock_compare);
#endif
	/* If we're returning an error, return who blocked us. */
	if (!NT_STATUS_IS_OK(ret) && psmblctx) {
		*blocker_pid = lock.context.pid;
		*psmblctx = lock.context.smblctx;
	}
	return ret;
}
+
+/****************************************************************************
+ Unlock a range of bytes - Windows semantics.
+****************************************************************************/
+
/*
 * Remove one Windows-flavour lock matching plock's context, fnum,
 * start and size.  Returns False when no matching lock exists.
 */
bool brl_unlock_windows_default(struct byte_range_lock *br_lck,
				const struct lock_struct *plock)
{
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	enum brl_type deleted_lock_type = READ_LOCK; /* shut the compiler up.... */

	SMB_ASSERT(plock->lock_type == UNLOCK_LOCK);

#if ZERO_ZERO
	/* Delete write locks by preference... The lock list
	   is sorted in the zero zero case. */

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (lock->lock_type == WRITE_LOCK &&
		    brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {

			/* found it - delete it */
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i != br_lck->num_locks) {
		/* We found it - don't search again. */
		goto unlock_continue;
	}
#endif

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* Only remove our own locks that match in start, size, and flavour. */
		if (brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size ) {
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* we didn't find it */
		return False;
	}

#if ZERO_ZERO
  unlock_continue:
#endif

	/* Remove entry i, shifting the array tail down by one. */
	ARRAY_DEL_ELEMENT(locks, i, br_lck->num_locks);
	br_lck->num_locks -= 1;
	br_lck->modified = True;

	/* Unlock the underlying POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_windows_flavour(br_lck->fsp,
						   plock->start,
						   plock->size,
						   deleted_lock_type,
						   &plock->context,
						   locks,
						   br_lck->num_locks);
	}

	contend_level2_oplocks_end(br_lck->fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	return True;
}
+
+/****************************************************************************
+ Unlock a range of bytes - POSIX semantics.
+****************************************************************************/
+
/*
 * Remove a POSIX-flavour byte range, splitting/shrinking any of our
 * own overlapping POSIX locks via brlock_posix_split_merge().  The
 * lock array is rebuilt into tp (at most one extra entry is needed
 * for a split).  Returns True on success or when nothing overlapped.
 */
static bool brl_unlock_posix(struct byte_range_lock *br_lck,
			     struct lock_struct *plock)
{
	unsigned int i, count;
	struct lock_struct *tp;
	struct lock_struct *locks = br_lck->lock_data;
	bool overlap_found = False;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return False;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
	    plock->start + plock->size < plock->size) {
		DEBUG(10,("brl_unlock_posix: lock wrap\n"));
		return False;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, so we need at most
	   1 more entry. */

	tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 1);
	if (!tp) {
		DEBUG(10,("brl_unlock_posix: malloc fail\n"));
		return False;
	}

	count = 0;
	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		unsigned int tmp_count;

		/* Only remove our own locks - ignore fnum. */
		if (!brl_same_context(&lock->context, &plock->context)) {
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		if (lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(lock, plock)) {
				TALLOC_FREE(tp);
				return false;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		/* Work out overlaps. */
		tmp_count = brlock_posix_split_merge(&tp[count], lock, plock);

		if (tmp_count == 0) {
			/* plock overlapped the existing lock completely,
			   or replaced it. Don't copy the existing lock. */
			overlap_found = true;
		} else if (tmp_count == 1) {
			/* Either no overlap, (simple copy of existing lock) or
			 * an overlap of an existing lock. */
			/* If the lock changed size, we had an overlap. */
			if (tp[count].size != lock->size) {
				overlap_found = true;
			}
			count += tmp_count;
		} else if (tmp_count == 2) {
			/* We split a lock range in two. */
			overlap_found = true;
			count += tmp_count;

			/* Optimisation... */
			/* We know we're finished here as we can't overlap any
			   more POSIX locks. Copy the rest of the lock array. */

			if (i < br_lck->num_locks - 1) {
				memcpy(&tp[count], &locks[i+1],
				       sizeof(*locks)*((br_lck->num_locks-1) - i));
				count += ((br_lck->num_locks-1) - i);
			}
			break;
		}

	}

	if (!overlap_found) {
		/* Just ignore - no change. */
		TALLOC_FREE(tp);
		DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
		return True;
	}

	/* Unlock any POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_posix_flavour(br_lck->fsp,
						 plock->start,
						 plock->size,
						 &plock->context,
						 tp,
						 count);
	}

	/* Realloc so we don't leak entries per unlock call. */
	if (count) {
		tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
		if (!tp) {
			/* NOTE(review): at this point the system-level POSIX
			 * regions were already released above, but br_lck is
			 * left unmodified - TODO confirm this is intended. */
			DEBUG(10,("brl_unlock_posix: realloc fail\n"));
			return False;
		}
	} else {
		/* We deleted the last lock. */
		TALLOC_FREE(tp);
		tp = NULL;
	}

	contend_level2_oplocks_end(br_lck->fsp,
				   LEVEL2_CONTEND_POSIX_BRL);

	br_lck->num_locks = count;
	TALLOC_FREE(br_lck->lock_data);
	locks = tp;
	br_lck->lock_data = tp;
	br_lck->modified = True;

	return True;
}
+
/*
 * VFS dispatcher: forward a Windows-flavour byte-range unlock to the
 * first VFS module in the chain that implements brl_unlock_windows.
 */
bool smb_vfs_call_brl_unlock_windows(struct vfs_handle_struct *handle,
				     struct byte_range_lock *br_lck,
				     const struct lock_struct *plock)
{
	VFS_FIND(brl_unlock_windows);
	return handle->fns->brl_unlock_windows_fn(handle, br_lck, plock);
}
+
+/****************************************************************************
+ Unlock a range of bytes.
+****************************************************************************/
+
+bool brl_unlock(struct byte_range_lock *br_lck,
+ uint64_t smblctx,
+ struct server_id pid,
+ br_off start,
+ br_off size,
+ enum brl_flavour lock_flav)
+{
+ struct lock_struct lock;
+
+ lock.context.smblctx = smblctx;
+ lock.context.pid = pid;
+ lock.context.tid = br_lck->fsp->conn->cnum;
+ lock.start = start;
+ lock.size = size;
+ lock.fnum = br_lck->fsp->fnum;
+ lock.lock_type = UNLOCK_LOCK;
+ lock.lock_flav = lock_flav;
+
+ if (lock_flav == WINDOWS_LOCK) {
+ return SMB_VFS_BRL_UNLOCK_WINDOWS(
+ br_lck->fsp->conn, br_lck, &lock);
+ } else {
+ return brl_unlock_posix(br_lck, &lock);
+ }
+}
+
+/****************************************************************************
+ Test if we could add a lock if we wanted to.
+ Returns True if the region required is currently unlocked, False if locked.
+****************************************************************************/
+
bool brl_locktest(struct byte_range_lock *br_lck,
		  const struct lock_struct *rw_probe)
{
	bool ret = True;
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		/*
		 * Our own locks don't conflict.
		 */
		if (brl_conflict_other(&locks[i], rw_probe)) {
			if (br_lck->record == NULL) {
				/* readonly - we hold no db record lock and
				   therefore cannot modify the entry. */
				return false;
			}

			if (!serverid_exists(&locks[i].context.pid)) {
				/*
				 * Stale lock from a dead process. Mark it
				 * with pid 0 so byte_range_lock_flush()
				 * removes it, and keep probing.
				 */
				locks[i].context.pid.pid = 0;
				br_lck->modified = true;
				continue;
			}

			return False;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 * This only conflicts with Windows locks, not POSIX locks.
	 */

	if(lp_posix_locking(fsp->conn->params) &&
	   (rw_probe->lock_flav == WINDOWS_LOCK)) {
		/*
		 * Make copies -- is_posix_locked might modify the values
		 */

		br_off start = rw_probe->start;
		br_off size = rw_probe->size;
		enum brl_type lock_type = rw_probe->lock_type;

		ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

		DEBUG(10, ("brl_locktest: posix start=%ju len=%ju %s for %s "
			   "file %s\n", (uintmax_t)start, (uintmax_t)size,
			   ret ? "locked" : "unlocked",
			   fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

		/* We need to return the inverse of is_posix_locked. */
		ret = !ret;
	}

	/* no conflicts - we could have added it */
	return ret;
}
+
+/****************************************************************************
+ Query for existing locks.
+****************************************************************************/
+
+NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
+ uint64_t *psmblctx,
+ struct server_id pid,
+ br_off *pstart,
+ br_off *psize,
+ enum brl_type *plock_type,
+ enum brl_flavour lock_flav)
+{
+ unsigned int i;
+ struct lock_struct lock;
+ const struct lock_struct *locks = br_lck->lock_data;
+ files_struct *fsp = br_lck->fsp;
+
+ lock.context.smblctx = *psmblctx;
+ lock.context.pid = pid;
+ lock.context.tid = br_lck->fsp->conn->cnum;
+ lock.start = *pstart;
+ lock.size = *psize;
+ lock.fnum = fsp->fnum;
+ lock.lock_type = *plock_type;
+ lock.lock_flav = lock_flav;
+
+ /* Make sure existing locks don't conflict */
+ for (i=0; i < br_lck->num_locks; i++) {
+ const struct lock_struct *exlock = &locks[i];
+ bool conflict = False;
+
+ if (exlock->lock_flav == WINDOWS_LOCK) {
+ conflict = brl_conflict(exlock, &lock);
+ } else {
+ conflict = brl_conflict_posix(exlock, &lock);
+ }
+
+ if (conflict) {
+ *psmblctx = exlock->context.smblctx;
+ *pstart = exlock->start;
+ *psize = exlock->size;
+ *plock_type = exlock->lock_type;
+ return NT_STATUS_LOCK_NOT_GRANTED;
+ }
+ }
+
+ /*
+ * There is no lock held by an SMB daemon, check to
+ * see if there is a POSIX lock from a UNIX or NFS process.
+ */
+
+ if(lp_posix_locking(fsp->conn->params)) {
+ bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);
+
+ DEBUG(10, ("brl_lockquery: posix start=%ju len=%ju %s for %s "
+ "file %s\n", (uintmax_t)*pstart,
+ (uintmax_t)*psize, ret ? "locked" : "unlocked",
+ fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));
+
+ if (ret) {
+ /* Hmmm. No clue what to set smblctx to - use -1. */
+ *psmblctx = 0xFFFFFFFFFFFFFFFFLL;
+ return NT_STATUS_LOCK_NOT_GRANTED;
+ }
+ }
+
+ return NT_STATUS_OK;
+}
+
+
/****************************************************************************
 Remove any locks associated with an open file.
 Called on close: walks a copy of the lock array and unlocks every entry
 owned by this (tid, pid, fnum).
****************************************************************************/
+
void brl_close_fnum(struct byte_range_lock *br_lck)
{
	files_struct *fsp = br_lck->fsp;
	uint32_t tid = fsp->conn->cnum;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	struct server_id pid = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct lock_struct *locks_copy;
	unsigned int num_locks_copy;

	/* Copy the current lock array. brl_unlock() below rewrites
	   br_lck->lock_data, so we must iterate over a snapshot. */
	if (br_lck->num_locks) {
		locks_copy = (struct lock_struct *)talloc_memdup(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
		if (!locks_copy) {
			smb_panic("brl_close_fnum: talloc failed");
			}
	} else {
		locks_copy = NULL;
	}

	num_locks_copy = br_lck->num_locks;

	for (i=0; i < num_locks_copy; i++) {
		struct lock_struct *lock = &locks_copy[i];

		/* Unlock only entries owned by this handle. */
		if (lock->context.tid == tid &&
		    server_id_equal(&lock->context.pid, &pid) &&
		    (lock->fnum == fnum)) {
			brl_unlock(
				br_lck,
				lock->context.smblctx,
				pid,
				lock->start,
				lock->size,
				lock->lock_flav);
		}
	}
}
+
+bool brl_mark_disconnected(struct files_struct *fsp)
+{
+ uint32_t tid = fsp->conn->cnum;
+ uint64_t smblctx;
+ uint64_t fnum = fsp->fnum;
+ unsigned int i;
+ struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
+ struct byte_range_lock *br_lck = NULL;
+
+ if (fsp->op == NULL) {
+ return false;
+ }
+
+ smblctx = fsp->op->global->open_persistent_id;
+
+ if (!fsp->op->global->durable) {
+ return false;
+ }
+
+ if (fsp->current_lock_count == 0) {
+ return true;
+ }
+
+ br_lck = brl_get_locks(talloc_tos(), fsp);
+ if (br_lck == NULL) {
+ return false;
+ }
+
+ for (i=0; i < br_lck->num_locks; i++) {
+ struct lock_struct *lock = &br_lck->lock_data[i];
+
+ /*
+ * as this is a durable handle, we only expect locks
+ * of the current file handle!
+ */
+
+ if (lock->context.smblctx != smblctx) {
+ TALLOC_FREE(br_lck);
+ return false;
+ }
+
+ if (lock->context.tid != tid) {
+ TALLOC_FREE(br_lck);
+ return false;
+ }
+
+ if (!server_id_equal(&lock->context.pid, &self)) {
+ TALLOC_FREE(br_lck);
+ return false;
+ }
+
+ if (lock->fnum != fnum) {
+ TALLOC_FREE(br_lck);
+ return false;
+ }
+
+ server_id_set_disconnected(&lock->context.pid);
+ lock->context.tid = TID_FIELD_INVALID;
+ lock->fnum = FNUM_FIELD_INVALID;
+ }
+
+ br_lck->modified = true;
+ TALLOC_FREE(br_lck);
+ return true;
+}
+
+bool brl_reconnect_disconnected(struct files_struct *fsp)
+{
+ uint32_t tid = fsp->conn->cnum;
+ uint64_t smblctx;
+ uint64_t fnum = fsp->fnum;
+ unsigned int i;
+ struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
+ struct byte_range_lock *br_lck = NULL;
+
+ if (fsp->op == NULL) {
+ return false;
+ }
+
+ smblctx = fsp->op->global->open_persistent_id;
+
+ if (!fsp->op->global->durable) {
+ return false;
+ }
+
+ /*
+ * When reconnecting, we do not want to validate the brlock entries
+ * and thereby remove our own (disconnected) entries but reactivate
+ * them instead.
+ */
+
+ br_lck = brl_get_locks(talloc_tos(), fsp);
+ if (br_lck == NULL) {
+ return false;
+ }
+
+ if (br_lck->num_locks == 0) {
+ TALLOC_FREE(br_lck);
+ return true;
+ }
+
+ for (i=0; i < br_lck->num_locks; i++) {
+ struct lock_struct *lock = &br_lck->lock_data[i];
+
+ /*
+ * as this is a durable handle we only expect locks
+ * of the current file handle!
+ */
+
+ if (lock->context.smblctx != smblctx) {
+ TALLOC_FREE(br_lck);
+ return false;
+ }
+
+ if (lock->context.tid != TID_FIELD_INVALID) {
+ TALLOC_FREE(br_lck);
+ return false;
+ }
+
+ if (!server_id_is_disconnected(&lock->context.pid)) {
+ TALLOC_FREE(br_lck);
+ return false;
+ }
+
+ if (lock->fnum != FNUM_FIELD_INVALID) {
+ TALLOC_FREE(br_lck);
+ return false;
+ }
+
+ lock->context.pid = self;
+ lock->context.tid = tid;
+ lock->fnum = fnum;
+ }
+
+ fsp->current_lock_count = br_lck->num_locks;
+ br_lck->modified = true;
+ TALLOC_FREE(br_lck);
+ return true;
+}
+
/*
 * Callback bundle threaded through dbwrap_traverse() into
 * brl_traverse_fn(): per-lock callback plus its opaque argument.
 */
struct brl_forall_cb {
	void (*fn)(struct file_id id, struct server_id pid,
		   enum brl_type lock_type,
		   enum brl_flavour lock_flav,
		   br_off start, br_off size,
		   void *private_data);
	void *private_data;
};
+
+/****************************************************************************
+ Traverse the whole database with this function, calling traverse_callback
+ on each lock.
+****************************************************************************/
+
/* dbwrap_traverse() worker: decode one brlock record and invoke the
   user callback once per lock. Returns 0 to continue, -1 to stop. */
static int brl_traverse_fn(struct db_record *rec, void *state)
{
	struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
	struct lock_struct *locks;
	struct file_id *key;
	unsigned int i;
	unsigned int num_locks = 0;
	TDB_DATA dbkey;
	TDB_DATA value;

	dbkey = dbwrap_record_get_key(rec);
	value = dbwrap_record_get_value(rec);

	/* In a traverse function we must make a copy of
	   dbuf before modifying it. */

	locks = (struct lock_struct *)talloc_memdup(
		talloc_tos(), value.dptr, value.dsize);
	if (!locks) {
		return -1; /* Terminate traversal. */
	}

	/* The record key is the raw file_id. */
	key = (struct file_id *)dbkey.dptr;
	num_locks = value.dsize/sizeof(*locks);

	if (cb->fn) {
		for ( i=0; i<num_locks; i++) {
			cb->fn(*key,
			       locks[i].context.pid,
			       locks[i].lock_type,
			       locks[i].lock_flav,
			       locks[i].start,
			       locks[i].size,
			       cb->private_data);
		}
	}

	TALLOC_FREE(locks);
	return 0;
}
+
+/*******************************************************************
+ Call the specified function on each lock in the database.
+********************************************************************/
+
+int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
+ enum brl_type lock_type,
+ enum brl_flavour lock_flav,
+ br_off start, br_off size,
+ void *private_data),
+ void *private_data)
+{
+ struct brl_forall_cb cb;
+ NTSTATUS status;
+ int count = 0;
+
+ if (!brlock_db) {
+ return 0;
+ }
+ cb.fn = fn;
+ cb.private_data = private_data;
+ status = dbwrap_traverse(brlock_db, brl_traverse_fn, &cb, &count);
+
+ if (!NT_STATUS_IS_OK(status)) {
+ return -1;
+ } else {
+ return count;
+ }
+}
+
+/*******************************************************************
+ Store a potentially modified set of byte range lock data back into
+ the database.
+ Unlock the record.
+********************************************************************/
+
/* Persist a (possibly modified) lock array back into brlock_db and
   release the record lock. Called from the talloc destructor. */
static void byte_range_lock_flush(struct byte_range_lock *br_lck)
{
	unsigned i;
	struct lock_struct *locks = br_lck->lock_data;

	if (!br_lck->modified) {
		DEBUG(10, ("br_lck not modified\n"));
		goto done;
	}

	/* Swap-delete all entries marked pid 0 by brl_locktest()
	   (locks whose owning process no longer exists). */
	i = 0;

	while (i < br_lck->num_locks) {
		if (locks[i].context.pid.pid == 0) {
			/*
			 * Autocleanup, the process conflicted and does not
			 * exist anymore.
			 */
			locks[i] = locks[br_lck->num_locks-1];
			br_lck->num_locks -= 1;
		} else {
			i += 1;
		}
	}

	if (br_lck->num_locks == 0) {
		/* No locks - delete this entry. */
		NTSTATUS status = dbwrap_record_delete(br_lck->record);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("delete_rec returned %s\n",
				  nt_errstr(status)));
			smb_panic("Could not delete byte range lock entry");
		}
	} else {
		/* Store the raw lock array as the record value. */
		TDB_DATA data = {
			.dsize = br_lck->num_locks * sizeof(struct lock_struct),
			.dptr = (uint8_t *)br_lck->lock_data,
		};
		NTSTATUS status;

		status = dbwrap_record_store(br_lck->record, data, TDB_REPLACE);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("store returned %s\n", nt_errstr(status)));
			smb_panic("Could not store byte range mode entry");
		}
	}

	DEBUG(10, ("seqnum=%d\n", dbwrap_get_seqnum(brlock_db)));

 done:
	br_lck->modified = false;
	/* Freeing the record releases the db lock on this file_id. */
	TALLOC_FREE(br_lck->record);
}
+
/* talloc destructor: flush pending changes and drop the record lock. */
static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
{
	byte_range_lock_flush(br_lck);
	return 0;
}
+
+static bool brl_parse_data(struct byte_range_lock *br_lck, TDB_DATA data)
+{
+ size_t data_len;
+
+ if (data.dsize == 0) {
+ return true;
+ }
+ if (data.dsize % sizeof(struct lock_struct) != 0) {
+ DEBUG(1, ("Invalid data size: %u\n", (unsigned)data.dsize));
+ return false;
+ }
+
+ br_lck->num_locks = data.dsize / sizeof(struct lock_struct);
+ data_len = br_lck->num_locks * sizeof(struct lock_struct);
+
+ br_lck->lock_data = talloc_memdup(br_lck, data.dptr, data_len);
+ if (br_lck->lock_data == NULL) {
+ DEBUG(1, ("talloc_memdup failed\n"));
+ return false;
+ }
+ return true;
+}
+
+/*******************************************************************
+ Fetch a set of byte range lock data from the database.
+ Leave the record locked.
+ TALLOC_FREE(brl) will release the lock in the destructor.
+********************************************************************/
+
struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx, files_struct *fsp)
{
	TDB_DATA key, data;
	struct byte_range_lock *br_lck;

	br_lck = talloc_zero(mem_ctx, struct byte_range_lock);
	if (br_lck == NULL) {
		return NULL;
	}

	br_lck->fsp = fsp;

	/* Records are keyed by the raw file_id. */
	key.dptr = (uint8_t *)&fsp->file_id;
	key.dsize = sizeof(struct file_id);

	/* Takes the db lock on this record; held until the destructor
	   (byte_range_lock_destructor) frees br_lck->record. */
	br_lck->record = dbwrap_fetch_locked(brlock_db, br_lck, key);

	if (br_lck->record == NULL) {
		DEBUG(3, ("Could not lock byte range lock entry\n"));
		TALLOC_FREE(br_lck);
		return NULL;
	}

	data = dbwrap_record_get_value(br_lck->record);

	if (!brl_parse_data(br_lck, data)) {
		TALLOC_FREE(br_lck);
		return NULL;
	}

	/* On TALLOC_FREE(br_lck) any modifications are flushed and the
	   record lock is released. */
	talloc_set_destructor(br_lck, byte_range_lock_destructor);

	if (DEBUGLEVEL >= 10) {
		unsigned int i;
		struct file_id_buf buf;
		struct lock_struct *locks = br_lck->lock_data;
		DBG_DEBUG("%u current locks on file_id %s\n",
			  br_lck->num_locks,
			  file_id_str_buf(fsp->file_id, &buf));
		for( i = 0; i < br_lck->num_locks; i++) {
			print_lock_struct(i, &locks[i]);
		}
	}

	return br_lck;
}
+
+struct byte_range_lock *brl_get_locks_for_locking(TALLOC_CTX *mem_ctx,
+ files_struct *fsp,
+ TALLOC_CTX *req_mem_ctx,
+ const struct GUID *req_guid)
+{
+ struct byte_range_lock *br_lck = NULL;
+
+ br_lck = brl_get_locks(mem_ctx, fsp);
+ if (br_lck == NULL) {
+ return NULL;
+ }
+ SMB_ASSERT(req_mem_ctx != NULL);
+ br_lck->req_mem_ctx = req_mem_ctx;
+ SMB_ASSERT(req_guid != NULL);
+ br_lck->req_guid = req_guid;
+
+ return br_lck;
+}
+
/* Parser state for brl_get_locks_readonly(): allocation context and
   where to store the resulting byte_range_lock (NULL on failure). */
struct brl_get_locks_readonly_state {
	TALLOC_CTX *mem_ctx;
	struct byte_range_lock **br_lock;
};
+
+static void brl_get_locks_readonly_parser(TDB_DATA key, TDB_DATA data,
+ void *private_data)
+{
+ struct brl_get_locks_readonly_state *state =
+ (struct brl_get_locks_readonly_state *)private_data;
+ struct byte_range_lock *br_lck;
+
+ br_lck = talloc_pooled_object(
+ state->mem_ctx, struct byte_range_lock, 1, data.dsize);
+ if (br_lck == NULL) {
+ *state->br_lock = NULL;
+ return;
+ }
+ *br_lck = (struct byte_range_lock) { 0 };
+ if (!brl_parse_data(br_lck, data)) {
+ *state->br_lock = NULL;
+ return;
+ }
+ *state->br_lock = br_lck;
+}
+
/* Fetch the lock array for fsp without taking the record lock. The
   result is cached on the fsp and invalidated via the db seqnum. The
   returned object has record == NULL, which marks it read-only (see
   brl_locktest). */
struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
{
	struct byte_range_lock *br_lock = NULL;
	struct brl_get_locks_readonly_state state;
	NTSTATUS status;

	DEBUG(10, ("seqnum=%d, fsp->brlock_seqnum=%d\n",
		   dbwrap_get_seqnum(brlock_db), fsp->brlock_seqnum));

	if ((fsp->brlock_rec != NULL)
	    && (dbwrap_get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
		/*
		 * We have cached the brlock_rec and the database did not
		 * change.
		 */
		return fsp->brlock_rec;
	}

	/*
	 * Parse the record fresh from the database
	 */

	state.mem_ctx = fsp;
	state.br_lock = &br_lock;

	status = dbwrap_parse_record(
		brlock_db,
		make_tdb_data((uint8_t *)&fsp->file_id,
			      sizeof(fsp->file_id)),
		brl_get_locks_readonly_parser, &state);

	if (NT_STATUS_EQUAL(status,NT_STATUS_NOT_FOUND)) {
		/*
		 * No locks on this file. Return an empty br_lock.
		 */
		br_lock = talloc_zero(fsp, struct byte_range_lock);
		if (br_lock == NULL) {
			return NULL;
		}

	} else if (!NT_STATUS_IS_OK(status)) {
		DEBUG(3, ("Could not parse byte range lock record: "
			  "%s\n", nt_errstr(status)));
		return NULL;
	}
	if (br_lock == NULL) {
		/* Parser signalled allocation or decode failure. */
		return NULL;
	}

	br_lock->fsp = fsp;
	br_lock->modified = false;
	br_lock->record = NULL;

	/*
	 * Cache the brlock struct, invalidated when the dbwrap_seqnum
	 * changes. See beginning of this routine.
	 */
	TALLOC_FREE(fsp->brlock_rec);
	fsp->brlock_rec = br_lock;
	fsp->brlock_seqnum = dbwrap_get_seqnum(brlock_db);

	return br_lock;
}
+
/* Delete the whole brlock record for fid, but only if every lock in it
   is a disconnected entry belonging to the given persistent open.
   Returns true when there is nothing left to clean or the record was
   deleted; false when any entry is still live or on db errors. */
bool brl_cleanup_disconnected(struct file_id fid, uint64_t open_persistent_id)
{
	bool ret = false;
	TALLOC_CTX *frame = talloc_stackframe();
	TDB_DATA key, val;
	struct db_record *rec;
	struct lock_struct *lock;
	unsigned n, num;
	struct file_id_buf buf;
	NTSTATUS status;

	key = make_tdb_data((void*)&fid, sizeof(fid));

	/* Lock the record so nobody modifies it while we validate. */
	rec = dbwrap_fetch_locked(brlock_db, frame, key);
	if (rec == NULL) {
		DBG_INFO("failed to fetch record for file %s\n",
			 file_id_str_buf(fid, &buf));
		goto done;
	}

	val = dbwrap_record_get_value(rec);
	lock = (struct lock_struct*)val.dptr;
	num = val.dsize / sizeof(struct lock_struct);
	if (lock == NULL) {
		DBG_DEBUG("no byte range locks for file %s\n",
			  file_id_str_buf(fid, &buf));
		ret = true;
		goto done;
	}

	/* Every lock must be disconnected and from this open, else
	   refuse to touch the record at all. */
	for (n=0; n<num; n++) {
		struct lock_context *ctx = &lock[n].context;

		if (!server_id_is_disconnected(&ctx->pid)) {
			struct server_id_buf tmp;
			DBG_INFO("byte range lock "
				 "%s used by server %s, do not cleanup\n",
				 file_id_str_buf(fid, &buf),
				 server_id_str_buf(ctx->pid, &tmp));
			goto done;
		}

		if (ctx->smblctx != open_persistent_id) {
			DBG_INFO("byte range lock %s expected smblctx %"PRIu64" "
				 "but found %"PRIu64", do not cleanup\n",
				 file_id_str_buf(fid, &buf),
				 open_persistent_id,
				 ctx->smblctx);
			goto done;
		}
	}

	status = dbwrap_record_delete(rec);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_INFO("failed to delete record "
			 "for file %s from %s, open %"PRIu64": %s\n",
			 file_id_str_buf(fid, &buf),
			 dbwrap_name(brlock_db),
			 open_persistent_id,
			 nt_errstr(status));
		goto done;
	}

	DBG_DEBUG("file %s cleaned up %u entries from open %"PRIu64"\n",
		  file_id_str_buf(fid, &buf),
		  num,
		  open_persistent_id);

	ret = true;
done:
	/* Frees rec and thereby drops the record lock. */
	talloc_free(frame);
	return ret;
}
diff --git a/source3/locking/leases_db.c b/source3/locking/leases_db.c
new file mode 100644
index 0000000..855d614
--- /dev/null
+++ b/source3/locking/leases_db.c
@@ -0,0 +1,726 @@
+/*
+ Unix SMB/CIFS implementation.
+ Map lease keys to file ids
+ Copyright (C) Volker Lendecke 2013
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#include "includes.h"
+#include "system/filesys.h"
+#include "locking/leases_db.h"
+#include "dbwrap/dbwrap.h"
+#include "dbwrap/dbwrap_open.h"
+#include "util_tdb.h"
+#include "ndr.h"
+#include "librpc/gen_ndr/ndr_leases_db.h"
+
+#undef DBGC_CLASS
+#define DBGC_CLASS DBGC_LOCKING
+
+/* the leases database handle */
+static struct db_context *leases_db;
+
+bool leases_db_init(bool read_only)
+{
+ char *db_path;
+
+ if (leases_db) {
+ return true;
+ }
+
+ db_path = lock_path(talloc_tos(), "leases.tdb");
+ if (db_path == NULL) {
+ return false;
+ }
+
+ leases_db = db_open(NULL, db_path, 0,
+ TDB_DEFAULT|
+ TDB_VOLATILE|
+ TDB_CLEAR_IF_FIRST|
+ TDB_SEQNUM|
+ TDB_INCOMPATIBLE_HASH,
+ read_only ? O_RDONLY : O_RDWR|O_CREAT, 0644,
+ DBWRAP_LOCK_ORDER_4, DBWRAP_FLAG_NONE);
+ TALLOC_FREE(db_path);
+ if (leases_db == NULL) {
+ DEBUG(1, ("ERROR: Failed to initialise leases database\n"));
+ return false;
+ }
+
+ return true;
+}
+
/* Fixed-size storage for an NDR-pushed leases_db_key (client GUID +
   lease key); used as the TDB key. */
struct leases_db_key_buf {
	uint8_t buf[32];
};
+
/* Marshal (client_guid, lease_key) into the caller-provided fixed
   buffer and return it as the TDB key. The push into a fixed 32-byte
   blob cannot fail for this struct, hence the assert. */
static TDB_DATA leases_db_key(struct leases_db_key_buf *buf,
			      const struct GUID *client_guid,
			      const struct smb2_lease_key *lease_key)
{
	struct leases_db_key db_key = {
		.client_guid = *client_guid,
		.lease_key = *lease_key };
	DATA_BLOB blob = { .data = buf->buf, .length = sizeof(buf->buf) };
	enum ndr_err_code ndr_err;

	if (DEBUGLEVEL >= 10) {
		DBG_DEBUG("\n");
		NDR_PRINT_DEBUG(leases_db_key, &db_key);
	}

	ndr_err = ndr_push_struct_into_fixed_blob(
		&blob, &db_key, (ndr_push_flags_fn_t)ndr_push_leases_db_key);
	SMB_ASSERT(NDR_ERR_CODE_IS_SUCCESS(ndr_err));

	return (TDB_DATA) { .dptr = buf->buf, .dsize = sizeof(buf->buf) };
}
+
/* State for leases_db_do_locked(): the caller's modify callback, its
   argument, and the resulting status of callback + store/delete. */
struct leases_db_do_locked_state {
	void (*fn)(struct leases_db_value *value,
		   bool *modified,
		   void *private_data);
	void *private_data;
	NTSTATUS status;
};
+
/* dbwrap_do_locked() worker: pull the record into a leases_db_value,
   run the caller's modify callback, then delete the record (when no
   files remain) or push and store the updated value. */
static void leases_db_do_locked_fn(
	struct db_record *rec,
	TDB_DATA db_value,
	void *private_data)
{
	struct leases_db_do_locked_state *state = private_data;
	DATA_BLOB blob = { .data = db_value.dptr, .length = db_value.dsize };
	struct leases_db_value *value = NULL;
	enum ndr_err_code ndr_err;
	bool modified = false;

	value = talloc_zero(talloc_tos(), struct leases_db_value);
	if (value == NULL) {
		state->status = NT_STATUS_NO_MEMORY;
		goto done;
	}

	/* An absent/empty record decodes to the zeroed value above. */
	if (blob.length != 0) {
		ndr_err = ndr_pull_struct_blob_all(
			&blob,
			value,
			value,
			(ndr_pull_flags_fn_t)ndr_pull_leases_db_value);
		if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
			DBG_ERR("ndr_pull_struct_blob_failed: %s\n",
				ndr_errstr(ndr_err));
			state->status = ndr_map_error2ntstatus(ndr_err);
			goto done;
		}
	}

	state->fn(value, &modified, state->private_data);

	if (!modified) {
		/* Callback made no change; leave the record alone. */
		goto done;
	}

	if (value->num_files == 0) {
		/* Last file gone: drop the whole lease record. */
		state->status = dbwrap_record_delete(rec);
		if (!NT_STATUS_IS_OK(state->status)) {
			DBG_ERR("dbwrap_record_delete returned %s\n",
				nt_errstr(state->status));
		}
		goto done;
	}

	ndr_err = ndr_push_struct_blob(
		&blob,
		value,
		value,
		(ndr_push_flags_fn_t)ndr_push_leases_db_value);
	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		DBG_ERR("ndr_push_struct_blob_failed: %s\n",
			ndr_errstr(ndr_err));
		state->status = ndr_map_error2ntstatus(ndr_err);
		goto done;
	}

	if (DEBUGLEVEL >= 10) {
		DBG_DEBUG("\n");
		NDR_PRINT_DEBUG(leases_db_value, value);
	}

	db_value = make_tdb_data(blob.data, blob.length);

	state->status = dbwrap_record_store(rec, db_value, 0);
	if (!NT_STATUS_IS_OK(state->status)) {
		DBG_ERR("dbwrap_record_store returned %s\n",
			nt_errstr(state->status));
	}

done:
	TALLOC_FREE(value);
}
+
+static NTSTATUS leases_db_do_locked(
+ const struct GUID *client_guid,
+ const struct smb2_lease_key *lease_key,
+ void (*fn)(struct leases_db_value *value,
+ bool *modified,
+ void *private_data),
+ void *private_data)
+{
+ struct leases_db_key_buf keybuf;
+ TDB_DATA db_key = leases_db_key(&keybuf, client_guid, lease_key);
+ struct leases_db_do_locked_state state = {
+ .fn = fn, .private_data = private_data,
+ };
+ NTSTATUS status;
+
+ if (!leases_db_init(false)) {
+ return NT_STATUS_INTERNAL_ERROR;
+ }
+
+ status = dbwrap_do_locked(
+ leases_db, db_key, leases_db_do_locked_fn, &state);
+ if (!NT_STATUS_IS_OK(status)) {
+ return status;
+ }
+ return state.status;
+}
+
/* Parameters for leases_db_add_fn(), plus the per-call result. The
   string pointers are borrowed from the caller for the duration of the
   locked operation. */
struct leases_db_add_state {
	const struct file_id *id;
	uint32_t current_state;
	uint16_t lease_version;
	uint16_t epoch;
	const char *servicepath;
	const char *base_name;
	const char *stream_name;
	NTSTATUS status;
};
+
/* Modify callback for leases_db_add(): append one file entry to the
   lease record, initialising the lease state iff this is the first
   file under this (client_guid, lease_key). */
static void leases_db_add_fn(
	struct leases_db_value *value, bool *modified, void *private_data)
{
	struct leases_db_add_state *state = private_data;
	struct leases_db_file *tmp = NULL;
	uint32_t i;

	/* id must be unique. */
	for (i = 0; i < value->num_files; i++) {
		if (file_id_equal(state->id, &value->files[i].id)) {
			state->status = NT_STATUS_OBJECT_NAME_COLLISION;
			return;
		}
	}

	if (value->num_files == 0) {
		/* new record */
		value->current_state = state->current_state;
		value->lease_version = state->lease_version;
		value->epoch = state->epoch;
	}

	tmp = talloc_realloc(
		value,
		value->files,
		struct leases_db_file,
		value->num_files + 1);
	if (tmp == NULL) {
		state->status = NT_STATUS_NO_MEMORY;
		return;
	}
	value->files = tmp;

	/* NOTE(review): the name pointers are aliased from state, not
	   copied; presumably the NDR push in leases_db_do_locked_fn()
	   duplicates them into the stored blob — confirm. */
	value->files[value->num_files] = (struct leases_db_file) {
		.id = *state->id,
		.servicepath = state->servicepath,
		.base_name = state->base_name,
		.stream_name = state->stream_name,
	};
	value->num_files += 1;

	*modified = true;
}
+
+NTSTATUS leases_db_add(const struct GUID *client_guid,
+ const struct smb2_lease_key *lease_key,
+ const struct file_id *id,
+ uint32_t current_state,
+ uint16_t lease_version,
+ uint16_t epoch,
+ const char *servicepath,
+ const char *base_name,
+ const char *stream_name)
+{
+ struct leases_db_add_state state = {
+ .id = id,
+ .current_state = current_state,
+ .lease_version = lease_version,
+ .epoch = epoch,
+ .servicepath = servicepath,
+ .base_name = base_name,
+ .stream_name = stream_name,
+ };
+ NTSTATUS status;
+
+ status = leases_db_do_locked(
+ client_guid, lease_key, leases_db_add_fn, &state);
+ if (!NT_STATUS_IS_OK(status)) {
+ DBG_DEBUG("leases_db_do_locked failed: %s\n",
+ nt_errstr(status));
+ return status;
+ }
+ return state.status;
+}
+
/* Parameters/result for leases_db_del_fn(): the file to remove. */
struct leases_db_del_state {
	const struct file_id *id;
	NTSTATUS status;
};
+
+static void leases_db_del_fn(
+ struct leases_db_value *value, bool *modified, void *private_data)
+{
+ struct leases_db_del_state *state = private_data;
+ uint32_t i;
+
+ for (i = 0; i < value->num_files; i++) {
+ if (file_id_equal(state->id, &value->files[i].id)) {
+ break;
+ }
+ }
+ if (i == value->num_files) {
+ state->status = NT_STATUS_NOT_FOUND;
+ return;
+ }
+
+ value->files[i] = value->files[value->num_files-1];
+ value->num_files -= 1;
+
+ *modified = true;
+}
+
+NTSTATUS leases_db_del(const struct GUID *client_guid,
+ const struct smb2_lease_key *lease_key,
+ const struct file_id *id)
+{
+ struct leases_db_del_state state = { .id = id };
+ NTSTATUS status;
+
+ status = leases_db_do_locked(
+ client_guid, lease_key, leases_db_del_fn, &state);
+ if (!NT_STATUS_IS_OK(status)) {
+ DBG_DEBUG("leases_db_do_locked failed: %s\n",
+ nt_errstr(status));
+ return status;
+ }
+ return state.status;
+}
+
/* State for leases_db_parse(): user callback over the decoded file
   list, plus the decode/parse status. */
struct leases_db_fetch_state {
	void (*parser)(uint32_t num_files,
			const struct leases_db_file *files,
			void *private_data);
	void *private_data;
	NTSTATUS status;
};
+
/* dbwrap_parse_record() callback: NDR-decode the record value and hand
   the file list to the user's parser. Decoded data only lives for the
   duration of this call. */
static void leases_db_parser(TDB_DATA key, TDB_DATA data, void *private_data)
{
	struct leases_db_fetch_state *state =
		(struct leases_db_fetch_state *)private_data;
	DATA_BLOB blob = { .data = data.dptr, .length = data.dsize };
	enum ndr_err_code ndr_err;
	struct leases_db_value *value;

	value = talloc(talloc_tos(), struct leases_db_value);
	if (value == NULL) {
		state->status = NT_STATUS_NO_MEMORY;
		return;
	}

	ndr_err = ndr_pull_struct_blob_all(
		&blob, value, value,
		(ndr_pull_flags_fn_t)ndr_pull_leases_db_value);
	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
		DEBUG(10, ("%s: ndr_pull_struct_blob_failed: %s\n",
			   __func__, ndr_errstr(ndr_err)));
		TALLOC_FREE(value);
		state->status = ndr_map_error2ntstatus(ndr_err);
		return;
	}

	if (DEBUGLEVEL >= 10) {
		DEBUG(10, ("%s:\n", __func__));
		NDR_PRINT_DEBUG(leases_db_value, value);
	}

	state->parser(value->num_files,
		      value->files,
		      state->private_data);

	TALLOC_FREE(value);
	state->status = NT_STATUS_OK;
}
+
+NTSTATUS leases_db_parse(const struct GUID *client_guid,
+ const struct smb2_lease_key *lease_key,
+ void (*parser)(uint32_t num_files,
+ const struct leases_db_file *files,
+ void *private_data),
+ void *private_data)
+{
+ struct leases_db_key_buf keybuf;
+ TDB_DATA db_key = leases_db_key(&keybuf, client_guid, lease_key);
+ struct leases_db_fetch_state state;
+ NTSTATUS status;
+
+ if (!leases_db_init(true)) {
+ return NT_STATUS_INTERNAL_ERROR;
+ }
+
+ state = (struct leases_db_fetch_state) {
+ .parser = parser,
+ .private_data = private_data,
+ .status = NT_STATUS_OK
+ };
+
+ status = dbwrap_parse_record(leases_db, db_key, leases_db_parser,
+ &state);
+ if (!NT_STATUS_IS_OK(status)) {
+ return status;
+ }
+ return state.status;
+}
+
/* Parameters/result for leases_db_rename_fn(): file to update and its
   new name components (borrowed pointers). */
struct leases_db_rename_state {
	const struct file_id *id;
	const char *servicename_new;
	const char *filename_new;
	const char *stream_name_new;
	NTSTATUS status;
};
+
+static void leases_db_rename_fn(
+ struct leases_db_value *value, bool *modified, void *private_data)
+{
+ struct leases_db_rename_state *state = private_data;
+ struct leases_db_file *file = NULL;
+ uint32_t i;
+
+ /* id must exist. */
+ for (i = 0; i < value->num_files; i++) {
+ if (file_id_equal(state->id, &value->files[i].id)) {
+ break;
+ }
+ }
+ if (i == value->num_files) {
+ state->status = NT_STATUS_NOT_FOUND;
+ return;
+ }
+
+ file = &value->files[i];
+ file->servicepath = state->servicename_new;
+ file->base_name = state->filename_new;
+ file->stream_name = state->stream_name_new;
+
+ *modified = true;
+}
+
+NTSTATUS leases_db_rename(const struct GUID *client_guid,
+ const struct smb2_lease_key *lease_key,
+ const struct file_id *id,
+ const char *servicename_new,
+ const char *filename_new,
+ const char *stream_name_new)
+{
+ struct leases_db_rename_state state = {
+ .id = id,
+ .servicename_new = servicename_new,
+ .filename_new = filename_new,
+ .stream_name_new = stream_name_new,
+ };
+ NTSTATUS status;
+
+ status = leases_db_do_locked(
+ client_guid, lease_key, leases_db_rename_fn, &state);
+ if (!NT_STATUS_IS_OK(status)) {
+ DBG_DEBUG("leases_db_do_locked failed: %s\n",
+ nt_errstr(status));
+ return status;
+ }
+ return state.status;
+}
+
/* New lease state to install via leases_db_set_fn(). No status field:
   leases_db_set() does not propagate a per-callback error. */
struct leases_db_set_state {
	uint32_t current_state;
	bool breaking;
	uint32_t breaking_to_requested;
	uint32_t breaking_to_required;
	uint16_t lease_version;
	uint16_t epoch;
};
+
+static void leases_db_set_fn(
+ struct leases_db_value *value, bool *modified, void *private_data)
+{
+ struct leases_db_set_state *state = private_data;
+
+ if (value->num_files == 0) {
+ DBG_WARNING("leases_db_set on new entry\n");
+ return;
+ }
+ value->current_state = state->current_state;
+ value->breaking = state->breaking;
+ value->breaking_to_requested = state->breaking_to_requested;
+ value->breaking_to_required = state->breaking_to_required;
+ value->lease_version = state->lease_version;
+ value->epoch = state->epoch;
+ *modified = true;
+}
+
+/*
+ * Store the current state / breaking state / version / epoch of a lease.
+ *
+ * Note: the "entry has no files" case detected by leases_db_set_fn() is
+ * only logged there; this wrapper still returns NT_STATUS_OK for it.
+ */
+NTSTATUS leases_db_set(const struct GUID *client_guid,
+		       const struct smb2_lease_key *lease_key,
+		       uint32_t current_state,
+		       bool breaking,
+		       uint32_t breaking_to_requested,
+		       uint32_t breaking_to_required,
+		       uint16_t lease_version,
+		       uint16_t epoch)
+{
+	struct leases_db_set_state state = {
+		.current_state = current_state,
+		.breaking = breaking,
+		.breaking_to_requested = breaking_to_requested,
+		.breaking_to_required = breaking_to_required,
+		.lease_version = lease_version,
+		.epoch = epoch,
+	};
+	NTSTATUS status;
+
+	status = leases_db_do_locked(
+		client_guid, lease_key, leases_db_set_fn, &state);
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_DEBUG("leases_db_do_locked failed: %s\n",
+			  nt_errstr(status));
+		return status;
+	}
+	return NT_STATUS_OK;
+}
+
+/*
+ * Out-parameters for leases_db_get().  Any of the pointers may be NULL
+ * if the caller is not interested in that field.
+ */
+struct leases_db_get_state {
+	const struct file_id *file_id;
+	uint32_t *current_state;
+	bool *breaking;
+	uint32_t *breaking_to_requested;
+	uint32_t *breaking_to_required;
+	uint16_t *lease_version;
+	uint16_t *epoch;
+	NTSTATUS status;
+};
+
+/*
+ * dbwrap_parse_record() callback: NDR-decode the full leases_db_value,
+ * require that state->file_id is one of the files attached to the lease,
+ * then copy the requested fields out through the non-NULL pointers.
+ */
+static void leases_db_get_fn(TDB_DATA key, TDB_DATA data, void *private_data)
+{
+	struct leases_db_get_state *state = private_data;
+	DATA_BLOB blob = { .data = data.dptr, .length = data.dsize };
+	enum ndr_err_code ndr_err;
+	struct leases_db_value *value;
+	uint32_t i;
+
+	value = talloc(talloc_tos(), struct leases_db_value);
+	if (value == NULL) {
+		state->status = NT_STATUS_NO_MEMORY;
+		return;
+	}
+
+	ndr_err = ndr_pull_struct_blob_all(
+		&blob, value, value,
+		(ndr_pull_flags_fn_t)ndr_pull_leases_db_value);
+	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
+		DBG_ERR("ndr_pull_struct_blob_failed: %s\n",
+			ndr_errstr(ndr_err));
+		TALLOC_FREE(value);
+		state->status = ndr_map_error2ntstatus(ndr_err);
+		return;
+	}
+
+	if (DEBUGLEVEL >= 10) {
+		DBG_DEBUG("\n");
+		NDR_PRINT_DEBUG(leases_db_value, value);
+	}
+
+	/* id must exist. */
+	for (i = 0; i < value->num_files; i++) {
+		if (file_id_equal(state->file_id, &value->files[i].id)) {
+			break;
+		}
+	}
+
+	if (i == value->num_files) {
+		state->status = NT_STATUS_NOT_FOUND;
+		TALLOC_FREE(value);
+		return;
+	}
+
+	/* Copy out only what the caller asked for. */
+	if (state->current_state != NULL) {
+		*state->current_state = value->current_state;
+	};
+	if (state->breaking != NULL) {
+		*state->breaking = value->breaking;
+	};
+	if (state->breaking_to_requested != NULL) {
+		*state->breaking_to_requested = value->breaking_to_requested;
+	};
+	if (state->breaking_to_required != NULL) {
+		*state->breaking_to_required = value->breaking_to_required;
+	};
+	if (state->lease_version != NULL) {
+		*state->lease_version = value->lease_version;
+	};
+	if (state->epoch != NULL) {
+		*state->epoch = value->epoch;
+	};
+
+	TALLOC_FREE(value);
+	state->status = NT_STATUS_OK;
+}
+
+/*
+ * Read the state of one lease, checking that file_id belongs to it.
+ * All output pointers are optional (NULL == not wanted).  Opens the
+ * database read-only; no record lock is taken.
+ */
+NTSTATUS leases_db_get(const struct GUID *client_guid,
+		       const struct smb2_lease_key *lease_key,
+		       const struct file_id *file_id,
+		       uint32_t *current_state,
+		       bool *breaking,
+		       uint32_t *breaking_to_requested,
+		       uint32_t *breaking_to_required,
+		       uint16_t *lease_version,
+		       uint16_t *epoch)
+{
+	struct leases_db_get_state state = {
+		.file_id = file_id,
+		.current_state = current_state,
+		.breaking = breaking,
+		.breaking_to_requested = breaking_to_requested,
+		.breaking_to_required = breaking_to_required,
+		.lease_version = lease_version,
+		.epoch = epoch,
+	};
+	struct leases_db_key_buf keybuf;
+	TDB_DATA db_key = leases_db_key(&keybuf, client_guid, lease_key);
+	NTSTATUS status;
+
+	if (!leases_db_init(true)) {
+		return NT_STATUS_INTERNAL_ERROR;
+	}
+
+	status = dbwrap_parse_record(
+		leases_db, db_key, leases_db_get_fn, &state);
+	if (!NT_STATUS_IS_OK(status)) {
+		return status;
+	}
+	return state.status;
+}
+
+/* Result of leases_db_get_current_state_fn(): seqnum + first uint32. */
+struct leases_db_get_current_state_state {
+	int seqnum;
+	uint32_t current_state;
+	NTSTATUS status;
+};
+
+/*
+ * This function is an optimization that
+ * relies on the fact that the
+ * smb2_lease_state current_state
+ * (which is a uint32_t size)
+ * from struct leases_db_value is the first
+ * entry in the ndr-encoded struct leases_db_value.
+ * Read it without having to ndr decode all
+ * the values in struct leases_db_value.
+ */
+
+static void leases_db_get_current_state_fn(
+	TDB_DATA key, TDB_DATA data, void *private_data)
+{
+	struct leases_db_get_current_state_state *state = private_data;
+	struct ndr_pull ndr;
+	enum ndr_err_code ndr_err;
+
+	/* Record must at least hold the leading uint32_t current_state. */
+	if (data.dsize < sizeof(uint32_t)) {
+		state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
+		return;
+	}
+
+	/* Snapshot the seqnum while we hold the record. */
+	state->seqnum = dbwrap_get_seqnum(leases_db);
+
+	ndr = (struct ndr_pull) {
+		.data = data.dptr, .data_size = data.dsize,
+	};
+	ndr_err = ndr_pull_uint32(&ndr, NDR_SCALARS, &state->current_state);
+	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
+		state->status = ndr_map_error2ntstatus(ndr_err);
+	}
+}
+
+/*
+ * Fast path to read just the current lease state.
+ *
+ * *database_seqnum acts as a cache token: if it still matches the
+ * database's sequence number, nothing can have changed and we return
+ * NT_STATUS_OK without touching the record (*current_state unchanged).
+ * Otherwise the record is parsed and both out-params are refreshed.
+ */
+NTSTATUS leases_db_get_current_state(
+	const struct GUID *client_guid,
+	const struct smb2_lease_key *lease_key,
+	int *database_seqnum,
+	uint32_t *current_state)
+{
+	struct leases_db_get_current_state_state state = { 0 };
+	struct leases_db_key_buf keybuf;
+	TDB_DATA db_key = { 0 };
+	NTSTATUS status;
+
+	if (!leases_db_init(true)) {
+		return NT_STATUS_INTERNAL_ERROR;
+	}
+
+	state.seqnum = dbwrap_get_seqnum(leases_db);
+	if (*database_seqnum == state.seqnum) {
+		return NT_STATUS_OK;
+	}
+
+	db_key = leases_db_key(&keybuf, client_guid, lease_key);
+
+	status = dbwrap_parse_record(
+		leases_db, db_key, leases_db_get_current_state_fn, &state);
+	if (!NT_STATUS_IS_OK(status)) {
+		return status;
+	}
+	*database_seqnum = state.seqnum;
+	*current_state = state.current_state;
+
+	return NT_STATUS_OK;
+}
+
+/*
+ * Copy the file_id of every leases_db_file into a freshly talloc'ed
+ * array on mem_ctx.  On success *pp_ids owns the array; caller frees.
+ */
+NTSTATUS leases_db_copy_file_ids(TALLOC_CTX *mem_ctx,
+			uint32_t num_files,
+			const struct leases_db_file *files,
+			struct file_id **pp_ids)
+{
+	uint32_t i;
+	struct file_id *ids = talloc_array(mem_ctx,
+				struct file_id,
+				num_files);
+	if (ids == NULL) {
+		return NT_STATUS_NO_MEMORY;
+	}
+
+	for (i = 0; i < num_files; i++) {
+		ids[i] = files[i].id;
+	}
+	*pp_ids = ids;
+	return NT_STATUS_OK;
+}
diff --git a/source3/locking/leases_db.h b/source3/locking/leases_db.h
new file mode 100644
index 0000000..9c149c1
--- /dev/null
+++ b/source3/locking/leases_db.h
@@ -0,0 +1,80 @@
+/*
+ * Unix SMB/CIFS implementation.
+ * leases.tdb functions
+ *
+ * Copyright (C) Volker Lendecke 2014
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _LEASES_DB_H_
+#define _LEASES_DB_H_
+
+struct GUID;
+struct smb2_lease_key;
+struct file_id;
+struct leases_db_file;
+
+bool leases_db_init(bool read_only);
+NTSTATUS leases_db_add(const struct GUID *client_guid,
+ const struct smb2_lease_key *lease_key,
+ const struct file_id *id,
+ uint32_t current_state,
+ uint16_t lease_version,
+ uint16_t epoch,
+ const char *servicepath,
+ const char *filename,
+ const char *stream_name);
+NTSTATUS leases_db_del(const struct GUID *client_guid,
+ const struct smb2_lease_key *lease_key,
+ const struct file_id *id);
+NTSTATUS leases_db_parse(const struct GUID *client_guid,
+ const struct smb2_lease_key *lease_key,
+ void (*parser)(uint32_t num_files,
+ const struct leases_db_file *files,
+ void *private_data),
+ void *private_data);
+NTSTATUS leases_db_rename(const struct GUID *client_guid,
+ const struct smb2_lease_key *lease_key,
+ const struct file_id *id,
+ const char *servicepath_new,
+ const char *filename_new,
+ const char *stream_name_new);
+NTSTATUS leases_db_set(const struct GUID *client_guid,
+ const struct smb2_lease_key *lease_key,
+ uint32_t current_state,
+ bool breaking,
+ uint32_t breaking_to_requested,
+ uint32_t breaking_to_required,
+ uint16_t lease_version,
+ uint16_t epoch);
+NTSTATUS leases_db_get(const struct GUID *client_guid,
+ const struct smb2_lease_key *lease_key,
+ const struct file_id *file_id,
+ uint32_t *current_state,
+ bool *breaking,
+ uint32_t *breaking_to_requested,
+ uint32_t *breaking_to_required,
+ uint16_t *lease_version,
+ uint16_t *epoch);
+NTSTATUS leases_db_get_current_state(
+ const struct GUID *client_guid,
+ const struct smb2_lease_key *lease_key,
+ int *database_seqnum,
+ uint32_t *current_state);
+NTSTATUS leases_db_copy_file_ids(TALLOC_CTX *mem_ctx,
+ uint32_t num_files,
+ const struct leases_db_file *files,
+ struct file_id **pp_ids);
+#endif /* _LEASES_DB_H_ */
diff --git a/source3/locking/leases_util.c b/source3/locking/leases_util.c
new file mode 100644
index 0000000..9ae4081
--- /dev/null
+++ b/source3/locking/leases_util.c
@@ -0,0 +1,77 @@
+/*
+ Unix SMB/CIFS implementation.
+ Lease utility functions
+
+ Copyright (C) Jeremy Allison 2017.
+ Copyright (C) Stefan (metze) Metzmacher 2017.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#define DBGC_CLASS DBGC_LOCKING
+#include "includes.h"
+#include "../librpc/gen_ndr/open_files.h"
+#include "locking/proto.h"
+#include "smbd/globals.h"
+#include "locking/leases_db.h"
+
+/*
+ * Translate a classic SMB1-style oplock type into the equivalent SMB2
+ * lease-state bitmask: batch -> RWH, exclusive -> RW, level2 -> R,
+ * anything else -> no lease.
+ */
+uint32_t map_oplock_to_lease_type(uint16_t op_type)
+{
+	uint32_t ret;
+
+	switch(op_type) {
+	case BATCH_OPLOCK:
+	case BATCH_OPLOCK|EXCLUSIVE_OPLOCK:
+		ret = SMB2_LEASE_READ|SMB2_LEASE_WRITE|SMB2_LEASE_HANDLE;
+		break;
+	case EXCLUSIVE_OPLOCK:
+		ret = SMB2_LEASE_READ|SMB2_LEASE_WRITE;
+		break;
+	case LEVEL_II_OPLOCK:
+		ret = SMB2_LEASE_READ;
+		break;
+	default:
+		ret = SMB2_LEASE_NONE;
+		break;
+	}
+	return ret;
+}
+
+/*
+ * Return the effective lease state of an open file handle.
+ *
+ * Non-lease oplocks are mapped through map_oplock_to_lease_type().
+ * For real leases the current state is fetched from leases.tdb, using
+ * fsp->leases_db_seqnum as a cache token so repeated calls are cheap.
+ * On lookup failure the handle is treated as having no lease (0).
+ */
+uint32_t fsp_lease_type(struct files_struct *fsp)
+{
+	NTSTATUS status;
+
+	if (fsp->oplock_type != LEASE_OPLOCK) {
+		uint32_t type = map_oplock_to_lease_type(fsp->oplock_type);
+		return type;
+	}
+
+	status = leases_db_get_current_state(
+		fsp_client_guid(fsp),
+		&fsp->lease->lease.lease_key,
+		&fsp->leases_db_seqnum,
+		&fsp->lease_type);
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_DEBUG("leases_db_get_current_state failed: %s\n",
+			  nt_errstr(status));
+		fsp->lease_type = 0; /* no lease */
+	}
+
+	return fsp->lease_type;
+}
+
+/* Convenience accessor: the client GUID of the connection owning fsp. */
+const struct GUID *fsp_client_guid(const files_struct *fsp)
+{
+	return &fsp->conn->sconn->client->global->client_guid;
+}
diff --git a/source3/locking/locking.c b/source3/locking/locking.c
new file mode 100644
index 0000000..befdc10
--- /dev/null
+++ b/source3/locking/locking.c
@@ -0,0 +1,1186 @@
+/*
+ Unix SMB/CIFS implementation.
+ Locking functions
+ Copyright (C) Andrew Tridgell 1992-2000
+ Copyright (C) Jeremy Allison 1992-2006
+ Copyright (C) Volker Lendecke 2005
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+ Revision History:
+
+ 12 aug 96: Erik.Devriendt@te6.siemens.be
+ added support for shared memory implementation of share mode locking
+
+ May 1997. Jeremy Allison (jallison@whistle.com). Modified share mode
+ locking to deal with multiple share modes per open file.
+
+ September 1997. Jeremy Allison (jallison@whistle.com). Added oplock
+ support.
+
+ rewritten completely to use new tdb code. Tridge, Dec '99
+
+ Added POSIX locking support. Jeremy Allison (jeremy@valinux.com), Apr. 2000.
+ Added Unix Extensions POSIX locking support. Jeremy Allison Mar 2006.
+*/
+
+#include "includes.h"
+#include "lib/util/time_basic.h"
+#include "system/filesys.h"
+#include "lib/util/server_id.h"
+#include "share_mode_lock.h"
+#include "share_mode_lock_private.h"
+#include "locking/proto.h"
+#include "smbd/globals.h"
+#include "dbwrap/dbwrap.h"
+#include "dbwrap/dbwrap_open.h"
+#include "../libcli/security/security.h"
+#include "serverid.h"
+#include "messages.h"
+#include "util_tdb.h"
+#include "../librpc/gen_ndr/ndr_open_files.h"
+#include "librpc/gen_ndr/ndr_file_id.h"
+#include "librpc/gen_ndr/ndr_leases_db.h"
+#include "locking/leases_db.h"
+
+#undef DBGC_CLASS
+#define DBGC_CLASS DBGC_LOCKING
+
+#define NO_LOCKING_COUNT (-1)
+
+/****************************************************************************
+ Debugging aids :-).
+****************************************************************************/
+
+/* Human-readable name of a byte-range lock type, for debug output. */
+const char *lock_type_name(enum brl_type lock_type)
+{
+	switch (lock_type) {
+	case READ_LOCK:
+		return "READ";
+	case WRITE_LOCK:
+		return "WRITE";
+	default:
+		return "other";
+	}
+}
+
+/* Human-readable name of a lock flavour (Windows vs POSIX semantics). */
+const char *lock_flav_name(enum brl_flavour lock_flav)
+{
+	return (lock_flav == WINDOWS_LOCK) ? "WINDOWS_LOCK" : "POSIX_LOCK";
+}
+
+/****************************************************************************
+ Utility function called to see if a file region is locked.
+ Called in the read/write codepath.
+****************************************************************************/
+
+/*
+ * Fill in a lock_struct describing a read/write range for strict-locking
+ * checks.  Only READ_LOCK/WRITE_LOCK are legal here.
+ *
+ * NOTE(review): the lock_flav parameter is not stored -- the flavour
+ * written into plock comes from lp_posix_cifsu_locktype(fsp).  Looks
+ * intentional (flavour is a per-share/per-handle policy), but confirm
+ * against callers before relying on the parameter.
+ */
+void init_strict_lock_struct(files_struct *fsp,
+				uint64_t smblctx,
+				br_off start,
+				br_off size,
+				enum brl_type lock_type,
+				enum brl_flavour lock_flav,
+				struct lock_struct *plock)
+{
+	SMB_ASSERT(lock_type == READ_LOCK || lock_type == WRITE_LOCK);
+
+	plock->context.smblctx = smblctx;
+	plock->context.tid = fsp->conn->cnum;
+	plock->context.pid = messaging_server_id(fsp->conn->sconn->msg_ctx);
+	plock->start = start;
+	plock->size = size;
+	plock->fnum = fsp->fnum;
+	plock->lock_type = lock_type;
+	plock->lock_flav = lp_posix_cifsu_locktype(fsp);
+}
+
+/*
+ * Check whether a read/write may proceed against existing byte-range
+ * locks.  Returns true when the I/O is allowed.
+ *
+ * Fast exits: zero-length I/O, locking disabled, or strict locking off.
+ * With "strict locking = Auto", a matching R/W lease means no other
+ * client can hold a conflicting lock, so the brlock check is skipped.
+ * Otherwise test against a read-only brlock snapshot first; only on a
+ * conflict re-test with a read-write handle so stale entries from dead
+ * processes get auto-cleaned (the slow path).
+ */
+bool strict_lock_check_default(files_struct *fsp, struct lock_struct *plock)
+{
+	struct byte_range_lock *br_lck;
+	int strict_locking = lp_strict_locking(fsp->conn->params);
+	bool ret = False;
+
+	if (plock->size == 0) {
+		return True;
+	}
+
+	if (!lp_locking(fsp->conn->params) || !strict_locking) {
+		return True;
+	}
+
+	if (strict_locking == Auto) {
+		uint32_t lease_type = fsp_lease_type(fsp);
+
+		if ((lease_type & SMB2_LEASE_READ) &&
+		     (plock->lock_type == READ_LOCK))
+		{
+			DBG_DEBUG("optimisation - read lease on file %s\n",
+				  fsp_str_dbg(fsp));
+			return true;
+		}
+
+		if ((lease_type & SMB2_LEASE_WRITE) &&
+		     (plock->lock_type == WRITE_LOCK))
+		{
+			DBG_DEBUG("optimisation - write lease on file %s\n",
+				  fsp_str_dbg(fsp));
+			return true;
+		}
+	}
+
+	br_lck = brl_get_locks_readonly(fsp);
+	if (!br_lck) {
+		return true;
+	}
+	ret = brl_locktest(br_lck, plock);
+
+	if (!ret) {
+		/*
+		 * We got a lock conflict. Retry with rw locks to enable
+		 * autocleanup. This is the slow path anyway.
+		 */
+		br_lck = brl_get_locks(talloc_tos(), fsp);
+		if (br_lck == NULL) {
+			return true;
+		}
+		ret = brl_locktest(br_lck, plock);
+		TALLOC_FREE(br_lck);
+	}
+
+	DEBUG(10, ("strict_lock_default: flavour = %s brl start=%ju "
+		   "len=%ju %s for fnum %ju file %s\n",
+		   lock_flav_name(plock->lock_flav),
+		   (uintmax_t)plock->start, (uintmax_t)plock->size,
+		   ret ? "unlocked" : "locked",
+		   (uintmax_t)plock->fnum, fsp_str_dbg(fsp)));
+
+	return ret;
+}
+
+/****************************************************************************
+ Find out if a lock could be granted - return who is blocking us if we can't.
+****************************************************************************/
+
+/*
+ * Ask whether a lock could be granted; on conflict the blocker's
+ * smblctx/offset/count/type are returned through the out parameters
+ * (see brl_lockquery()).  Handles that cannot take locks get
+ * INVALID_DEVICE_REQUEST (directories) or INVALID_HANDLE; with
+ * locking disabled the query trivially succeeds.
+ */
+NTSTATUS query_lock(files_struct *fsp,
+			uint64_t *psmblctx,
+			uint64_t *pcount,
+			uint64_t *poffset,
+			enum brl_type *plock_type,
+			enum brl_flavour lock_flav)
+{
+	struct byte_range_lock *br_lck = NULL;
+
+	if (!fsp->fsp_flags.can_lock) {
+		return fsp->fsp_flags.is_directory ?
+			NT_STATUS_INVALID_DEVICE_REQUEST :
+			NT_STATUS_INVALID_HANDLE;
+	}
+
+	if (!lp_locking(fsp->conn->params)) {
+		return NT_STATUS_OK;
+	}
+
+	br_lck = brl_get_locks_readonly(fsp);
+	if (!br_lck) {
+		return NT_STATUS_NO_MEMORY;
+	}
+
+	return brl_lockquery(br_lck,
+			psmblctx,
+			messaging_server_id(fsp->conn->sconn->msg_ctx),
+			poffset,
+			pcount,
+			plock_type,
+			lock_flav);
+}
+
+/*
+ * Bump the per-handle lock counter used to skip the lock db on close.
+ * Any POSIX-flavour lock poisons the counter to NO_LOCKING_COUNT,
+ * after which locks are no longer tracked on this handle.
+ */
+static void increment_current_lock_count(files_struct *fsp,
+	enum brl_flavour lock_flav)
+{
+	if (lock_flav == WINDOWS_LOCK &&
+	    fsp->current_lock_count != NO_LOCKING_COUNT) {
+		/* blocking ie. pending, locks also count here,
+		 * as this is an efficiency counter to avoid checking
+		 * the lock db. on close. JRA. */
+
+		fsp->current_lock_count++;
+	} else {
+		/* Notice that this has had a POSIX lock request.
+		 * We can't count locks after this so forget them.
+		 */
+		fsp->current_lock_count = NO_LOCKING_COUNT;
+	}
+}
+
+/* Counterpart of increment_current_lock_count() for unlock requests. */
+static void decrement_current_lock_count(files_struct *fsp,
+	enum brl_flavour lock_flav)
+{
+	if (lock_flav == WINDOWS_LOCK &&
+	    fsp->current_lock_count != NO_LOCKING_COUNT) {
+		SMB_ASSERT(fsp->current_lock_count > 0);
+		fsp->current_lock_count--;
+	}
+}
+
+/****************************************************************************
+ Utility function called by locking requests.
+****************************************************************************/
+
+/* In/out bundle for do_lock_fn(), run under the share-mode record lock. */
+struct do_lock_state {
+	struct files_struct *fsp;
+	TALLOC_CTX *req_mem_ctx;
+	const struct GUID *req_guid;
+	uint64_t smblctx;
+	uint64_t count;
+	uint64_t offset;
+	enum brl_type lock_type;
+	enum brl_flavour lock_flav;
+
+	/* Filled in on conflict: who currently blocks the request. */
+	struct server_id blocker_pid;
+	uint64_t blocker_smblctx;
+	NTSTATUS status;
+};
+
+/*
+ * share_mode_do_locked() callback: acquire the brlock record and attempt
+ * the byte-range lock.  Result and (on conflict) blocker info end up in
+ * the do_lock_state.
+ */
+static void do_lock_fn(
+	const uint8_t *buf,
+	size_t buflen,
+	bool *modified_dependent,
+	void *private_data)
+{
+	struct do_lock_state *state = private_data;
+	struct byte_range_lock *br_lck = NULL;
+
+	br_lck = brl_get_locks_for_locking(talloc_tos(),
+					   state->fsp,
+					   state->req_mem_ctx,
+					   state->req_guid);
+	if (br_lck == NULL) {
+		state->status = NT_STATUS_NO_MEMORY;
+		return;
+	}
+
+	state->status = brl_lock(
+		br_lck,
+		state->smblctx,
+		messaging_server_id(state->fsp->conn->sconn->msg_ctx),
+		state->offset,
+		state->count,
+		state->lock_type,
+		state->lock_flav,
+		&state->blocker_pid,
+		&state->blocker_smblctx);
+
+	TALLOC_FREE(br_lck);
+}
+
+/*
+ * Acquire a byte-range lock on fsp, running the brl_lock() under the
+ * file's share-mode record lock.  On conflict, the blocker's pid and
+ * smblctx are returned via pblocker_pid/psmblctx (both optional).
+ *
+ * Print files and handles without locking capability short-circuit;
+ * zero-length ranges are valid and stored.  Note the lock counter is
+ * incremented even when brl_lock() failed -- it is a conservative
+ * efficiency counter (pending locks count too, see
+ * increment_current_lock_count()).
+ */
+NTSTATUS do_lock(files_struct *fsp,
+		 TALLOC_CTX *req_mem_ctx,
+		 const struct GUID *req_guid,
+		 uint64_t smblctx,
+		 uint64_t count,
+		 uint64_t offset,
+		 enum brl_type lock_type,
+		 enum brl_flavour lock_flav,
+		 struct server_id *pblocker_pid,
+		 uint64_t *psmblctx)
+{
+	struct do_lock_state state = {
+		.fsp = fsp,
+		.req_mem_ctx = req_mem_ctx,
+		.req_guid = req_guid,
+		.smblctx = smblctx,
+		.count = count,
+		.offset = offset,
+		.lock_type = lock_type,
+		.lock_flav = lock_flav,
+	};
+	NTSTATUS status;
+
+	/* silently return ok on print files as we don't do locking there */
+	if (fsp->print_file) {
+		return NT_STATUS_OK;
+	}
+
+	if (!fsp->fsp_flags.can_lock) {
+		if (fsp->fsp_flags.is_directory) {
+			return NT_STATUS_INVALID_DEVICE_REQUEST;
+		}
+		return NT_STATUS_INVALID_HANDLE;
+	}
+
+	if (!lp_locking(fsp->conn->params)) {
+		return NT_STATUS_OK;
+	}
+
+	/* NOTE! 0 byte long ranges ARE allowed and should be stored  */
+
+	DBG_DEBUG("lock flavour %s lock type %s start=%"PRIu64" len=%"PRIu64" "
+		  "requested for %s file %s\n",
+		  lock_flav_name(lock_flav),
+		  lock_type_name(lock_type),
+		  offset,
+		  count,
+		  fsp_fnum_dbg(fsp),
+		  fsp_str_dbg(fsp));
+
+	status = share_mode_do_locked(fsp->file_id, do_lock_fn, &state);
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_DEBUG("share_mode_do_locked returned %s\n",
+			  nt_errstr(status));
+		return status;
+	}
+
+	if (psmblctx != NULL) {
+		*psmblctx = state.blocker_smblctx;
+	}
+	if (pblocker_pid != NULL) {
+		*pblocker_pid = state.blocker_pid;
+	}
+
+	DBG_DEBUG("returning status=%s\n", nt_errstr(state.status));
+
+	increment_current_lock_count(fsp, lock_flav);
+
+	return state.status;
+}
+
+/****************************************************************************
+ Utility function called by unlocking requests.
+****************************************************************************/
+
+/*
+ * Release a byte-range lock previously taken with do_lock().  Returns
+ * NT_STATUS_RANGE_NOT_LOCKED if brl_unlock() finds no matching lock;
+ * on success the per-handle lock counter is decremented.
+ */
+NTSTATUS do_unlock(files_struct *fsp,
+		   uint64_t smblctx,
+		   uint64_t count,
+		   uint64_t offset,
+		   enum brl_flavour lock_flav)
+{
+	bool ok = False;
+	struct byte_range_lock *br_lck = NULL;
+
+	if (!fsp->fsp_flags.can_lock) {
+		return fsp->fsp_flags.is_directory ?
+			NT_STATUS_INVALID_DEVICE_REQUEST :
+			NT_STATUS_INVALID_HANDLE;
+	}
+
+	if (!lp_locking(fsp->conn->params)) {
+		return NT_STATUS_OK;
+	}
+
+	DBG_DEBUG("unlock start=%"PRIu64" len=%"PRIu64" requested for %s file "
+		  "%s\n",
+		  offset,
+		  count,
+		  fsp_fnum_dbg(fsp),
+		  fsp_str_dbg(fsp));
+
+	br_lck = brl_get_locks(talloc_tos(), fsp);
+	if (!br_lck) {
+		return NT_STATUS_NO_MEMORY;
+	}
+
+	ok = brl_unlock(br_lck,
+			smblctx,
+			messaging_server_id(fsp->conn->sconn->msg_ctx),
+			offset,
+			count,
+			lock_flav);
+
+	TALLOC_FREE(br_lck);
+
+	if (!ok) {
+		DEBUG(10,("do_unlock: returning ERRlock.\n" ));
+		return NT_STATUS_RANGE_NOT_LOCKED;
+	}
+
+	decrement_current_lock_count(fsp, lock_flav);
+	return NT_STATUS_OK;
+}
+
+/****************************************************************************
+ Remove any locks on this fd. Called from file_close().
+****************************************************************************/
+
+/*
+ * Drop all byte-range locks held by this handle at close time.  If the
+ * per-handle counter says no locks were ever taken (and no POSIX lock
+ * poisoned it -- see NO_LOCKING_COUNT), the lock db is not touched.
+ */
+void locking_close_file(files_struct *fsp,
+			enum file_close_type close_type)
+{
+	struct byte_range_lock *br_lck;
+
+	if (!lp_locking(fsp->conn->params)) {
+		return;
+	}
+
+	/* If we have no outstanding locks or pending
+	 * locks then we don't need to look in the lock db.
+	 */
+
+	if (fsp->current_lock_count == 0) {
+		return;
+	}
+
+	br_lck = brl_get_locks(talloc_tos(),fsp);
+
+	if (br_lck) {
+		/*
+		 * Unlocks must trigger dbwrap_watch watchers,
+		 * normally in smbd_do_unlocking. Here it's done
+		 * implictly, we're closing the file and thus remove a
+		 * share mode. This will wake the waiters.
+		 */
+		brl_close_fnum(br_lck);
+		TALLOC_FREE(br_lck);
+	}
+}
+
+/*******************************************************************
+ Print out a share mode.
+********************************************************************/
+
+/*
+ * Format one share_mode_entry as a talloc'ed debug string on ctx.
+ * Returns NULL on allocation failure (talloc_asprintf semantics).
+ */
+char *share_mode_str(TALLOC_CTX *ctx, int num,
+		     const struct file_id *id,
+		     const struct share_mode_entry *e)
+{
+	struct server_id_buf tmp;
+	struct file_id_buf ftmp;
+
+	return talloc_asprintf(ctx, "share_mode_entry[%d]: "
+		 "pid = %s, share_access = 0x%x, private_options = 0x%x, "
+		 "access_mask = 0x%x, mid = 0x%llx, type= 0x%x, gen_id = %llu, "
+		 "uid = %u, flags = %u, file_id %s, name_hash = 0x%x",
+		 num,
+		 server_id_str_buf(e->pid, &tmp),
+		 e->share_access, e->private_options,
+		 e->access_mask, (unsigned long long)e->op_mid,
+		 e->op_type, (unsigned long long)e->share_file_id,
+		 (unsigned int)e->uid, (unsigned int)e->flags,
+		 file_id_str_buf(*id, &ftmp),
+		 (unsigned int)e->name_hash);
+}
+
+/* Shared state for the rename_share_filename() helper callbacks. */
+struct rename_share_filename_state {
+	struct share_mode_lock *lck;
+	struct messaging_context *msg_ctx;
+	struct server_id self;
+	uint32_t orig_name_hash;
+	uint32_t new_name_hash;
+	struct file_rename_message msg;
+};
+
+/*
+ * share_mode_forall_leases() callback: update the name stored in
+ * leases.tdb for each lease on the renamed file.  Failures are logged
+ * but not propagated; always returns false to keep iterating.
+ */
+static bool rename_lease_fn(struct share_mode_entry *e,
+			    void *private_data)
+{
+	struct rename_share_filename_state *state = private_data;
+	struct share_mode_data *d = state->lck->data;
+	NTSTATUS status;
+
+	status = leases_db_rename(&e->client_guid,
+				  &e->lease_key,
+				  &d->id,
+				  d->servicepath,
+				  d->base_name,
+				  d->stream_name);
+
+	if (!NT_STATUS_IS_OK(status)) {
+		/* Any error recovery possible here ? */
+		DBG_WARNING("Failed to rename lease key for "
+			    "renamed file %s:%s. %s\n",
+			    d->base_name,
+			    d->stream_name,
+			    nt_errstr(status));
+	}
+
+	return false;
+}
+
+/*******************************************************************
+ Sets the service name and filename for rename.
+ At this point we emit "file renamed" messages to all
+ process id's that have this file open.
+ Based on an initial code idea from SATOH Fumiyasu <fumiya@samba.gr.jp>
+********************************************************************/
+
+/*
+ * share_mode_forall_entries() callback for rename_share_filename():
+ * for each entry opened under the old name, update its name_hash and
+ * notify the owning smbd (except ourselves) with MSG_SMB_FILE_RENAME.
+ * Hardlink entries with a different name_hash are skipped.  Always
+ * returns false so iteration continues over all entries.
+ */
+static bool rename_share_filename_fn(
+	struct share_mode_entry *e,
+	bool *modified,
+	void *private_data)
+{
+	struct rename_share_filename_state *state = private_data;
+	DATA_BLOB blob;
+	enum ndr_err_code ndr_err;
+	bool ok;
+
+	/*
+	 * If this is a hardlink to the inode with a different name,
+	 * skip this.
+	 */
+	if (e->name_hash != state->orig_name_hash) {
+		return false;
+	}
+	e->name_hash = state->new_name_hash;
+	*modified = true;
+
+	/* No need to message ourselves about our own rename. */
+	ok = server_id_equal(&e->pid, &state->self);
+	if (ok) {
+		return false;
+	}
+
+	state->msg.share_file_id = e->share_file_id;
+
+	ndr_err = ndr_push_struct_blob(
+		&blob,
+		talloc_tos(),
+		&state->msg,
+		(ndr_push_flags_fn_t)ndr_push_file_rename_message);
+	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
+		DBG_DEBUG("ndr_push_file_rename_message failed: %s\n",
+			  ndr_errstr(ndr_err));
+		return false;
+	}
+	if (DEBUGLEVEL >= 10) {
+		struct server_id_buf tmp;
+		DBG_DEBUG("sending rename message to %s\n",
+			  server_id_str_buf(e->pid, &tmp));
+		NDR_PRINT_DEBUG(file_rename_message, &state->msg);
+	}
+
+	messaging_send(state->msg_ctx, e->pid, MSG_SMB_FILE_RENAME, &blob);
+
+	TALLOC_FREE(blob.data);
+
+	return false;
+}
+
+/*
+ * Record a rename in the locked share-mode data: update the stored
+ * service/base/stream names, fix up every entry's name_hash, notify
+ * other smbds, and rename associated lease records.  Returns false only
+ * on talloc failure while duplicating the new names; iteration errors
+ * from the forall helpers are logged and otherwise ignored.
+ */
+bool rename_share_filename(struct messaging_context *msg_ctx,
+			struct share_mode_lock *lck,
+			struct file_id id,
+			const char *servicepath,
+			uint32_t orig_name_hash,
+			uint32_t new_name_hash,
+			const struct smb_filename *smb_fname_dst)
+{
+	struct rename_share_filename_state state = {
+		.lck = lck,
+		.msg_ctx = msg_ctx,
+		.self = messaging_server_id(msg_ctx),
+		.orig_name_hash = orig_name_hash,
+		.new_name_hash = new_name_hash,
+		.msg.id = id,
+		.msg.servicepath = servicepath,
+		.msg.base_name = smb_fname_dst->base_name,
+		.msg.stream_name = smb_fname_dst->stream_name,
+	};
+	struct share_mode_data *d = lck->data;
+	bool ok;
+
+	DEBUG(10, ("rename_share_filename: servicepath %s newname %s\n",
+		   servicepath, smb_fname_dst->base_name));
+
+	/*
+	 * rename_internal_fsp() and rename_internals() add './' to
+	 * head of newname if newname does not contain a '/'.
+	 */
+
+	if (strncmp(state.msg.base_name, "./", 2) == 0) {
+		state.msg.base_name += 2;
+	}
+
+	d->servicepath = talloc_strdup(d, state.msg.servicepath);
+	d->base_name = talloc_strdup(d, state.msg.base_name);
+	d->stream_name = talloc_strdup(d, state.msg.stream_name);
+	if ((d->servicepath == NULL) ||
+	    (d->base_name == NULL) ||
+	    ((state.msg.stream_name != NULL) && (d->stream_name == NULL))) {
+		DBG_WARNING("talloc failed\n");
+		return false;
+	}
+	d->modified = True;
+
+	ok = share_mode_forall_entries(
+		lck, rename_share_filename_fn, &state);
+	if (!ok) {
+		DBG_WARNING("share_mode_forall_entries failed\n");
+	}
+
+	ok = share_mode_forall_leases(lck, rename_lease_fn, &state);
+	if (!ok) {
+		/*
+		 * Ignore error here. Not sure what to do..
+		 */
+		DBG_WARNING("share_mode_forall_leases failed\n");
+	}
+
+	return True;
+}
+
+/*
+ * Best-effort read of delete-on-close and the stored write time for a
+ * file id.  Both out-params are optional; they are pre-set to "false" /
+ * omitted-timespec and left that way if no share-mode record exists.
+ */
+void get_file_infos(struct file_id id,
+		    uint32_t name_hash,
+		    bool *delete_on_close,
+		    struct timespec *write_time)
+{
+	struct share_mode_lock *lck;
+
+	if (delete_on_close) {
+		*delete_on_close = false;
+	}
+
+	if (write_time) {
+		*write_time = make_omit_timespec();
+	}
+
+	if (!(lck = fetch_share_mode_unlocked(talloc_tos(), id))) {
+		return;
+	}
+
+	if (delete_on_close) {
+		*delete_on_close = is_delete_on_close_set(lck, name_hash);
+	}
+
+	if (write_time) {
+		*write_time = get_share_mode_write_time(lck);
+	}
+
+	TALLOC_FREE(lck);
+}
+
+/*
+ * Validate a share-mode entry.  Exactly one oplock property (none,
+ * exclusive, level2, lease) must be set; more than one is only
+ * tolerated if the owning process is dead (stale entry) -- a live
+ * owner with an inconsistent entry is a fatal corruption (panic).
+ */
+bool is_valid_share_mode_entry(const struct share_mode_entry *e)
+{
+	int num_props = 0;
+
+	if (e->stale) {
+		return false;
+	}
+
+	num_props += ((e->op_type == NO_OPLOCK) ? 1 : 0);
+	num_props += (EXCLUSIVE_OPLOCK_TYPE(e->op_type) ? 1 : 0);
+	num_props += (LEVEL_II_OPLOCK_TYPE(e->op_type) ? 1 : 0);
+	num_props += (e->op_type == LEASE_OPLOCK);
+
+	if ((num_props > 1) && serverid_exists(&e->pid)) {
+		smb_panic("Invalid share mode entry");
+	}
+	return (num_props != 0);
+}
+
+/* Search target for find_lease_ref_fn(). */
+struct find_lease_ref_state {
+	const struct GUID *client_guid;
+	const struct smb2_lease_key *lease_key;
+	bool found_same;
+};
+
+/*
+ * share_mode_forall_entries() callback: look for a live lease-type
+ * entry matching (client_guid, lease_key).  Stale and non-lease
+ * entries are ignored.
+ */
+static bool find_lease_ref_fn(
+	struct share_mode_entry *e,
+	bool *modified,
+	void *private_data)
+{
+	struct find_lease_ref_state *state = private_data;
+
+	if (e->stale) {
+		return false;
+	}
+	if (e->op_type != LEASE_OPLOCK) {
+		return false;
+	}
+
+	state->found_same = smb2_lease_equal(
+		&e->client_guid,
+		&e->lease_key,
+		state->client_guid,
+		state->lease_key);
+	/*
+	 * If we found a lease reference, look no further (i.e. return true)
+	 */
+	return state->found_same;
+}
+
+/*
+ * Delete the leases.tdb record for (client_guid, lease_key) on this file
+ * unless some live share-mode entry still references the lease.
+ * Returns NT_STATUS_RESOURCE_IN_USE when a reference remains, otherwise
+ * the result of leases_db_del() (NOT_FOUND is logged at debug level
+ * only, other failures at error level).
+ */
+NTSTATUS remove_lease_if_stale(struct share_mode_lock *lck,
+			       const struct GUID *client_guid,
+			       const struct smb2_lease_key *lease_key)
+{
+	struct find_lease_ref_state state = {
+		.client_guid = client_guid, .lease_key = lease_key,
+	};
+	struct share_mode_data *d = lck->data;
+	NTSTATUS status;
+	bool ok;
+
+	ok = share_mode_forall_entries(lck, find_lease_ref_fn, &state);
+	if (!ok) {
+		return NT_STATUS_INTERNAL_ERROR;
+	}
+
+	if (state.found_same) {
+		return NT_STATUS_RESOURCE_IN_USE;
+	}
+
+	status = leases_db_del(client_guid, lease_key, &d->id);
+	if (!NT_STATUS_IS_OK(status)) {
+		int level = DBGLVL_DEBUG;
+
+		if (!NT_STATUS_EQUAL(status, NT_STATUS_NOT_FOUND)) {
+			level = DBGLVL_ERR;
+		}
+		DBG_PREFIX(level, ("leases_db_del failed: %s\n",
+			   nt_errstr(status)));
+	}
+	return status;
+}
+
+/*
+ * Check whether the process owning a share-mode entry is gone.  Caches
+ * the answer by setting e->stale, so a subsequent call skips the
+ * serverid lookup.  Returns true when the entry is stale.
+ */
+bool share_entry_stale_pid(struct share_mode_entry *e)
+{
+	struct server_id_buf buf;
+	bool exists;
+
+	if (e->stale) {
+		return true;
+	}
+
+	exists = serverid_exists(&e->pid);
+	if (exists) {
+		DBG_DEBUG("PID %s still exists\n",
+			  server_id_str_buf(e->pid, &buf));
+		return false;
+	}
+
+	DBG_DEBUG("PID %s does not exist anymore\n",
+		  server_id_str_buf(e->pid, &buf));
+
+	e->stale = true;
+
+	return true;
+}
+
+/****************************************************************************
+ Adds a delete on close token.
+****************************************************************************/
+
+/*
+ * Append a delete-on-close token (name_hash + copies of the NT and unix
+ * security tokens) to the share-mode data.  Returns false on allocation
+ * failure; note num_delete_tokens is only bumped after both token
+ * copies succeed, so a partial failure leaves the array consistent.
+ */
+static bool add_delete_on_close_token(struct share_mode_data *d,
+			uint32_t name_hash,
+			const struct security_token *nt_tok,
+			const struct security_unix_token *tok)
+{
+	struct delete_token *tmp, *dtl;
+
+	tmp = talloc_realloc(d, d->delete_tokens, struct delete_token,
+			     d->num_delete_tokens+1);
+	if (tmp == NULL) {
+		return false;
+	}
+	d->delete_tokens = tmp;
+	dtl = &d->delete_tokens[d->num_delete_tokens];
+
+	dtl->name_hash = name_hash;
+	dtl->delete_nt_token = dup_nt_token(d->delete_tokens, nt_tok);
+	if (dtl->delete_nt_token == NULL) {
+		return false;
+	}
+	dtl->delete_token = copy_unix_token(d->delete_tokens, tok);
+	if (dtl->delete_token == NULL) {
+		return false;
+	}
+	d->num_delete_tokens += 1;
+	d->modified = true;
+	return true;
+}
+
+/*
+ * Remove all delete-on-close tokens matching fsp->name_hash, using the
+ * swap-with-last idiom for O(1) removal.
+ *
+ * NOTE(review): after the swap, index i is not re-examined, so a
+ * swapped-in token with the same name_hash would be skipped.
+ * Presumably at most one token per name_hash exists -- confirm.
+ */
+void reset_delete_on_close_lck(files_struct *fsp,
+			       struct share_mode_lock *lck)
+{
+	struct share_mode_data *d = lck->data;
+	uint32_t i;
+
+	for (i=0; i<d->num_delete_tokens; i++) {
+		struct delete_token *dt = &d->delete_tokens[i];
+
+		if (dt->name_hash == fsp->name_hash) {
+			d->modified = true;
+
+			/* Delete this entry. */
+			TALLOC_FREE(dt->delete_nt_token);
+			TALLOC_FREE(dt->delete_token);
+			*dt = d->delete_tokens[d->num_delete_tokens-1];
+			d->num_delete_tokens -= 1;
+		}
+	}
+}
+
+/* State handed to set_delete_on_close_fn() for each share mode entry:
+ * the messaging context plus the NDR-pushed file_id blob to send. */
+struct set_delete_on_close_state {
+ struct messaging_context *msg_ctx;
+ DATA_BLOB blob;
+};
+
+/*
+ * share_mode_forall_entries() callback: send MSG_SMB_NOTIFY_CANCEL_DELETED
+ * with the file_id blob to the smbd owning this entry. Send failures are
+ * only logged at debug level. Always returns false so that every entry
+ * is visited.
+ */
+static bool set_delete_on_close_fn(
+ struct share_mode_entry *e,
+ bool *modified,
+ void *private_data)
+{
+ struct set_delete_on_close_state *state = private_data;
+ NTSTATUS status;
+
+ status = messaging_send(
+ state->msg_ctx,
+ e->pid,
+ MSG_SMB_NOTIFY_CANCEL_DELETED,
+ &state->blob);
+
+ if (!NT_STATUS_IS_OK(status)) {
+ struct server_id_buf tmp;
+ DBG_DEBUG("messaging_send to %s returned %s\n",
+ server_id_str_buf(e->pid, &tmp),
+ nt_errstr(status));
+ }
+
+ return false;
+}
+
+/****************************************************************************
+ Sets the delete on close flag over all share modes on this file.
+ Modify the share mode entry for all files open
+ on this device and inode to tell other smbds we have
+ changed the delete on close flag. This will be noticed
+ in the close code, the last closer will delete the file
+ if flag is set.
+ This makes a copy of any struct security_unix_token into the
+ lck entry. This function is used when the lock is already granted.
+****************************************************************************/
+
+void set_delete_on_close_lck(files_struct *fsp,
+ struct share_mode_lock *lck,
+ const struct security_token *nt_tok,
+ const struct security_unix_token *tok)
+{
+ struct share_mode_data *d = lck->data;
+ struct set_delete_on_close_state state = {
+ .msg_ctx = fsp->conn->sconn->msg_ctx
+ };
+ uint32_t i;
+ bool ret;
+ enum ndr_err_code ndr_err;
+
+ SMB_ASSERT(nt_tok != NULL);
+ SMB_ASSERT(tok != NULL);
+
+ /*
+ * If a token for this name_hash already exists, replace it in
+ * place and return early - no cancel messages are re-sent on
+ * this path.
+ *
+ * NOTE(review): the replacement copies are parented to dt (a
+ * pointer into the middle of the delete_tokens array), whereas
+ * add_delete_on_close_token() parents to d->delete_tokens.
+ * Confirm the intended talloc hierarchy here.
+ */
+ for (i=0; i<d->num_delete_tokens; i++) {
+ struct delete_token *dt = &d->delete_tokens[i];
+ if (dt->name_hash == fsp->name_hash) {
+ d->modified = true;
+
+ /* Replace this token with the given tok. */
+ TALLOC_FREE(dt->delete_nt_token);
+ dt->delete_nt_token = dup_nt_token(dt, nt_tok);
+ SMB_ASSERT(dt->delete_nt_token != NULL);
+ TALLOC_FREE(dt->delete_token);
+ dt->delete_token = copy_unix_token(dt, tok);
+ SMB_ASSERT(dt->delete_token != NULL);
+
+ return;
+ }
+ }
+
+ ret = add_delete_on_close_token(lck->data, fsp->name_hash, nt_tok, tok);
+ SMB_ASSERT(ret);
+
+ /* Marshal the file_id once; the same blob is sent to every entry. */
+ ndr_err = ndr_push_struct_blob(
+ &state.blob,
+ talloc_tos(),
+ &fsp->file_id,
+ (ndr_push_flags_fn_t)ndr_push_file_id);
+ if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
+ DEBUG(10, ("ndr_push_file_id failed: %s\n",
+ ndr_errstr(ndr_err)));
+ }
+
+ ret = share_mode_forall_entries(
+ lck, set_delete_on_close_fn, &state);
+ if (!ret) {
+ DBG_DEBUG("share_mode_forall_entries failed\n");
+ }
+
+ TALLOC_FREE(state.blob.data);
+}
+
+/****************************************************************************
+ Set or clear the delete-on-close flag for this file. Takes the share mode
+ lock itself; returns False only if the lock cannot be obtained. Also
+ updates fsp->fsp_flags.delete_on_close and, for directories, tells other
+ smbds to drop their stat cache entry for the pathname.
+****************************************************************************/
+
+bool set_delete_on_close(files_struct *fsp, bool delete_on_close,
+ const struct security_token *nt_tok,
+ const struct security_unix_token *tok)
+{
+ struct share_mode_lock *lck;
+
+ DEBUG(10,("set_delete_on_close: %s delete on close flag for "
+ "%s, file %s\n",
+ delete_on_close ? "Adding" : "Removing", fsp_fnum_dbg(fsp),
+ fsp_str_dbg(fsp)));
+
+ lck = get_existing_share_mode_lock(talloc_tos(), fsp->file_id);
+ if (lck == NULL) {
+ return False;
+ }
+
+ if (delete_on_close) {
+ set_delete_on_close_lck(fsp, lck, nt_tok, tok);
+ } else {
+ reset_delete_on_close_lck(fsp, lck);
+ }
+
+ if (fsp->fsp_flags.is_directory) {
+ SMB_ASSERT(!is_ntfs_stream_smb_fname(fsp->fsp_name));
+ send_stat_cache_delete_message(fsp->conn->sconn->msg_ctx,
+ fsp->fsp_name->base_name);
+ }
+
+ TALLOC_FREE(lck);
+
+ fsp->fsp_flags.delete_on_close = delete_on_close;
+
+ return True;
+}
+
+/*
+ * Linear search of d->delete_tokens for a token matching name_hash.
+ * Returns a pointer into the array (do not free), or NULL if not found.
+ */
+static struct delete_token *find_delete_on_close_token(
+ struct share_mode_data *d, uint32_t name_hash)
+{
+ uint32_t i;
+
+ DBG_DEBUG("name_hash = 0x%"PRIx32"\n", name_hash);
+
+ for (i=0; i<d->num_delete_tokens; i++) {
+ struct delete_token *dt = &d->delete_tokens[i];
+
+ DBG_DEBUG("dt->name_hash = 0x%"PRIx32"\n",
+ dt->name_hash);
+ if (dt->name_hash == name_hash) {
+ return dt;
+ }
+ }
+ return NULL;
+}
+
+/****************************************************************************
+ Return the NT token and UNIX token if there's a match. Return true if
+ found, false if not.
+****************************************************************************/
+
+bool get_delete_on_close_token(struct share_mode_lock *lck,
+ uint32_t name_hash,
+ const struct security_token **pp_nt_tok,
+ const struct security_unix_token **pp_tok)
+{
+ struct delete_token *dt;
+
+ dt = find_delete_on_close_token(lck->data, name_hash);
+ if (dt == NULL) {
+ /* Output pointers are left untouched on failure. */
+ return false;
+ }
+ /* Borrowed references into lck->data - valid only while lck lives. */
+ *pp_nt_tok = dt->delete_nt_token;
+ *pp_tok = dt->delete_token;
+ return true;
+}
+
+/* True if a delete-on-close token exists for this name_hash. */
+bool is_delete_on_close_set(struct share_mode_lock *lck, uint32_t name_hash)
+{
+ return find_delete_on_close_token(lck->data, name_hash) != NULL;
+}
+
+/****************************************************************************
+ Record a "sticky" (client-forced) write time in the share mode data
+ (changed_write_time). The record is only marked modified when the value
+ actually changes. Returns False only if the share mode lock cannot be
+ obtained.
+****************************************************************************/
+
+bool set_sticky_write_time(struct file_id fileid, struct timespec write_time)
+{
+ struct share_mode_lock *lck;
+ struct file_id_buf ftmp;
+ struct timeval_buf tbuf;
+ NTTIME nt = full_timespec_to_nt_time(&write_time);
+
+ DBG_INFO("%s id=%s\n",
+ timespec_string_buf(&write_time, true, &tbuf),
+ file_id_str_buf(fileid, &ftmp));
+
+ lck = get_existing_share_mode_lock(talloc_tos(), fileid);
+ if (lck == NULL) {
+ return False;
+ }
+
+ if (lck->data->changed_write_time != nt) {
+ lck->data->modified = True;
+ lck->data->changed_write_time = nt;
+ }
+
+ TALLOC_FREE(lck);
+ return True;
+}
+
+/****************************************************************************
+ Record the regular write time in the share mode data (old_write_time).
+ Same shape as set_sticky_write_time() but updates the non-sticky field;
+ changed_write_time takes precedence when reading back (see
+ get_share_mode_write_time()).
+****************************************************************************/
+
+bool set_write_time(struct file_id fileid, struct timespec write_time)
+{
+ struct share_mode_lock *lck;
+ struct file_id_buf idbuf;
+ struct timeval_buf tbuf;
+ NTTIME nt = full_timespec_to_nt_time(&write_time);
+
+ DBG_INFO("%s id=%s\n",
+ timespec_string_buf(&write_time, true, &tbuf),
+ file_id_str_buf(fileid, &idbuf));
+
+ lck = get_existing_share_mode_lock(talloc_tos(), fileid);
+ if (lck == NULL) {
+ return False;
+ }
+
+ if (lck->data->old_write_time != nt) {
+ lck->data->modified = True;
+ lck->data->old_write_time = nt;
+ }
+
+ TALLOC_FREE(lck);
+ return True;
+}
+
+/* Return the effective write time: the sticky changed_write_time if set
+ * (non-null), otherwise the regular old_write_time. */
+struct timespec get_share_mode_write_time(struct share_mode_lock *lck)
+{
+ struct share_mode_data *d = lck->data;
+
+ if (!null_nttime(d->changed_write_time)) {
+ return nt_time_to_full_timespec(d->changed_write_time);
+ }
+ return nt_time_to_full_timespec(d->old_write_time);
+}
+
+/* Result holder for the file_has_open_streams() walk. */
+struct file_has_open_streams_state {
+ bool found_one;
+};
+
+/*
+ * share_mode_forall_entries() callback: look for a live (non-stale)
+ * entry whose open was a stream base-open. Returns true (stop walking)
+ * as soon as one is found.
+ */
+static bool file_has_open_streams_fn(
+ struct share_mode_entry *e,
+ bool *modified,
+ void *private_data)
+{
+ struct file_has_open_streams_state *state = private_data;
+
+ if ((e->private_options &
+ NTCREATEX_FLAG_STREAM_BASEOPEN) == 0) {
+ return false;
+ }
+
+ /* Ignore entries whose owning process has died. */
+ if (share_entry_stale_pid(e)) {
+ return false;
+ }
+
+ state->found_one = true;
+ return true;
+}
+
+/*
+ * Return true if any live opener of this file performed a stream
+ * base-open. Returns false if the share mode lock cannot be obtained
+ * or the entry walk fails.
+ */
+bool file_has_open_streams(files_struct *fsp)
+{
+ struct file_has_open_streams_state state = { .found_one = false };
+ struct share_mode_lock *lock = NULL;
+ bool ok;
+
+ lock = get_existing_share_mode_lock(talloc_tos(), fsp->file_id);
+ if (lock == NULL) {
+ return false;
+ }
+
+ ok = share_mode_forall_entries(
+ lock, file_has_open_streams_fn, &state);
+ TALLOC_FREE(lock);
+
+ if (!ok) {
+ DBG_DEBUG("share_mode_forall_entries failed\n");
+ return false;
+ }
+ return state.found_one;
+}
+
+/*
+ * Walk share mode entries, looking at every lease only once
+ */
+
+/* Walk state: "leases" is a growing talloc array of (client_guid,
+ * lease_key) pairs already visited, used to invoke the user callback
+ * only once per distinct lease. */
+struct share_mode_forall_leases_state {
+ TALLOC_CTX *mem_ctx;
+ struct leases_db_key *leases;
+ bool (*fn)(struct share_mode_entry *e,
+ void *private_data);
+ void *private_data;
+ NTSTATUS status;
+};
+
+/*
+ * share_mode_forall_entries() callback: skip non-lease oplocks and any
+ * lease already seen (linear scan of the seen-list, O(n^2) over the
+ * number of distinct leases), otherwise record it and call the user
+ * callback. Returns true (stop) on allocation failure or when the user
+ * callback asks to stop.
+ */
+static bool share_mode_forall_leases_fn(
+ struct share_mode_entry *e,
+ bool *modified,
+ void *private_data)
+{
+ struct share_mode_forall_leases_state *state = private_data;
+ struct leases_db_key *leases = state->leases;
+ size_t i, num_leases;
+ bool stop;
+
+ if (e->op_type != LEASE_OPLOCK) {
+ return false;
+ }
+
+ num_leases = talloc_array_length(leases);
+
+ for (i=0; i<num_leases; i++) {
+ struct leases_db_key *l = &leases[i];
+ bool same = smb2_lease_equal(
+ &e->client_guid,
+ &e->lease_key,
+ &l->client_guid,
+ &l->lease_key);
+ if (same) {
+ return false;
+ }
+ }
+
+ leases = talloc_realloc(
+ state->mem_ctx,
+ leases,
+ struct leases_db_key,
+ num_leases+1);
+ if (leases == NULL) {
+ state->status = NT_STATUS_NO_MEMORY;
+ return true;
+ }
+ leases[num_leases] = (struct leases_db_key) {
+ .client_guid = e->client_guid,
+ .lease_key = e->lease_key,
+ };
+ state->leases = leases;
+
+ stop = state->fn(e, state->private_data);
+ return stop;
+}
+
+/*
+ * Invoke fn once per distinct (client_guid, lease_key) lease among the
+ * share mode entries of lck. Returns false if the underlying entry walk
+ * fails or the deduplication bookkeeping runs out of memory.
+ */
+bool share_mode_forall_leases(
+ struct share_mode_lock *lck,
+ bool (*fn)(struct share_mode_entry *e,
+ void *private_data),
+ void *private_data)
+{
+ struct share_mode_forall_leases_state state = {
+ .mem_ctx = talloc_tos(),
+ .fn = fn,
+ .private_data = private_data
+ };
+ bool ok;
+
+ ok = share_mode_forall_entries(
+ lck, share_mode_forall_leases_fn, &state);
+ TALLOC_FREE(state.leases);
+ if (!ok) {
+ DBG_DEBUG("share_mode_forall_entries failed\n");
+ return false;
+ }
+
+ if (!NT_STATUS_IS_OK(state.status)) {
+ DBG_DEBUG("share_mode_forall_leases_fn returned %s\n",
+ nt_errstr(state.status));
+ return false;
+ }
+
+ return true;
+}
diff --git a/source3/locking/posix.c b/source3/locking/posix.c
new file mode 100644
index 0000000..536d8bf
--- /dev/null
+++ b/source3/locking/posix.c
@@ -0,0 +1,1374 @@
+/*
+ Unix SMB/CIFS implementation.
+ Locking functions
+ Copyright (C) Jeremy Allison 1992-2006
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+ Revision History:
+
+ POSIX locking support. Jeremy Allison (jeremy@valinux.com), Apr. 2000.
+*/
+
+#include "includes.h"
+#include "system/filesys.h"
+#include "lib/util/server_id.h"
+#include "locking/proto.h"
+#include "dbwrap/dbwrap.h"
+#include "dbwrap/dbwrap_rbt.h"
+#include "util_tdb.h"
+#include "smbd/fd_handle.h"
+
+#undef DBGC_CLASS
+#define DBGC_CLASS DBGC_LOCKING
+
+/*
+ * The pending close database handle.
+ */
+
+static struct db_context *posix_pending_close_db;
+
+/****************************************************************************
+ First - the functions that deal with the underlying system locks - these
+ functions are used no matter if we're mapping CIFS Windows locks or CIFS
+ POSIX locks onto POSIX.
+****************************************************************************/
+
+/****************************************************************************
+ Utility function to map a lock type correctly depending on the open
+ mode of a file.
+****************************************************************************/
+
+/* Map a brl_type (READ_LOCK/WRITE_LOCK) to fcntl F_RDLCK/F_WRLCK,
+ * downgrading write locks on read-only opens. */
+static int map_posix_lock_type( files_struct *fsp, enum brl_type lock_type)
+{
+ if ((lock_type == WRITE_LOCK) && !fsp->fsp_flags.can_write) {
+ /*
+ * Many UNIX's cannot get a write lock on a file opened read-only.
+ * Win32 locking semantics allow this.
+ * Do the best we can and attempt a read-only lock.
+ */
+ DEBUG(10,("map_posix_lock_type: Downgrading write lock to read due to read-only file.\n"));
+ return F_RDLCK;
+ }
+
+ /*
+ * This return should be the most normal, as we attempt
+ * to always open files read/write.
+ */
+
+ return (lock_type == READ_LOCK) ? F_RDLCK : F_WRLCK;
+}
+
+/****************************************************************************
+ Debugging aid :-).
+****************************************************************************/
+
+/* Human-readable name for an fcntl lock type; anything other than
+ * F_RDLCK (including F_UNLCK) prints as "WRITE". */
+static const char *posix_lock_type_name(int lock_type)
+{
+ return (lock_type == F_RDLCK) ? "READ" : "WRITE";
+}
+
+/****************************************************************************
+ Check to see if the given unsigned lock range is within the possible POSIX
+ range. Modifies the given args to be in range if possible, just returns
+ False if not.
+****************************************************************************/
+
+#define SMB_OFF_T_BITS (sizeof(off_t)*8)
+
+/*
+ * Map an unsigned SMB (offset, count) pair into a signed POSIX off_t
+ * range. On success writes the clamped range to *offset_out/*count_out
+ * and returns True. Returns False when the lock cannot be represented
+ * (zero count, or offset beyond the maximum positive lock offset) and
+ * the caller should silently ignore the lock.
+ */
+static bool posix_lock_in_range(off_t *offset_out, off_t *count_out,
+ uint64_t u_offset, uint64_t u_count)
+{
+ off_t offset = (off_t)u_offset;
+ off_t count = (off_t)u_count;
+
+ /*
+ * For the type of system we are, attempt to
+ * find the maximum positive lock offset as an off_t.
+ */
+
+#if defined(MAX_POSITIVE_LOCK_OFFSET) /* Some systems have arbitrary limits. */
+
+ off_t max_positive_lock_offset = (MAX_POSITIVE_LOCK_OFFSET);
+#else
+ /*
+ * In this case off_t is 64 bits,
+ * and the underlying system can handle 64 bit signed locks.
+ */
+
+ /* Clears the top three bits, yielding 0x1FFF...FFF as the max. */
+ off_t mask2 = ((off_t)0x4) << (SMB_OFF_T_BITS-4);
+ off_t mask = (mask2<<1);
+ off_t max_positive_lock_offset = ~mask;
+
+#endif
+ /*
+ * POSIX locks of length zero mean lock to end-of-file.
+ * Win32 locks of length zero are point probes. Ignore
+ * any Win32 locks of length zero. JRA.
+ */
+
+ if (count == 0) {
+ DEBUG(10,("posix_lock_in_range: count = 0, ignoring.\n"));
+ return False;
+ }
+
+ /*
+ * If the given offset was > max_positive_lock_offset then we cannot map this at all
+ * ignore this lock.
+ */
+
+ if (u_offset & ~((uint64_t)max_positive_lock_offset)) {
+ DEBUG(10, ("posix_lock_in_range: (offset = %ju) offset > %ju "
+ "and we cannot handle this. Ignoring lock.\n",
+ (uintmax_t)u_offset,
+ (uintmax_t)max_positive_lock_offset));
+ return False;
+ }
+
+ /*
+ * We must truncate the count to less than max_positive_lock_offset.
+ */
+
+ if (u_count & ~((uint64_t)max_positive_lock_offset)) {
+ count = max_positive_lock_offset;
+ }
+
+ /*
+ * Truncate count to end at max lock offset.
+ * The first condition also guards the addition below against
+ * signed overflow.
+ */
+
+ if (offset > INT64_MAX - count ||
+ offset + count > max_positive_lock_offset) {
+ count = max_positive_lock_offset - offset;
+ }
+
+ /*
+ * If we ate all the count, ignore this lock.
+ */
+
+ if (count == 0) {
+ DEBUG(10, ("posix_lock_in_range: Count = 0. Ignoring lock "
+ "u_offset = %ju, u_count = %ju\n",
+ (uintmax_t)u_offset,
+ (uintmax_t)u_count));
+ return False;
+ }
+
+ /*
+ * The mapping was successful.
+ */
+
+ DEBUG(10, ("posix_lock_in_range: offset_out = %ju, "
+ "count_out = %ju\n",
+ (uintmax_t)offset, (uintmax_t)count));
+
+ *offset_out = offset;
+ *count_out = count;
+
+ return True;
+}
+
+/* VFS dispatcher: forward a lock request to the first module in the
+ * chain implementing lock_fn. */
+bool smb_vfs_call_lock(struct vfs_handle_struct *handle,
+ struct files_struct *fsp, int op, off_t offset,
+ off_t count, int type)
+{
+ VFS_FIND(lock);
+ return handle->fns->lock_fn(handle, fsp, op, offset, count, type);
+}
+
+/****************************************************************************
+ Actual function that does POSIX locks. Copes with 64 -> 32 bit cruft and
+ broken NFS implementations.
+****************************************************************************/
+
+/*
+ * Perform a POSIX lock operation via the VFS, working around 32-bit NFS
+ * servers that reject 64-bit ranges (EFBIG/ENOLCK/EINVAL): large offsets
+ * are treated as success, large counts are retried truncated to 31 bits.
+ */
+static bool posix_fcntl_lock(files_struct *fsp, int op, off_t offset, off_t count, int type)
+{
+ bool ret;
+
+ DEBUG(8,("posix_fcntl_lock %d %d %jd %jd %d\n",
+ fsp_get_io_fd(fsp),op,(intmax_t)offset,(intmax_t)count,type));
+
+ ret = SMB_VFS_LOCK(fsp, op, offset, count, type);
+
+ if (!ret && ((errno == EFBIG) || (errno == ENOLCK) || (errno == EINVAL))) {
+
+ /* EINVAL with a non-standard op means OFD locks were
+ * requested but the kernel doesn't support them. */
+ if ((errno == EINVAL) &&
+ (op != F_GETLK &&
+ op != F_SETLK &&
+ op != F_SETLKW)) {
+ DEBUG(0,("WARNING: OFD locks in use and no kernel "
+ "support. Try setting "
+ "'smbd:force process locks = true' "
+ "in smb.conf\n"));
+ } else {
+ DEBUG(0, ("WARNING: lock request at offset "
+ "%ju, length %ju returned\n",
+ (uintmax_t)offset, (uintmax_t)count));
+ DEBUGADD(0, ("an %s error. This can happen when using 64 bit "
+ "lock offsets\n", strerror(errno)));
+ DEBUGADD(0, ("on 32 bit NFS mounted file systems.\n"));
+ }
+
+ /*
+ * If the offset is > 0x7FFFFFFF then this will cause problems on
+ * 32 bit NFS mounted filesystems. Just ignore it.
+ */
+
+ if (offset & ~((off_t)0x7fffffff)) {
+ DEBUG(0,("Offset greater than 31 bits. Returning success.\n"));
+ return True;
+ }
+
+ if (count & ~((off_t)0x7fffffff)) {
+ /* 32 bit NFS file system, retry with smaller offset */
+ DEBUG(0,("Count greater than 31 bits - retrying with 31 bit truncated length.\n"));
+ errno = 0;
+ count &= 0x7fffffff;
+ ret = SMB_VFS_LOCK(fsp, op, offset, count, type);
+ }
+ }
+
+ DEBUG(8,("posix_fcntl_lock: Lock call %s\n", ret ? "successful" : "failed"));
+ return ret;
+}
+
+/* VFS dispatcher: forward a lock query to the first module in the
+ * chain implementing getlock_fn. */
+bool smb_vfs_call_getlock(struct vfs_handle_struct *handle,
+ struct files_struct *fsp, off_t *poffset,
+ off_t *pcount, int *ptype, pid_t *ppid)
+{
+ VFS_FIND(getlock);
+ return handle->fns->getlock_fn(handle, fsp, poffset, pcount, ptype,
+ ppid);
+}
+
+/****************************************************************************
+ Actual function that gets POSIX locks. Copes with 64 -> 32 bit cruft and
+ broken NFS implementations.
+****************************************************************************/
+
+/*
+ * Query a POSIX lock via the VFS (F_GETLK semantics); *poffset/*pcount/
+ * *ptype are in/out and updated with any conflicting lock found. Applies
+ * the same 32-bit NFS workarounds as posix_fcntl_lock().
+ */
+static bool posix_fcntl_getlock(files_struct *fsp, off_t *poffset, off_t *pcount, int *ptype)
+{
+ pid_t pid;
+ bool ret;
+
+ DEBUG(8, ("posix_fcntl_getlock %d %ju %ju %d\n",
+ fsp_get_io_fd(fsp), (uintmax_t)*poffset, (uintmax_t)*pcount,
+ *ptype));
+
+ ret = SMB_VFS_GETLOCK(fsp, poffset, pcount, ptype, &pid);
+
+ if (!ret && ((errno == EFBIG) || (errno == ENOLCK) || (errno == EINVAL))) {
+
+ DEBUG(0, ("posix_fcntl_getlock: WARNING: lock request at "
+ "offset %ju, length %ju returned\n",
+ (uintmax_t)*poffset, (uintmax_t)*pcount));
+ DEBUGADD(0, ("an %s error. This can happen when using 64 bit "
+ "lock offsets\n", strerror(errno)));
+ DEBUGADD(0, ("on 32 bit NFS mounted file systems.\n"));
+
+ /*
+ * If the offset is > 0x7FFFFFFF then this will cause problems on
+ * 32 bit NFS mounted filesystems. Just ignore it.
+ */
+
+ if (*poffset & ~((off_t)0x7fffffff)) {
+ DEBUG(0,("Offset greater than 31 bits. Returning success.\n"));
+ return True;
+ }
+
+ if (*pcount & ~((off_t)0x7fffffff)) {
+ /* 32 bit NFS file system, retry with smaller offset */
+ DEBUG(0,("Count greater than 31 bits - retrying with 31 bit truncated length.\n"));
+ errno = 0;
+ *pcount &= 0x7fffffff;
+ ret = SMB_VFS_GETLOCK(fsp,poffset,pcount,ptype,&pid);
+ }
+ }
+
+ DEBUG(8,("posix_fcntl_getlock: Lock query call %s\n", ret ? "successful" : "failed"));
+ return ret;
+}
+
+/****************************************************************************
+ POSIX function to see if a file region is locked. Returns True if the
+ region is locked, False otherwise.
+****************************************************************************/
+
+bool is_posix_locked(files_struct *fsp,
+ uint64_t *pu_offset,
+ uint64_t *pu_count,
+ enum brl_type *plock_type,
+ enum brl_flavour lock_flav)
+{
+ off_t offset;
+ off_t count;
+ int posix_lock_type = map_posix_lock_type(fsp,*plock_type);
+
+ DEBUG(10, ("is_posix_locked: File %s, offset = %ju, count = %ju, "
+ "type = %s\n", fsp_str_dbg(fsp), (uintmax_t)*pu_offset,
+ (uintmax_t)*pu_count, posix_lock_type_name(*plock_type)));
+
+ /*
+ * If the requested lock won't fit in the POSIX range, we will
+ * never set it, so presume it is not locked.
+ */
+
+ if(!posix_lock_in_range(&offset, &count, *pu_offset, *pu_count)) {
+ return False;
+ }
+
+ if (!posix_fcntl_getlock(fsp,&offset,&count,&posix_lock_type)) {
+ return False;
+ }
+
+ /* F_UNLCK back from F_GETLK means no conflicting lock. */
+ if (posix_lock_type == F_UNLCK) {
+ return False;
+ }
+
+ if (lock_flav == POSIX_LOCK) {
+ /* Only POSIX lock queries need to know the details. */
+ *pu_offset = (uint64_t)offset;
+ *pu_count = (uint64_t)count;
+ *plock_type = (posix_lock_type == F_RDLCK) ? READ_LOCK : WRITE_LOCK;
+ }
+ return True;
+}
+
+/****************************************************************************
+ Next - the functions that deal with in memory database storing representations
+ of either Windows CIFS locks or POSIX CIFS locks.
+****************************************************************************/
+
+/* The key used in the in-memory POSIX databases. */
+
+/* Ref-count key: file_id plus a literal 'r' tag byte, distinguishing
+ * it from the plain file_id key used for the fd array records. */
+struct lock_ref_count_key {
+ struct file_id id;
+ char r;
+};
+
+/*******************************************************************
+ Form a static locking key for a dev/inode pair for the lock ref count.
+ "tmp" is caller-provided storage the returned TDB_DATA points into.
+******************************************************************/
+
+static TDB_DATA locking_ref_count_key_fsp(const files_struct *fsp,
+ struct lock_ref_count_key *tmp)
+{
+ /* Zero first so struct padding bytes are deterministic in the key. */
+ ZERO_STRUCTP(tmp);
+ tmp->id = fsp->file_id;
+ tmp->r = 'r';
+ return make_tdb_data((uint8_t *)tmp, sizeof(*tmp));
+}
+
+/*******************************************************************
+ Convenience function to get an fd_array key from an fsp.
+ The key is the raw file_id; the returned TDB_DATA aliases fsp.
+******************************************************************/
+
+static TDB_DATA fd_array_key_fsp(const files_struct *fsp)
+{
+ return make_tdb_data((const uint8_t *)&fsp->file_id, sizeof(fsp->file_id));
+}
+
+/*******************************************************************
+ Create the in-memory POSIX lock databases.
+********************************************************************/
+
+/* Create the in-memory (rbt-backed) pending close database. Idempotent;
+ * returns false only if creation fails. NOTE(review): the read_only
+ * parameter is unused here. */
+bool posix_locking_init(bool read_only)
+{
+ if (posix_pending_close_db != NULL) {
+ return true;
+ }
+
+ posix_pending_close_db = db_open_rbt(NULL);
+
+ if (posix_pending_close_db == NULL) {
+ DEBUG(0,("Failed to open POSIX pending close database.\n"));
+ return false;
+ }
+
+ return true;
+}
+
+/*******************************************************************
+ Delete the in-memory POSIX lock databases.
+********************************************************************/
+
+/* Tear down the pending close database. Always returns true. */
+bool posix_locking_end(void)
+{
+ /*
+ * Shouldn't we close all fd's here?
+ */
+ TALLOC_FREE(posix_pending_close_db);
+ return true;
+}
+
+/****************************************************************************
+ Next - the functions that deal with reference count of number of locks open
+ on a dev/ino pair.
+****************************************************************************/
+
+/****************************************************************************
+ Increase the lock ref count. Creates lock_ref_count entry if it doesn't exist.
+****************************************************************************/
+
+/* Atomically bump the per-dev/inode lock ref count, creating the record
+ * if needed. Asserts on database failure or counter overflow. */
+static void increment_lock_ref_count(const files_struct *fsp)
+{
+ struct lock_ref_count_key tmp;
+ int32_t lock_ref_count = 0;
+ NTSTATUS status;
+
+ status = dbwrap_change_int32_atomic(
+ posix_pending_close_db, locking_ref_count_key_fsp(fsp, &tmp),
+ &lock_ref_count, 1);
+
+ SMB_ASSERT(NT_STATUS_IS_OK(status));
+ SMB_ASSERT(lock_ref_count < INT32_MAX);
+
+ /* lock_ref_count holds the pre-increment value here. */
+ DEBUG(10,("lock_ref_count for file %s = %d\n",
+ fsp_str_dbg(fsp), (int)(lock_ref_count + 1)));
+}
+
+/****************************************************************************
+ Reduce the lock ref count.
+****************************************************************************/
+
+/* Atomically decrement the per-dev/inode lock ref count. Asserts on
+ * database failure or if the count would go negative. */
+static void decrement_lock_ref_count(const files_struct *fsp)
+{
+ struct lock_ref_count_key tmp;
+ int32_t lock_ref_count = 0;
+ NTSTATUS status;
+
+ status = dbwrap_change_int32_atomic(
+ posix_pending_close_db, locking_ref_count_key_fsp(fsp, &tmp),
+ &lock_ref_count, -1);
+
+ SMB_ASSERT(NT_STATUS_IS_OK(status));
+ SMB_ASSERT(lock_ref_count > 0);
+
+ /* lock_ref_count holds the pre-decrement value here. */
+ DEBUG(10,("lock_ref_count for file %s = %d\n",
+ fsp_str_dbg(fsp), (int)(lock_ref_count - 1)));
+}
+
+/****************************************************************************
+ Fetch the lock ref count.
+****************************************************************************/
+
+/* Fetch the per-dev/inode lock ref count; a missing record (or a fetch
+ * error, which is only logged) reads as 0. */
+static int32_t get_lock_ref_count(const files_struct *fsp)
+{
+ struct lock_ref_count_key tmp;
+ NTSTATUS status;
+ int32_t lock_ref_count = 0;
+
+ status = dbwrap_fetch_int32(
+ posix_pending_close_db, locking_ref_count_key_fsp(fsp, &tmp),
+ &lock_ref_count);
+
+ if (!NT_STATUS_IS_OK(status) &&
+ !NT_STATUS_EQUAL(status, NT_STATUS_NOT_FOUND)) {
+ DEBUG(0, ("Error fetching "
+ "lock ref count for file %s: %s\n",
+ fsp_str_dbg(fsp), nt_errstr(status)));
+ }
+ return lock_ref_count;
+}
+
+/****************************************************************************
+ Delete a lock_ref_count entry.
+****************************************************************************/
+
+/* Remove the lock ref count record for this dev/inode; the delete
+ * result is deliberately ignored. */
+static void delete_lock_ref_count(const files_struct *fsp)
+{
+ struct lock_ref_count_key tmp;
+
+ /* Not a bug if it doesn't exist - no locks were ever granted. */
+
+ dbwrap_delete(posix_pending_close_db,
+ locking_ref_count_key_fsp(fsp, &tmp));
+
+ DEBUG(10,("delete_lock_ref_count for file %s\n",
+ fsp_str_dbg(fsp)));
+}
+
+/****************************************************************************
+ Next - the functions that deal with storing fd's that have outstanding
+ POSIX locks when closed.
+****************************************************************************/
+
+/****************************************************************************
+ The records in posix_pending_close_db are composed of an array of
+ ints keyed by dev/ino pair. Those ints are the fd's that were open on
+ this dev/ino pair that should have been closed, but can't as the lock
+ ref count is non zero.
+****************************************************************************/
+
+/* Carries the fsp whose fd is being appended to the pending close record. */
+struct add_fd_to_close_entry_state {
+ const struct files_struct *fsp;
+};
+
+/*
+ * dbwrap_do_locked() callback: append this fsp's pathref fd to the
+ * record's existing int array via a two-part vector store (no copy of
+ * the old value is made). Asserts that the existing value is a whole
+ * number of ints and that the store succeeds.
+ */
+static void add_fd_to_close_entry_fn(
+ struct db_record *rec,
+ TDB_DATA value,
+ void *private_data)
+{
+ struct add_fd_to_close_entry_state *state = private_data;
+ int fd = fsp_get_pathref_fd(state->fsp);
+ TDB_DATA values[] = {
+ value,
+ { .dptr = (uint8_t *)&fd,
+ .dsize = sizeof(fd) },
+ };
+ NTSTATUS status;
+
+ SMB_ASSERT((values[0].dsize % sizeof(int)) == 0);
+
+ status = dbwrap_record_storev(rec, values, ARRAY_SIZE(values), 0);
+ SMB_ASSERT(NT_STATUS_IS_OK(status));
+}
+
+/****************************************************************************
+ Add an fd to the pending close db.
+****************************************************************************/
+
+/* Record this fsp's fd in the pending close db so it can be closed
+ * later, once the last lock on the dev/inode goes away. */
+static void add_fd_to_close_entry(const files_struct *fsp)
+{
+ struct add_fd_to_close_entry_state state = { .fsp = fsp };
+ NTSTATUS status;
+
+ status = dbwrap_do_locked(
+ posix_pending_close_db,
+ fd_array_key_fsp(fsp),
+ add_fd_to_close_entry_fn,
+ &state);
+ SMB_ASSERT(NT_STATUS_IS_OK(status));
+
+ DBG_DEBUG("added fd %d file %s\n",
+ fsp_get_pathref_fd(fsp),
+ fsp_str_dbg(fsp));
+}
+
+/*
+ * dbwrap_do_locked() callback: close every fd stored in this pending
+ * close record (close() results are ignored) and delete the record.
+ * memcpy is used because data.dptr may not be aligned for int access.
+ */
+static void fd_close_posix_fn(
+ struct db_record *rec,
+ TDB_DATA data,
+ void *private_data)
+{
+ size_t num_fds, i;
+
+ SMB_ASSERT((data.dsize % sizeof(int)) == 0);
+ num_fds = data.dsize / sizeof(int);
+
+ for (i=0; i<num_fds; i++) {
+ int fd;
+ memcpy(&fd, data.dptr, sizeof(int));
+ close(fd);
+ data.dptr += sizeof(int);
+ }
+ dbwrap_record_delete(rec);
+}
+
+/****************************************************************************
+ Deal with pending closes needed by POSIX locking support.
+ Note that locking_close_file() is expected to have been called
+ to delete all locks on this fsp before this function is called.
+****************************************************************************/
+
+/*
+ * Close an fsp's fd, deferring the real close() while other fds on the
+ * same dev/inode still hold POSIX locks (closing now would drop them,
+ * per POSIX per-process lock semantics). Returns close(2)'s result, or
+ * 0 when the close was deferred into the pending close db.
+ */
+int fd_close_posix(const struct files_struct *fsp)
+{
+ NTSTATUS status;
+
+ if (!lp_locking(fsp->conn->params) ||
+ !lp_posix_locking(fsp->conn->params) ||
+ fsp->fsp_flags.use_ofd_locks)
+ {
+ /*
+ * No locking or POSIX to worry about or we are using POSIX
+ * open file description lock semantics which only removes
+ * locks on the file descriptor we're closing. Just close.
+ */
+ return close(fsp_get_pathref_fd(fsp));
+ }
+
+ if (get_lock_ref_count(fsp)) {
+
+ /*
+ * There are outstanding locks on this dev/inode pair on
+ * other fds. Add our fd to the pending close db. We also
+ * set fsp_get_io_fd(fsp) to -1 inside fd_close() after returning
+ * from VFS layer.
+ */
+
+ add_fd_to_close_entry(fsp);
+ return 0;
+ }
+
+ /* No locks remain: flush any previously deferred fds now. */
+ status = dbwrap_do_locked(
+ posix_pending_close_db,
+ fd_array_key_fsp(fsp),
+ fd_close_posix_fn,
+ NULL);
+ if (!NT_STATUS_IS_OK(status)) {
+ DBG_WARNING("dbwrap_do_locked failed: %s\n",
+ nt_errstr(status));
+ }
+
+ /* Don't need a lock ref count on this dev/ino anymore. */
+ delete_lock_ref_count(fsp);
+
+ /*
+ * Finally close the fd associated with this fsp.
+ */
+
+ return close(fsp_get_pathref_fd(fsp));
+}
+
+/****************************************************************************
+ Next - the functions that deal with the mapping CIFS Windows locks onto
+ the underlying system POSIX locks.
+****************************************************************************/
+
+/*
+ * Structure used when splitting a lock range
+ * into a POSIX lock range. Doubly linked list.
+ */
+
+struct lock_list {
+ struct lock_list *next;
+ struct lock_list *prev;
+ off_t start; /* Byte offset where the range begins. */
+ off_t size; /* Length of the range in bytes. */
+};
+
+/****************************************************************************
+ Create a list of lock ranges that don't overlap a given range. Used in calculating
+ POSIX locks and unlocks. This is a difficult function that requires ASCII art to
+ understand it :-).
+****************************************************************************/
+
+static struct lock_list *posix_lock_list(TALLOC_CTX *ctx,
+ struct lock_list *lhead,
+ const struct lock_context *lock_ctx, /* Lock context lhead belongs to. */
+ const struct lock_struct *plocks,
+ int num_locks)
+{
+ int i;
+
+ /*
+ * Check the current lock list on this dev/inode pair.
+ * Quit if the list is deleted.
+ */
+
+ DEBUG(10, ("posix_lock_list: curr: start=%ju,size=%ju\n",
+ (uintmax_t)lhead->start, (uintmax_t)lhead->size ));
+
+ for (i=0; i<num_locks && lhead; i++) {
+ const struct lock_struct *lock = &plocks[i];
+ struct lock_list *l_curr;
+
+ /* Ignore all but read/write locks. */
+ if (lock->lock_type != READ_LOCK && lock->lock_type != WRITE_LOCK) {
+ continue;
+ }
+
+ /* Ignore locks not owned by this process. */
+ if (!server_id_equal(&lock->context.pid, &lock_ctx->pid)) {
+ continue;
+ }
+
+ /*
+ * Walk the lock list, checking for overlaps. Note that
+ * the lock list can expand within this loop if the current
+ * range being examined needs to be split.
+ */
+
+ for (l_curr = lhead; l_curr;) {
+
+ DEBUG(10, ("posix_lock_list: lock: fnum=%ju: "
+ "start=%ju,size=%ju:type=%s",
+ (uintmax_t)lock->fnum,
+ (uintmax_t)lock->start,
+ (uintmax_t)lock->size,
+ posix_lock_type_name(lock->lock_type) ));
+
+ if ( (l_curr->start >= (lock->start + lock->size)) ||
+ (lock->start >= (l_curr->start + l_curr->size))) {
+
+ /* No overlap with existing lock - leave this range alone. */
+/*********************************************
+ +---------+
+ | l_curr |
+ +---------+
+ +-------+
+ | lock |
+ +-------+
+OR....
+ +---------+
+ | l_curr |
+ +---------+
+**********************************************/
+
+ DEBUG(10,(" no overlap case.\n" ));
+
+ l_curr = l_curr->next;
+
+ } else if ( (l_curr->start >= lock->start) &&
+ (l_curr->start + l_curr->size <= lock->start + lock->size) ) {
+
+ /*
+ * This range is completely overlapped by this existing lock range
+ * and thus should have no effect. Delete it from the list.
+ */
+/*********************************************
+ +---------+
+ | l_curr |
+ +---------+
+ +---------------------------+
+ | lock |
+ +---------------------------+
+**********************************************/
+ /* Save the next pointer */
+ struct lock_list *ul_next = l_curr->next;
+
+ DEBUG(10,(" delete case.\n" ));
+
+ DLIST_REMOVE(lhead, l_curr);
+ if(lhead == NULL) {
+ break; /* No more list... */
+ }
+
+ l_curr = ul_next;
+
+ } else if ( (l_curr->start >= lock->start) &&
+ (l_curr->start < lock->start + lock->size) &&
+ (l_curr->start + l_curr->size > lock->start + lock->size) ) {
+
+ /*
+ * This range overlaps the existing lock range at the high end.
+ * Truncate by moving start to existing range end and reducing size.
+ */
+/*********************************************
+ +---------------+
+ | l_curr |
+ +---------------+
+ +---------------+
+ | lock |
+ +---------------+
+BECOMES....
+ +-------+
+ | l_curr|
+ +-------+
+**********************************************/
+
+ l_curr->size = (l_curr->start + l_curr->size) - (lock->start + lock->size);
+ l_curr->start = lock->start + lock->size;
+
+ DEBUG(10, (" truncate high case: start=%ju,"
+ "size=%ju\n",
+ (uintmax_t)l_curr->start,
+ (uintmax_t)l_curr->size ));
+
+ l_curr = l_curr->next;
+
+ } else if ( (l_curr->start < lock->start) &&
+ (l_curr->start + l_curr->size > lock->start) &&
+ (l_curr->start + l_curr->size <= lock->start + lock->size) ) {
+
+ /*
+ * This range overlaps the existing lock range at the low end.
+ * Truncate by reducing size.
+ */
+/*********************************************
+ +---------------+
+ | l_curr |
+ +---------------+
+ +---------------+
+ | lock |
+ +---------------+
+BECOMES....
+ +-------+
+ | l_curr|
+ +-------+
+**********************************************/
+
+ l_curr->size = lock->start - l_curr->start;
+
+ DEBUG(10, (" truncate low case: start=%ju,"
+ "size=%ju\n",
+ (uintmax_t)l_curr->start,
+ (uintmax_t)l_curr->size ));
+
+ l_curr = l_curr->next;
+
+ } else if ( (l_curr->start < lock->start) &&
+ (l_curr->start + l_curr->size > lock->start + lock->size) ) {
+ /*
+ * Worst case scenario. Range completely overlaps an existing
+ * lock range. Split the request into two, push the new (upper) request
+ * into the dlink list, and continue with the entry after l_new (as we
+ * know that l_new will not overlap with this lock).
+ */
+/*********************************************
+ +---------------------------+
+ | l_curr |
+ +---------------------------+
+ +---------+
+ | lock |
+ +---------+
+BECOMES.....
+ +-------+ +---------+
+ | l_curr| | l_new |
+ +-------+ +---------+
+**********************************************/
+ struct lock_list *l_new = talloc(ctx, struct lock_list);
+
+ if(l_new == NULL) {
+ DEBUG(0,("posix_lock_list: talloc fail.\n"));
+ return NULL; /* The talloc_destroy takes care of cleanup. */
+ }
+
+ ZERO_STRUCTP(l_new);
+ l_new->start = lock->start + lock->size;
+ l_new->size = l_curr->start + l_curr->size - l_new->start;
+
+ /* Truncate the l_curr. */
+ l_curr->size = lock->start - l_curr->start;
+
+ DEBUG(10, (" split case: curr: start=%ju,"
+ "size=%ju new: start=%ju,"
+ "size=%ju\n",
+ (uintmax_t)l_curr->start,
+ (uintmax_t)l_curr->size,
+ (uintmax_t)l_new->start,
+ (uintmax_t)l_new->size ));
+
+ /*
+ * Add into the dlink list after the l_curr point - NOT at lhead.
+ */
+ DLIST_ADD_AFTER(lhead, l_new, l_curr);
+
+ /* And move after the link we added. */
+ l_curr = l_new->next;
+
+ } else {
+
+ /*
+ * This logic case should never happen. Ensure this is the
+ * case by forcing an abort.... Remove in production.
+ */
+ char *msg = NULL;
+
+ if (asprintf(&msg, "logic flaw in cases: "
+ "l_curr: start = %ju, "
+ "size = %ju : lock: "
+ "start = %ju, size = %ju",
+ (uintmax_t)l_curr->start,
+ (uintmax_t)l_curr->size,
+ (uintmax_t)lock->start,
+ (uintmax_t)lock->size ) != -1) {
+ smb_panic(msg);
+ } else {
+ smb_panic("posix_lock_list");
+ }
+ }
+ } /* end for ( l_curr = lhead; l_curr;) */
+ } /* end for (i=0; i<num_locks && ul_head; i++) */
+
+ return lhead;
+}
+
+/****************************************************************************
+ POSIX function to acquire a lock. Returns True if the
+ lock could be granted, False if not.
+****************************************************************************/
+
+bool set_posix_lock_windows_flavour(files_struct *fsp,
+				uint64_t u_offset,
+				uint64_t u_count,
+				enum brl_type lock_type,
+				const struct lock_context *lock_ctx,
+				const struct lock_struct *plocks,
+				int num_locks,
+				int *errno_ret)
+{
+	off_t offset;
+	off_t count;
+	int posix_lock_type = map_posix_lock_type(fsp,lock_type);
+	bool ret = True;
+	size_t lock_count;
+	TALLOC_CTX *l_ctx = NULL;
+	struct lock_list *llist = NULL;
+	struct lock_list *ll = NULL;
+
+	DEBUG(5, ("set_posix_lock_windows_flavour: File %s, offset = %ju, "
+		"count = %ju, type = %s\n", fsp_str_dbg(fsp),
+		(uintmax_t)u_offset, (uintmax_t)u_count,
+		posix_lock_type_name(lock_type)));
+
+	/*
+	 * If the requested lock won't fit in the POSIX range, we will
+	 * pretend it was successful.
+	 */
+
+	if(!posix_lock_in_range(&offset, &count, u_offset, u_count)) {
+		increment_lock_ref_count(fsp);
+		return True;
+	}
+
+	/*
+	 * Windows is very strange. It allows read locks to be overlayed
+	 * (even over a write lock), but leaves the write lock in force until the first
+	 * unlock. It also reference counts the locks. This means the following sequence :
+	 *
+	 * process1                                      process2
+	 * ------------------------------------------------------------------------
+	 * WRITE LOCK : start = 2, len = 10
+	 *                                            READ LOCK: start =0, len = 10 - FAIL
+	 * READ LOCK : start = 0, len = 14
+	 *                                            READ LOCK: start =0, len = 10 - FAIL
+	 * UNLOCK : start = 2, len = 10
+	 *                                            READ LOCK: start =0, len = 10 - OK
+	 *
+	 * Under POSIX, the same sequence in steps 1 and 2 would not be reference counted, but
+	 * would leave a single read lock over the 0-14 region.
+	 */
+
+	if ((l_ctx = talloc_init("set_posix_lock")) == NULL) {
+		DEBUG(0,("set_posix_lock_windows_flavour: unable to init talloc context.\n"));
+		return False;
+	}
+
+	if ((ll = talloc(l_ctx, struct lock_list)) == NULL) {
+		DEBUG(0,("set_posix_lock_windows_flavour: unable to talloc unlock list.\n"));
+		talloc_destroy(l_ctx);
+		return False;
+	}
+
+	/*
+	 * Create the initial list entry containing the
+	 * lock we want to add.
+	 */
+
+	ZERO_STRUCTP(ll);
+	ll->start = offset;
+	ll->size = count;
+
+	DLIST_ADD(llist, ll);
+
+	/*
+	 * The following call calculates if there are any
+	 * overlapping locks held by this process on
+	 * fd's open on the same file and splits this list
+	 * into a list of lock ranges that do not overlap with existing
+	 * POSIX locks.
+	 */
+
+	llist = posix_lock_list(l_ctx,
+				llist,
+				lock_ctx, /* Lock context llist belongs to. */
+				plocks,
+				num_locks);
+
+	/*
+	 * NOTE: llist may come back NULL here - either every requested
+	 * range was already covered by existing POSIX locks owned by this
+	 * process (nothing new needs locking), or the talloc inside
+	 * posix_lock_list failed. Both cases leave the loop below empty
+	 * and the call reports success.
+	 */
+
+	/*
+	 * Add the POSIX locks on the list of ranges returned.
+	 * As the lock is supposed to be added atomically, we need to
+	 * back out all the locks if any one of these calls fail.
+	 */
+
+	/* lock_count tracks how many ranges were locked, for the backout path. */
+	for (lock_count = 0, ll = llist; ll; ll = ll->next, lock_count++) {
+		offset = ll->start;
+		count = ll->size;
+
+		DEBUG(5, ("set_posix_lock_windows_flavour: Real lock: "
+			"Type = %s: offset = %ju, count = %ju\n",
+			posix_lock_type_name(posix_lock_type),
+			(uintmax_t)offset, (uintmax_t)count ));
+
+		if (!posix_fcntl_lock(fsp,F_SETLK,offset,count,posix_lock_type)) {
+			*errno_ret = errno;
+			DEBUG(5, ("set_posix_lock_windows_flavour: Lock "
+				"fail !: Type = %s: offset = %ju, "
+				"count = %ju. Errno = %s\n",
+				posix_lock_type_name(posix_lock_type),
+				(uintmax_t)offset, (uintmax_t)count,
+				strerror(errno) ));
+			ret = False;
+			break;
+		}
+	}
+
+	if (!ret) {
+
+		/*
+		 * Back out all the POSIX locks we have on fail.
+		 */
+
+		/* Only the first lock_count entries were acquired above. */
+		for (ll = llist; lock_count; ll = ll->next, lock_count--) {
+			offset = ll->start;
+			count = ll->size;
+
+			DEBUG(5, ("set_posix_lock_windows_flavour: Backing "
+				"out locks: Type = %s: offset = %ju, "
+				"count = %ju\n",
+				posix_lock_type_name(posix_lock_type),
+				(uintmax_t)offset, (uintmax_t)count ));
+
+			posix_fcntl_lock(fsp,F_SETLK,offset,count,F_UNLCK);
+		}
+	} else {
+		/* Remember the number of locks we have on this dev/ino pair. */
+		increment_lock_ref_count(fsp);
+	}
+
+	talloc_destroy(l_ctx);
+	return ret;
+}
+
+/****************************************************************************
+ POSIX function to release a lock. Returns True if the
+ lock could be released, False if not.
+****************************************************************************/
+
+bool release_posix_lock_windows_flavour(files_struct *fsp,
+				uint64_t u_offset,
+				uint64_t u_count,
+				enum brl_type deleted_lock_type,
+				const struct lock_context *lock_ctx,
+				const struct lock_struct *plocks,
+				int num_locks)
+{
+	off_t offset;
+	off_t count;
+	bool ret = True;
+	TALLOC_CTX *ul_ctx = NULL;
+	struct lock_list *ulist = NULL;
+	struct lock_list *ul = NULL;
+
+	DEBUG(5, ("release_posix_lock_windows_flavour: File %s, offset = %ju, "
+		"count = %ju\n", fsp_str_dbg(fsp),
+		(uintmax_t)u_offset, (uintmax_t)u_count));
+
+	/* Remember the number of locks we have on this dev/ino pair. */
+	/* Dropped unconditionally here, even if we "pretend success" below -
+	 * this mirrors the unconditional increment in the set path. */
+	decrement_lock_ref_count(fsp);
+
+	/*
+	 * If the requested lock won't fit in the POSIX range, we will
+	 * pretend it was successful.
+	 */
+
+	if(!posix_lock_in_range(&offset, &count, u_offset, u_count)) {
+		return True;
+	}
+
+	if ((ul_ctx = talloc_init("release_posix_lock")) == NULL) {
+		DEBUG(0,("release_posix_lock_windows_flavour: unable to init talloc context.\n"));
+		return False;
+	}
+
+	if ((ul = talloc(ul_ctx, struct lock_list)) == NULL) {
+		DEBUG(0,("release_posix_lock_windows_flavour: unable to talloc unlock list.\n"));
+		talloc_destroy(ul_ctx);
+		return False;
+	}
+
+	/*
+	 * Create the initial list entry containing the
+	 * lock we want to remove.
+	 */
+
+	ZERO_STRUCTP(ul);
+	ul->start = offset;
+	ul->size = count;
+
+	DLIST_ADD(ulist, ul);
+
+	/*
+	 * The following call calculates if there are any
+	 * overlapping locks held by this process on
+	 * fd's open on the same file and creates a
+	 * list of unlock ranges that will allow
+	 * POSIX lock ranges to remain on the file whilst the
+	 * unlocks are performed.
+	 */
+
+	ulist = posix_lock_list(ul_ctx,
+				ulist,
+				lock_ctx, /* Lock context ulist belongs to. */
+				plocks,
+				num_locks);
+
+	/*
+	 * If there were any overlapped entries (list is > 1 or size or start have changed),
+	 * and the lock_type we just deleted from
+	 * the upper layer tdb was a write lock, then before doing the unlock we need to downgrade
+	 * the POSIX lock to a read lock. This allows any overlapping read locks
+	 * to be atomically maintained.
+	 */
+
+	if (deleted_lock_type == WRITE_LOCK &&
+	    (!ulist || ulist->next != NULL || ulist->start != offset || ulist->size != count)) {
+
+		DEBUG(5, ("release_posix_lock_windows_flavour: downgrading "
+			"lock to READ: offset = %ju, count = %ju\n",
+			(uintmax_t)offset, (uintmax_t)count ));
+
+		if (!posix_fcntl_lock(fsp,F_SETLK,offset,count,F_RDLCK)) {
+			DEBUG(0,("release_posix_lock_windows_flavour: downgrade of lock failed with error %s !\n", strerror(errno) ));
+			talloc_destroy(ul_ctx);
+			return False;
+		}
+	}
+
+	/*
+	 * Release the POSIX locks on the list of ranges returned.
+	 */
+
+	/* A NULL ulist means every range is still covered by other locks
+	 * held by this process, so nothing gets unlocked here. */
+	for(; ulist; ulist = ulist->next) {
+		offset = ulist->start;
+		count = ulist->size;
+
+		DEBUG(5, ("release_posix_lock_windows_flavour: Real unlock: "
+			"offset = %ju, count = %ju\n",
+			(uintmax_t)offset, (uintmax_t)count ));
+
+		if (!posix_fcntl_lock(fsp,F_SETLK,offset,count,F_UNLCK)) {
+			ret = False;
+		}
+	}
+
+	talloc_destroy(ul_ctx);
+	return ret;
+}
+
+/****************************************************************************
+ Next - the functions that deal with mapping CIFS POSIX locks onto
+ the underlying system POSIX locks.
+****************************************************************************/
+
+/****************************************************************************
+ We only increment the lock ref count when we see a POSIX lock on a context
+ that doesn't already have them.
+****************************************************************************/
+
+static void increment_posix_lock_count(const files_struct *fsp,
+				uint64_t smblctx)
+{
+	NTSTATUS status;
+	TDB_DATA key = {
+		.dptr = (uint8_t *)&smblctx,
+		.dsize = sizeof(smblctx),
+	};
+	TDB_DATA empty_val = { 0 };
+
+	/*
+	 * Only bump the ref count the first time a POSIX flavor lock
+	 * appears on this context; further locks on the same context
+	 * are already accounted for.
+	 */
+	if (dbwrap_exists(posix_pending_close_db, key)) {
+		return;
+	}
+
+	/* Record that this context now holds POSIX flavor locks. */
+	status = dbwrap_store(posix_pending_close_db, key, empty_val, 0);
+	SMB_ASSERT(NT_STATUS_IS_OK(status));
+
+	increment_lock_ref_count(fsp);
+
+	DEBUG(10,("posix_locks set for file %s\n",
+		fsp_str_dbg(fsp)));
+}
+
+static void decrement_posix_lock_count(const files_struct *fsp, uint64_t smblctx)
+{
+	NTSTATUS status;
+	TDB_DATA key = {
+		.dptr = (uint8_t *)&smblctx,
+		.dsize = sizeof(smblctx),
+	};
+
+	/* Forget the "has POSIX flavor locks" marker for this context. */
+	status = dbwrap_delete(posix_pending_close_db, key);
+	SMB_ASSERT(NT_STATUS_IS_OK(status));
+
+	decrement_lock_ref_count(fsp);
+
+	DEBUG(10,("posix_locks deleted for file %s\n",
+		fsp_str_dbg(fsp)));
+}
+
+/****************************************************************************
+ Return true if any locks exist on the given lock context.
+****************************************************************************/
+
+static bool locks_exist_on_context(const struct lock_struct *plocks,
+				int num_locks,
+				const struct lock_context *lock_ctx)
+{
+	int i;
+
+	for (i = 0; i < num_locks; i++) {
+		const struct lock_struct *cur = &plocks[i];
+		bool is_rw_lock;
+
+		/* Only read/write locks are of interest. */
+		is_rw_lock = (cur->lock_type == READ_LOCK ||
+			      cur->lock_type == WRITE_LOCK);
+		if (!is_rw_lock) {
+			continue;
+		}
+
+		/* Skip locks belonging to other processes. */
+		if (!server_id_equal(&cur->context.pid, &lock_ctx->pid)) {
+			continue;
+		}
+
+		if (cur->context.smblctx == lock_ctx->smblctx) {
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/****************************************************************************
+ POSIX function to acquire a lock. Returns True if the
+ lock could be granted, False if not.
+ As POSIX locks don't stack or conflict (they just overwrite)
+ we can map the requested lock directly onto a system one. We
+ know it doesn't conflict with locks on other contexts as the
+ upper layer would have refused it.
+****************************************************************************/
+
+bool set_posix_lock_posix_flavour(files_struct *fsp,
+			uint64_t u_offset,
+			uint64_t u_count,
+			enum brl_type lock_type,
+			const struct lock_context *lock_ctx,
+			int *errno_ret)
+{
+	off_t offset;
+	off_t count;
+	int posix_lock_type = map_posix_lock_type(fsp,lock_type);
+
+	DEBUG(5,("set_posix_lock_posix_flavour: File %s, offset = %ju, count "
+		"= %ju, type = %s\n", fsp_str_dbg(fsp),
+		(uintmax_t)u_offset, (uintmax_t)u_count,
+		posix_lock_type_name(lock_type)));
+
+	/*
+	 * If the requested lock won't fit in the POSIX range, we will
+	 * pretend it was successful.
+	 */
+
+	if(!posix_lock_in_range(&offset, &count, u_offset, u_count)) {
+		increment_posix_lock_count(fsp, lock_ctx->smblctx);
+		return True;
+	}
+
+	if (!posix_fcntl_lock(fsp,F_SETLK,offset,count,posix_lock_type)) {
+		*errno_ret = errno;
+		/*
+		 * BUGFIX: %ju requires uintmax_t arguments; the previous
+		 * (intmax_t) casts here were undefined behaviour per the
+		 * C standard's fprintf conversion rules.
+		 */
+		DEBUG(5,("set_posix_lock_posix_flavour: Lock fail !: Type = %s: offset = %ju, count = %ju. Errno = %s\n",
+			posix_lock_type_name(posix_lock_type), (uintmax_t)offset, (uintmax_t)count, strerror(errno) ));
+		return False;
+	}
+	increment_posix_lock_count(fsp, lock_ctx->smblctx);
+	return True;
+}
+
+/****************************************************************************
+ POSIX function to release a lock. Returns True if the
+ lock could be released, False if not.
+ We are given a complete lock state from the upper layer which is what the lock
+ state should be after the unlock has already been done, so what
+ we do is punch out holes in the unlock range where locks owned by this process
+ have a different lock context.
+****************************************************************************/
+
+bool release_posix_lock_posix_flavour(files_struct *fsp,
+				uint64_t u_offset,
+				uint64_t u_count,
+				const struct lock_context *lock_ctx,
+				const struct lock_struct *plocks,
+				int num_locks)
+{
+	bool ret = True;
+	off_t offset;
+	off_t count;
+	TALLOC_CTX *ul_ctx = NULL;
+	struct lock_list *ulist = NULL;
+	struct lock_list *ul = NULL;
+
+	DEBUG(5, ("release_posix_lock_posix_flavour: File %s, offset = %ju, "
+		"count = %ju\n", fsp_str_dbg(fsp),
+		(uintmax_t)u_offset, (uintmax_t)u_count));
+
+	/*
+	 * If the requested lock won't fit in the POSIX range, we will
+	 * pretend it was successful.
+	 */
+
+	if(!posix_lock_in_range(&offset, &count, u_offset, u_count)) {
+		if (!locks_exist_on_context(plocks, num_locks, lock_ctx)) {
+			decrement_posix_lock_count(fsp, lock_ctx->smblctx);
+		}
+		return True;
+	}
+
+	/*
+	 * BUGFIX: the two error messages below previously named
+	 * release_posix_lock_windows_flavour (copy-paste), which made
+	 * failure logs point at the wrong function.
+	 */
+	if ((ul_ctx = talloc_init("release_posix_lock")) == NULL) {
+		DEBUG(0,("release_posix_lock_posix_flavour: unable to init talloc context.\n"));
+		return False;
+	}
+
+	if ((ul = talloc(ul_ctx, struct lock_list)) == NULL) {
+		DEBUG(0,("release_posix_lock_posix_flavour: unable to talloc unlock list.\n"));
+		talloc_destroy(ul_ctx);
+		return False;
+	}
+
+	/*
+	 * Create the initial list entry containing the
+	 * lock we want to remove.
+	 */
+
+	ZERO_STRUCTP(ul);
+	ul->start = offset;
+	ul->size = count;
+
+	DLIST_ADD(ulist, ul);
+
+	/*
+	 * Walk the given array creating a linked list
+	 * of unlock requests.
+	 */
+
+	ulist = posix_lock_list(ul_ctx,
+				ulist,
+				lock_ctx, /* Lock context ulist belongs to. */
+				plocks,
+				num_locks);
+
+	/*
+	 * Release the POSIX locks on the list of ranges returned.
+	 * A NULL list means every range is still covered by locks held
+	 * under other contexts of this process - nothing to unlock.
+	 */
+
+	for(; ulist; ulist = ulist->next) {
+		offset = ulist->start;
+		count = ulist->size;
+
+		DEBUG(5, ("release_posix_lock_posix_flavour: Real unlock: "
+			"offset = %ju, count = %ju\n",
+			(uintmax_t)offset, (uintmax_t)count ));
+
+		if (!posix_fcntl_lock(fsp,F_SETLK,offset,count,F_UNLCK)) {
+			ret = False;
+		}
+	}
+
+	if (!locks_exist_on_context(plocks, num_locks, lock_ctx)) {
+		decrement_posix_lock_count(fsp, lock_ctx->smblctx);
+	}
+	talloc_destroy(ul_ctx);
+	return ret;
+}
diff --git a/source3/locking/proto.h b/source3/locking/proto.h
new file mode 100644
index 0000000..7fc177d
--- /dev/null
+++ b/source3/locking/proto.h
@@ -0,0 +1,215 @@
+/*
+ * Unix SMB/CIFS implementation.
+ * Locking functions
+ *
+ * Copyright (C) Andrew Tridgell 1992-2000
+ * Copyright (C) Jeremy Allison 1992-2006
+ * Copyright (C) Volker Lendecke 2005
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _LOCKING_PROTO_H_
+#define _LOCKING_PROTO_H_
+
+#include <tdb.h>
+
+/* The following definitions come from locking/brlock.c */
+
+void brl_init(bool read_only);
+void brl_shutdown(void);
+
+unsigned int brl_num_locks(const struct byte_range_lock *brl);
+struct files_struct *brl_fsp(struct byte_range_lock *brl);
+TALLOC_CTX *brl_req_mem_ctx(const struct byte_range_lock *brl);
+const struct GUID *brl_req_guid(const struct byte_range_lock *brl);
+
+bool byte_range_valid(uint64_t ofs, uint64_t len);
+bool byte_range_overlap(uint64_t ofs1,
+ uint64_t len1,
+ uint64_t ofs2,
+ uint64_t len2);
+
+NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
+ struct lock_struct *plock);
+
+NTSTATUS brl_lock(
+ struct byte_range_lock *br_lck,
+ uint64_t smblctx,
+ struct server_id pid,
+ br_off start,
+ br_off size,
+ enum brl_type lock_type,
+ enum brl_flavour lock_flav,
+ struct server_id *blocker_pid,
+ uint64_t *psmblctx);
+bool brl_unlock(struct byte_range_lock *br_lck,
+ uint64_t smblctx,
+ struct server_id pid,
+ br_off start,
+ br_off size,
+ enum brl_flavour lock_flav);
+bool brl_unlock_windows_default(struct byte_range_lock *br_lck,
+ const struct lock_struct *plock);
+bool brl_locktest(struct byte_range_lock *br_lck,
+ const struct lock_struct *rw_probe);
+NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
+ uint64_t *psmblctx,
+ struct server_id pid,
+ br_off *pstart,
+ br_off *psize,
+ enum brl_type *plock_type,
+ enum brl_flavour lock_flav);
+bool brl_mark_disconnected(struct files_struct *fsp);
+bool brl_reconnect_disconnected(struct files_struct *fsp);
+void brl_close_fnum(struct byte_range_lock *br_lck);
+int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
+ enum brl_type lock_type,
+ enum brl_flavour lock_flav,
+ br_off start, br_off size,
+ void *private_data),
+ void *private_data);
+struct byte_range_lock *brl_get_locks_for_locking(TALLOC_CTX *mem_ctx,
+ files_struct *fsp,
+ TALLOC_CTX *req_mem_ctx,
+ const struct GUID *req_guid);
+struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
+ files_struct *fsp);
+struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp);
+bool brl_cleanup_disconnected(struct file_id fid, uint64_t open_persistent_id);
+
+/* The following definitions come from locking/locking.c */
+
+const char *lock_type_name(enum brl_type lock_type);
+const char *lock_flav_name(enum brl_flavour lock_flav);
+void init_strict_lock_struct(files_struct *fsp,
+ uint64_t smblctx,
+ br_off start,
+ br_off size,
+ enum brl_type lock_type,
+ enum brl_flavour lock_flav,
+ struct lock_struct *plock);
+bool strict_lock_check_default(files_struct *fsp,
+ struct lock_struct *plock);
+NTSTATUS query_lock(files_struct *fsp,
+ uint64_t *psmblctx,
+ uint64_t *pcount,
+ uint64_t *poffset,
+ enum brl_type *plock_type,
+ enum brl_flavour lock_flav);
+NTSTATUS do_lock(files_struct *fsp,
+ TALLOC_CTX *req_mem_ctx,
+ const struct GUID *req_guid,
+ uint64_t smblctx,
+ uint64_t count,
+ uint64_t offset,
+ enum brl_type lock_type,
+ enum brl_flavour lock_flav,
+ struct server_id *pblocker_pid,
+ uint64_t *psmblctx);
+NTSTATUS do_unlock(files_struct *fsp,
+ uint64_t smblctx,
+ uint64_t count,
+ uint64_t offset,
+ enum brl_flavour lock_flav);
+void locking_close_file(files_struct *fsp,
+ enum file_close_type close_type);
+char *share_mode_str(TALLOC_CTX *ctx, int num,
+ const struct file_id *id,
+ const struct share_mode_entry *e);
+
+bool rename_share_filename(struct messaging_context *msg_ctx,
+ struct share_mode_lock *lck,
+ struct file_id id,
+ const char *servicepath,
+ uint32_t orig_name_hash,
+ uint32_t new_name_hash,
+ const struct smb_filename *smb_fname);
+void get_file_infos(struct file_id id,
+ uint32_t name_hash,
+ bool *delete_on_close,
+ struct timespec *write_time);
+bool is_valid_share_mode_entry(const struct share_mode_entry *e);
+bool share_entry_stale_pid(struct share_mode_entry *e);
+NTSTATUS remove_lease_if_stale(struct share_mode_lock *lck,
+ const struct GUID *client_guid,
+ const struct smb2_lease_key *lease_key);
+bool get_delete_on_close_token(struct share_mode_lock *lck,
+ uint32_t name_hash,
+ const struct security_token **pp_nt_tok,
+ const struct security_unix_token **pp_tok);
+void reset_delete_on_close_lck(files_struct *fsp,
+ struct share_mode_lock *lck);
+void set_delete_on_close_lck(files_struct *fsp,
+ struct share_mode_lock *lck,
+ const struct security_token *nt_tok,
+ const struct security_unix_token *tok);
+bool set_delete_on_close(files_struct *fsp, bool delete_on_close,
+ const struct security_token *nt_tok,
+ const struct security_unix_token *tok);
+bool is_delete_on_close_set(struct share_mode_lock *lck, uint32_t name_hash);
+bool set_sticky_write_time(struct file_id fileid, struct timespec write_time);
+bool set_write_time(struct file_id fileid, struct timespec write_time);
+struct timespec get_share_mode_write_time(struct share_mode_lock *lck);
+bool file_has_open_streams(files_struct *fsp);
+bool share_mode_forall_leases(
+ struct share_mode_lock *lck,
+ bool (*fn)(struct share_mode_entry *e,
+ void *private_data),
+ void *private_data);
+
+/* The following definitions come from locking/posix.c */
+
+bool is_posix_locked(files_struct *fsp,
+ uint64_t *pu_offset,
+ uint64_t *pu_count,
+ enum brl_type *plock_type,
+ enum brl_flavour lock_flav);
+bool posix_locking_init(bool read_only);
+bool posix_locking_end(void);
+int fd_close_posix(const struct files_struct *fsp);
+bool set_posix_lock_windows_flavour(files_struct *fsp,
+ uint64_t u_offset,
+ uint64_t u_count,
+ enum brl_type lock_type,
+ const struct lock_context *lock_ctx,
+ const struct lock_struct *plocks,
+ int num_locks,
+ int *errno_ret);
+bool release_posix_lock_windows_flavour(files_struct *fsp,
+ uint64_t u_offset,
+ uint64_t u_count,
+ enum brl_type deleted_lock_type,
+ const struct lock_context *lock_ctx,
+ const struct lock_struct *plocks,
+ int num_locks);
+bool set_posix_lock_posix_flavour(files_struct *fsp,
+ uint64_t u_offset,
+ uint64_t u_count,
+ enum brl_type lock_type,
+ const struct lock_context *lock_ctx,
+ int *errno_ret);
+bool release_posix_lock_posix_flavour(files_struct *fsp,
+ uint64_t u_offset,
+ uint64_t u_count,
+ const struct lock_context *lock_ctx,
+ const struct lock_struct *plocks,
+ int num_locks);
+
+/* The following definitions come from locking/leases_util.c */
+uint32_t map_oplock_to_lease_type(uint16_t op_type);
+uint32_t fsp_lease_type(struct files_struct *fsp);
+const struct GUID *fsp_client_guid(const files_struct *fsp);
+
+#endif /* _LOCKING_PROTO_H_ */
diff --git a/source3/locking/share_mode_lock.c b/source3/locking/share_mode_lock.c
new file mode 100644
index 0000000..75912ec
--- /dev/null
+++ b/source3/locking/share_mode_lock.c
@@ -0,0 +1,2517 @@
+/*
+ Unix SMB/CIFS implementation.
+ Locking functions
+ Copyright (C) Andrew Tridgell 1992-2000
+ Copyright (C) Jeremy Allison 1992-2006
+ Copyright (C) Volker Lendecke 2005
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+ Revision History:
+
+ 12 aug 96: Erik.Devriendt@te6.siemens.be
+ added support for shared memory implementation of share mode locking
+
+ May 1997. Jeremy Allison (jallison@whistle.com). Modified share mode
+ locking to deal with multiple share modes per open file.
+
+ September 1997. Jeremy Allison (jallison@whistle.com). Added oplock
+ support.
+
+ rewritten completely to use new tdb code. Tridge, Dec '99
+
+ Added POSIX locking support. Jeremy Allison (jeremy@valinux.com), Apr. 2000.
+ Added Unix Extensions POSIX locking support. Jeremy Allison Mar 2006.
+*/
+
+#include "includes.h"
+#include "system/filesys.h"
+#include "lib/util/server_id.h"
+#include "share_mode_lock.h"
+#include "share_mode_lock_private.h"
+#include "locking/proto.h"
+#include "smbd/globals.h"
+#include "dbwrap/dbwrap.h"
+#include "dbwrap/dbwrap_open.h"
+#include "dbwrap/dbwrap_private.h"
+#include "../libcli/security/security.h"
+#include "serverid.h"
+#include "messages.h"
+#include "util_tdb.h"
+#include "../librpc/gen_ndr/ndr_open_files.h"
+#include "source3/lib/dbwrap/dbwrap_watch.h"
+#include "locking/leases_db.h"
+#include "../lib/util/memcache.h"
+#include "lib/util/tevent_ntstatus.h"
+#include "g_lock.h"
+#include "smbd/fd_handle.h"
+#include "lib/global_contexts.h"
+
+#undef DBGC_CLASS
+#define DBGC_CLASS DBGC_LOCKING
+
+/* Sentinel for "no locking count recorded". */
+#define NO_LOCKING_COUNT (-1)
+
+/* the locking database handle */
+static struct g_lock_ctx *lock_ctx;
+
+/*
+ * Bring up the locking subsystem: byte-range locks (brl), the
+ * g_lock-backed locking.tdb used for share modes, and POSIX locking
+ * state. Idempotent: if lock_ctx is already set up, this is a no-op.
+ *
+ * read_only: open locking.tdb read-only (for status/query tools).
+ * Returns true on success, false on any initialization failure.
+ *
+ * Fix: use lowercase true/false consistently (the function already
+ * mixed both spellings) and make the open-flags ternary grouping
+ * explicit instead of relying on operator precedence.
+ */
+static bool locking_init_internal(bool read_only)
+{
+	struct db_context *backend;
+	char *db_path;
+
+	brl_init(read_only);
+
+	if (lock_ctx != NULL) {
+		/* Already initialized. */
+		return true;
+	}
+
+	db_path = lock_path(talloc_tos(), "locking.tdb");
+	if (db_path == NULL) {
+		return false;
+	}
+
+	backend = db_open(NULL, db_path,
+			  SMB_OPEN_DATABASE_TDB_HASH_SIZE,
+			  TDB_DEFAULT|
+			  TDB_VOLATILE|
+			  TDB_CLEAR_IF_FIRST|
+			  TDB_INCOMPATIBLE_HASH|
+			  TDB_SEQNUM,
+			  read_only ? O_RDONLY : (O_RDWR|O_CREAT), 0644,
+			  DBWRAP_LOCK_ORDER_NONE,
+			  DBWRAP_FLAG_NONE);
+	TALLOC_FREE(db_path);
+	if (!backend) {
+		DEBUG(0,("ERROR: Failed to initialise locking database\n"));
+		return false;
+	}
+
+	/* g_lock takes ownership of the backend db on success. */
+	lock_ctx = g_lock_ctx_init_backend(
+		NULL, global_messaging_context(), &backend);
+	if (lock_ctx == NULL) {
+		TALLOC_FREE(backend);
+		return false;
+	}
+	g_lock_set_lock_order(lock_ctx, DBWRAP_LOCK_ORDER_1);
+
+	if (!posix_locking_init(read_only)) {
+		TALLOC_FREE(lock_ctx);
+		return false;
+	}
+
+	return true;
+}
+
+/* Initialize the locking subsystem read/write (normal smbd path). */
+bool locking_init(void)
+{
+	return locking_init_internal(false);
+}
+
+/* Initialize the locking subsystem read-only (status/query tools). */
+bool locking_init_readonly(void)
+{
+	return locking_init_internal(true);
+}
+
+/*******************************************************************
+ Deinitialize the share_mode management.
+ Shuts down byte-range locking and releases the g_lock context
+ (which closes locking.tdb). Always returns true.
+******************************************************************/
+
+bool locking_end(void)
+{
+	brl_shutdown();
+	TALLOC_FREE(lock_ctx);
+	return true;
+}
+
+/*******************************************************************
+ Form a static locking key for a dev/inode pair.
+ The key is the raw bytes of the struct file_id; the returned
+ TDB_DATA points at the caller's id, so it must not outlive it.
+******************************************************************/
+
+static TDB_DATA locking_key(const struct file_id *id)
+{
+	return make_tdb_data((const uint8_t *)id, sizeof(*id));
+}
+
+/*******************************************************************
+ Share mode cache utility functions that store/delete/retrieve
+ entries from memcache.
+
+ For now share the statcache (global cache) memory space. If
+ a lock record gets orphaned (which shouldn't happen as we're
+ using the same locking_key data as lookup) it will eventually
+ fall out of the cache via the normal LRU trim mechanism. If
+ necessary we can always make this a separate (smaller) cache.
+******************************************************************/
+
+/* Same raw-bytes key as locking_key(), but as a DATA_BLOB for memcache. */
+static DATA_BLOB memcache_key(const struct file_id *id)
+{
+	return data_blob_const((const void *)id, sizeof(*id));
+}
+
+/*
+ * Donate a share_mode_data to the in-memory cache.
+ * Ownership of d transfers to the cache; the caller must not touch
+ * d after this returns. The entry is keyed by file_id and tagged
+ * with the unique_content_epoch so stale copies are detectable.
+ */
+static void share_mode_memcache_store(struct share_mode_data *d)
+{
+	const DATA_BLOB key = memcache_key(&d->id);
+	struct file_id_buf idbuf;
+
+	DBG_DEBUG("stored entry for file %s epoch %"PRIx64" key %s\n",
+		  d->base_name,
+		  d->unique_content_epoch,
+		  file_id_str_buf(d->id, &idbuf));
+
+	/* Ensure everything stored in the cache is pristine. */
+	d->modified = false;
+	d->fresh = false;
+
+	/*
+	 * Ensure the memory going into the cache
+	 * doesn't have a destructor so it can be
+	 * cleanly evicted by the memcache LRU
+	 * mechanism.
+	 */
+	talloc_set_destructor(d, NULL);
+
+	/* Cache will own d after this call. */
+	memcache_add_talloc(NULL,
+			    SHARE_MODE_LOCK_CACHE,
+			    key,
+			    &d);
+}
+
+/*
+ * NB. We use ndr_pull_hyper on a stack-created
+ * struct ndr_pull with no talloc allowed, as we
+ * need this to be really fast as an ndr-peek into
+ * the first 10 bytes of the blob.
+ */
+
+/*
+ * Peek the epoch (8 bytes) and flags (2 bytes) out of a serialized
+ * share_mode_data blob without a full NDR pull. NDR_CHECK returns
+ * early with the error code on any pull failure.
+ */
+static enum ndr_err_code get_share_mode_blob_header(
+	const uint8_t *buf, size_t buflen, uint64_t *pepoch, uint16_t *pflags)
+{
+	struct ndr_pull ndr = {
+		.data = discard_const_p(uint8_t, buf),
+		.data_size = buflen,
+	};
+	NDR_CHECK(ndr_pull_hyper(&ndr, NDR_SCALARS, pepoch));
+	NDR_CHECK(ndr_pull_uint16(&ndr, NDR_SCALARS, pflags));
+	return NDR_ERR_SUCCESS;
+}
+
+/* Result carrier for fsp_update_share_mode_flags_fn. */
+struct fsp_update_share_mode_flags_state {
+	enum ndr_err_code ndr_err;
+	uint16_t share_mode_flags;
+};
+
+/*
+ * share_mode_do_locked callback: peek the flags out of the serialized
+ * share_mode_data blob. Parse errors are reported via state->ndr_err.
+ */
+static void fsp_update_share_mode_flags_fn(
+	const uint8_t *buf,
+	size_t buflen,
+	bool *modified_dependent,
+	void *private_data)
+{
+	struct fsp_update_share_mode_flags_state *state = private_data;
+	uint64_t seq;
+
+	state->ndr_err = get_share_mode_blob_header(
+		buf, buflen, &seq, &state->share_mode_flags);
+}
+
+/*
+ * Refresh fsp->share_mode_flags from locking.tdb, but only if the
+ * database changed since we last looked: the g_lock sequence number
+ * acts as a cheap cache-validity check, so the common case does no
+ * database access at all.
+ */
+static NTSTATUS fsp_update_share_mode_flags(struct files_struct *fsp)
+{
+	struct fsp_update_share_mode_flags_state state = {0};
+	int seqnum = g_lock_seqnum(lock_ctx);
+	NTSTATUS status;
+
+	if (seqnum == fsp->share_mode_flags_seqnum) {
+		/* Nothing changed since the last refresh. */
+		return NT_STATUS_OK;
+	}
+
+	status = share_mode_do_locked(
+		fsp->file_id, fsp_update_share_mode_flags_fn, &state);
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_DEBUG("share_mode_do_locked returned %s\n",
+			  nt_errstr(status));
+		return status;
+	}
+
+	if (!NDR_ERR_CODE_IS_SUCCESS(state.ndr_err)) {
+		DBG_DEBUG("get_share_mode_blob_header returned %s\n",
+			  ndr_errstr(state.ndr_err));
+		return ndr_map_error2ntstatus(state.ndr_err);
+	}
+
+	fsp->share_mode_flags_seqnum = seqnum;
+	fsp->share_mode_flags = state.share_mode_flags;
+
+	return NT_STATUS_OK;
+}
+
+/*
+ * Report whether the cached share-mode flags carry a read lease for
+ * this file. If refreshing the flags fails for any reason we answer
+ * "yes", which is the safe default for lease handling.
+ */
+bool file_has_read_lease(struct files_struct *fsp)
+{
+	bool has_read_lease;
+	NTSTATUS status = fsp_update_share_mode_flags(fsp);
+
+	if (!NT_STATUS_IS_OK(status)) {
+		/* Safe default for leases */
+		return true;
+	}
+
+	has_read_lease = (fsp->share_mode_flags & SHARE_MODE_LEASE_READ) != 0;
+	return has_read_lease;
+}
+
+/*
+ * talloc destructor returning -1, which makes talloc_free() on the
+ * object fail. Installed temporarily so the memcache delete below
+ * cannot free data we are about to take ownership of.
+ */
+static int share_mode_data_nofree_destructor(struct share_mode_data *d)
+{
+	return -1;
+}
+
+/*
+ * Try to satisfy a share_mode_data parse from the in-memory cache.
+ *
+ * The database blob's header (epoch + flags) is peeked and compared
+ * against the cached entry; on mismatch or parse failure the stale
+ * cache entry is dropped and NULL is returned so the caller falls
+ * back to a full NDR pull. On success the entry is removed from the
+ * cache and ownership moves to mem_ctx.
+ *
+ * Fix: use NDR_ERR_CODE_IS_SUCCESS() for the ndr_err check, matching
+ * every other NDR result test in this file, instead of a raw compare.
+ */
+static struct share_mode_data *share_mode_memcache_fetch(
+	TALLOC_CTX *mem_ctx,
+	struct file_id id,
+	const uint8_t *buf,
+	size_t buflen)
+{
+	const DATA_BLOB key = memcache_key(&id);
+	enum ndr_err_code ndr_err;
+	struct share_mode_data *d;
+	uint64_t unique_content_epoch;
+	uint16_t flags;
+	void *ptr;
+	struct file_id_buf idbuf;
+
+	ptr = memcache_lookup_talloc(NULL,
+				     SHARE_MODE_LOCK_CACHE,
+				     key);
+	if (ptr == NULL) {
+		DBG_DEBUG("failed to find entry for key %s\n",
+			  file_id_str_buf(id, &idbuf));
+		return NULL;
+	}
+	/* sequence number key is at start of blob. */
+	ndr_err = get_share_mode_blob_header(
+		buf, buflen, &unique_content_epoch, &flags);
+	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
+		/* Bad blob. Remove entry. */
+		DBG_DEBUG("bad blob %u key %s\n",
+			  (unsigned int)ndr_err,
+			  file_id_str_buf(id, &idbuf));
+		memcache_delete(NULL,
+				SHARE_MODE_LOCK_CACHE,
+				key);
+		return NULL;
+	}
+
+	d = (struct share_mode_data *)ptr;
+	if (d->unique_content_epoch != unique_content_epoch) {
+		DBG_DEBUG("epoch changed (cached %"PRIx64") (new %"PRIx64") "
+			  "for key %s\n",
+			  d->unique_content_epoch,
+			  unique_content_epoch,
+			  file_id_str_buf(id, &idbuf));
+		/* Cache out of date. Remove entry. */
+		memcache_delete(NULL,
+				SHARE_MODE_LOCK_CACHE,
+				key);
+		return NULL;
+	}
+
+	/* Move onto mem_ctx. */
+	d = talloc_move(mem_ctx, &ptr);
+
+	/*
+	 * Now we own d, prevent the cache from freeing it
+	 * when we delete the entry.
+	 */
+	talloc_set_destructor(d, share_mode_data_nofree_destructor);
+
+	/* Remove from the cache. We own it now. */
+	memcache_delete(NULL,
+			SHARE_MODE_LOCK_CACHE,
+			key);
+
+	/* And reset the destructor to none. */
+	talloc_set_destructor(d, NULL);
+
+	DBG_DEBUG("fetched entry for file %s epoch %"PRIx64" key %s\n",
+		  d->base_name,
+		  d->unique_content_epoch,
+		  file_id_str_buf(id, &idbuf));
+
+	return d;
+}
+
+/*
+ * 132 is the sizeof an ndr-encoded struct share_mode_entry_buf.
+ * Reading/writing entries will immediately error out if this
+ * size differs (push/pull is done without allocs).
+ */
+
+/* Fixed-size wire image of one share_mode_entry. */
+struct share_mode_entry_buf {
+	uint8_t buf[132];
+};
+#define SHARE_MODE_ENTRY_SIZE (sizeof(struct share_mode_entry_buf))
+
+/*
+ * Serialize one share_mode_entry into its fixed-size wire buffer.
+ * Fails (returns false) if the NDR push does not fit the buffer
+ * exactly — a size mismatch with SHARE_MODE_ENTRY_SIZE.
+ */
+static bool share_mode_entry_put(
+	const struct share_mode_entry *e,
+	struct share_mode_entry_buf *dst)
+{
+	DATA_BLOB blob = { .data = dst->buf, .length = sizeof(dst->buf) };
+	enum ndr_err_code ndr_err;
+
+	if (DEBUGLEVEL>=10) {
+		DBG_DEBUG("share_mode_entry:\n");
+		NDR_PRINT_DEBUG(share_mode_entry, discard_const_p(void, e));
+	}
+
+	ndr_err = ndr_push_struct_into_fixed_blob(
+		&blob,
+		e,
+		(ndr_push_flags_fn_t)ndr_push_share_mode_entry);
+	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
+		DBG_WARNING("ndr_push_share_mode_entry failed: %s\n",
+			    ndr_errstr(ndr_err));
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Deserialize one share_mode_entry from its fixed-size wire buffer.
+ * The "noalloc" pull requires the blob to be consumed exactly;
+ * returns false on any parse error.
+ */
+static bool share_mode_entry_get(
+	const uint8_t ptr[SHARE_MODE_ENTRY_SIZE], struct share_mode_entry *e)
+{
+	enum ndr_err_code ndr_err = NDR_ERR_SUCCESS;
+	DATA_BLOB blob = {
+		.data = discard_const_p(uint8_t, ptr),
+		.length = SHARE_MODE_ENTRY_SIZE,
+	};
+
+	ndr_err = ndr_pull_struct_blob_all_noalloc(
+		&blob, e, (ndr_pull_flags_fn_t)ndr_pull_share_mode_entry);
+	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
+		DBG_WARNING("ndr_pull_share_mode_entry failed\n");
+		return false;
+	}
+	return true;
+}
+
+/*
+ * locking.tdb records consist of
+ *
+ * uint32_t share_mode_data_len
+ * uint8_t [share_mode_data]       This is struct share_mode_data in NDR
+ *
+ * 0 [SHARE_MODE_ENTRY_SIZE]       Sorted array of share modes,
+ * 1 [SHARE_MODE_ENTRY_SIZE]       filling up the rest of the data in the
+ * 2 [SHARE_MODE_ENTRY_SIZE]       g_lock.c maintained record in locking.tdb
+ */
+
+/*
+ * Parsed view of one locking.tdb record. The buffer pointers alias
+ * the record bytes they were parsed from; they do not own memory.
+ */
+struct locking_tdb_data {
+	const uint8_t *share_mode_data_buf;
+	size_t share_mode_data_len;
+	const uint8_t *share_entries;
+	size_t num_share_entries;
+};
+
+/*
+ * Split a raw locking.tdb record into its share_mode_data blob and
+ * the trailing array of fixed-size share entries. An empty record
+ * parses to an all-zero result. Returns false if the record is too
+ * short or the entry area is not a whole multiple of the entry size.
+ * The output pointers alias "buf"; nothing is copied.
+ */
+static bool locking_tdb_data_get(
+	struct locking_tdb_data *data, const uint8_t *buf, size_t buflen)
+{
+	uint32_t sm_len;
+	size_t entries_len;
+
+	if (buflen == 0) {
+		*data = (struct locking_tdb_data) { 0 };
+		return true;
+	}
+	if (buflen < sizeof(uint32_t)) {
+		/* Not even room for the length prefix. */
+		return false;
+	}
+
+	sm_len = PULL_LE_U32(buf, 0);
+	buf += sizeof(uint32_t);
+	buflen -= sizeof(uint32_t);
+
+	if (buflen < sm_len) {
+		/* Length prefix claims more data than we have. */
+		return false;
+	}
+	entries_len = buflen - sm_len;
+	if ((entries_len % SHARE_MODE_ENTRY_SIZE) != 0) {
+		return false;
+	}
+
+	data->share_mode_data_buf = buf;
+	data->share_mode_data_len = sm_len;
+	data->share_entries = buf + sm_len;
+	data->num_share_entries = entries_len / SHARE_MODE_ENTRY_SIZE;
+	return true;
+}
+
+/* Carries the copied record bytes out of g_lock_dump. */
+struct locking_tdb_data_fetch_state {
+	TALLOC_CTX *mem_ctx;
+	uint8_t *data;
+	size_t datalen;
+};
+
+/*
+ * g_lock_dump callback: copy the record payload onto state->mem_ctx.
+ * NOTE(review): the talloc_memdup result is not checked here; on
+ * allocation failure state->data is NULL while state->datalen is
+ * nonzero — callers must guard against that combination.
+ */
+static void locking_tdb_data_fetch_fn(
+	struct server_id exclusive,
+	size_t num_shared,
+	const struct server_id *shared,
+	const uint8_t *data,
+	size_t datalen,
+	void *private_data)
+{
+	struct locking_tdb_data_fetch_state *state = private_data;
+	state->datalen = datalen;
+	state->data = talloc_memdup(state->mem_ctx, data, datalen);
+}
+
+/*
+ * Fetch and parse the locking.tdb record for "key" into a freshly
+ * talloc'ed struct locking_tdb_data on mem_ctx. A missing record or
+ * an empty value yields an empty (zeroed) result, not an error. The
+ * parsed buffer pointers alias memory owned by the returned struct.
+ *
+ * Fixes: free the half-built result on the g_lock_dump error path,
+ * and detect the talloc_memdup failure in locking_tdb_data_fetch_fn
+ * (data == NULL with datalen != 0) before dereferencing the buffer.
+ */
+static NTSTATUS locking_tdb_data_fetch(
+	TDB_DATA key, TALLOC_CTX *mem_ctx, struct locking_tdb_data **ltdb)
+{
+	struct locking_tdb_data_fetch_state state = { 0 };
+	struct locking_tdb_data *result = NULL;
+	NTSTATUS status;
+	bool ok;
+
+	result = talloc_zero(mem_ctx, struct locking_tdb_data);
+	if (result == NULL) {
+		return NT_STATUS_NO_MEMORY;
+	}
+	state.mem_ctx = result;
+
+	status = g_lock_dump(lock_ctx, key, locking_tdb_data_fetch_fn, &state);
+
+	if (NT_STATUS_EQUAL(status, NT_STATUS_NOT_FOUND)) {
+		/*
+		 * Just return an empty record
+		 */
+		goto done;
+	}
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_DEBUG("g_lock_dump failed: %s\n", nt_errstr(status));
+		TALLOC_FREE(result);
+		return status;
+	}
+	if (state.datalen == 0) {
+		goto done;
+	}
+	if (state.data == NULL) {
+		/* talloc_memdup in the dump callback failed. */
+		TALLOC_FREE(result);
+		return NT_STATUS_NO_MEMORY;
+	}
+
+	ok = locking_tdb_data_get(result, state.data, state.datalen);
+	if (!ok) {
+		DBG_DEBUG("locking_tdb_data_get failed for %zu bytes\n",
+			  state.datalen);
+		TALLOC_FREE(result);
+		return NT_STATUS_INTERNAL_DB_CORRUPTION;
+	}
+
+done:
+	*ltdb = result;
+	return NT_STATUS_OK;
+}
+
+/*
+ * Write a locking.tdb record back: length prefix, share_mode_data
+ * blob, the existing share entries, then any additional entry
+ * buffers in share_mode_dbufs. If everything is empty the record is
+ * deleted (written with zero length).
+ *
+ * Fix: the error message after g_lock_write_data() named the wrong
+ * function (g_lock_writev_data).
+ */
+static NTSTATUS locking_tdb_data_store(
+	TDB_DATA key,
+	const struct locking_tdb_data *ltdb,
+	const TDB_DATA *share_mode_dbufs,
+	size_t num_share_mode_dbufs)
+{
+	uint8_t share_mode_data_len_buf[4];
+	TDB_DATA dbufs[num_share_mode_dbufs+3];
+	NTSTATUS status;
+
+	if ((ltdb->share_mode_data_len == 0) &&
+	    (ltdb->num_share_entries == 0) &&
+	    (num_share_mode_dbufs == 0)) {
+		/*
+		 * Nothing to write
+		 */
+		status = g_lock_write_data(lock_ctx, key, NULL, 0);
+		if (!NT_STATUS_IS_OK(status)) {
+			DBG_DEBUG("g_lock_write_data() failed: %s\n",
+				  nt_errstr(status));
+		}
+		return status;
+	}
+
+	PUSH_LE_U32(share_mode_data_len_buf, 0, ltdb->share_mode_data_len);
+
+	/* dbufs[0..2]: length prefix, NDR blob, existing entry array. */
+	dbufs[0] = (TDB_DATA) {
+		.dptr = share_mode_data_len_buf,
+		.dsize = sizeof(share_mode_data_len_buf),
+	};
+	dbufs[1] = (TDB_DATA) {
+		.dptr = discard_const_p(uint8_t, ltdb->share_mode_data_buf),
+		.dsize = ltdb->share_mode_data_len,
+	};
+
+	if (ltdb->num_share_entries > SIZE_MAX/SHARE_MODE_ENTRY_SIZE) {
+		/* overflow */
+		return NT_STATUS_BUFFER_OVERFLOW;
+	}
+	dbufs[2] = (TDB_DATA) {
+		.dptr = discard_const_p(uint8_t, ltdb->share_entries),
+		.dsize = ltdb->num_share_entries * SHARE_MODE_ENTRY_SIZE,
+	};
+
+	if (num_share_mode_dbufs != 0) {
+		memcpy(&dbufs[3],
+		       share_mode_dbufs,
+		       num_share_mode_dbufs * sizeof(TDB_DATA));
+	}
+
+	status = g_lock_writev_data(lock_ctx, key, dbufs, ARRAY_SIZE(dbufs));
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_DEBUG("g_lock_writev_data() failed: %s\n",
+			  nt_errstr(status));
+	}
+	return status;
+}
+
+/*******************************************************************
+ Get all share mode entries for a dev/inode pair.
+ Tries the in-memory cache first; on a miss does a full NDR pull of
+ the share_mode_data blob onto mem_ctx. Returns NULL on allocation
+ or parse failure.
+********************************************************************/
+
+static struct share_mode_data *parse_share_modes(
+	TALLOC_CTX *mem_ctx,
+	struct file_id id,
+	const uint8_t *buf,
+	size_t buflen)
+{
+	struct share_mode_data *d;
+	enum ndr_err_code ndr_err;
+	DATA_BLOB blob;
+
+	/* See if we already have a cached copy of this key. */
+	d = share_mode_memcache_fetch(mem_ctx, id, buf, buflen);
+	if (d != NULL) {
+		return d;
+	}
+
+	d = talloc(mem_ctx, struct share_mode_data);
+	if (d == NULL) {
+		DEBUG(0, ("talloc failed\n"));
+		goto fail;
+	}
+
+	blob = (DATA_BLOB) {
+		.data = discard_const_p(uint8_t, buf),
+		.length = buflen,
+	};
+	ndr_err = ndr_pull_struct_blob_all(
+		&blob, d, d, (ndr_pull_flags_fn_t)ndr_pull_share_mode_data);
+	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
+		DBG_WARNING("ndr_pull_share_mode_data failed: %s\n",
+			    ndr_errstr(ndr_err));
+		goto fail;
+	}
+
+	if (DEBUGLEVEL >= 10) {
+		DEBUG(10, ("parse_share_modes:\n"));
+		NDR_PRINT_DEBUG(share_mode_data, d);
+	}
+
+	return d;
+fail:
+	TALLOC_FREE(d);
+	return NULL;
+}
+
+/*******************************************************************
+ If modified, store the share_mode_data back into the database.
+ Bumps the unique_content_epoch so cached copies elsewhere become
+ invalid. If there are no share entries left, only an empty blob is
+ written, i.e. the record effectively disappears.
+ *have_share_entries is set to true (never cleared) when entries
+ remain; the caller uses it to decide whether to cache "d".
+********************************************************************/
+
+static NTSTATUS share_mode_data_store(
+	struct share_mode_data *d, bool *have_share_entries)
+{
+	TDB_DATA key = locking_key(&d->id);
+	struct locking_tdb_data *ltdb = NULL;
+	DATA_BLOB blob = { 0 };
+	NTSTATUS status;
+
+	if (!d->modified) {
+		DBG_DEBUG("not modified\n");
+		return NT_STATUS_OK;
+	}
+
+	if (DEBUGLEVEL >= 10) {
+		DBG_DEBUG("\n");
+		NDR_PRINT_DEBUG(share_mode_data, d);
+	}
+
+	/* Invalidate any cached copies of the previous content. */
+	d->unique_content_epoch = generate_unique_u64(d->unique_content_epoch);
+
+	status = locking_tdb_data_fetch(key, d, &ltdb);
+	if (!NT_STATUS_IS_OK(status)) {
+		return status;
+	}
+
+	if (ltdb->num_share_entries != 0) {
+		/* Only serialize d if the record stays alive. */
+		enum ndr_err_code ndr_err;
+
+		ndr_err = ndr_push_struct_blob(
+			&blob,
+			ltdb,
+			d,
+			(ndr_push_flags_fn_t)ndr_push_share_mode_data);
+		if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
+			DBG_DEBUG("ndr_push_share_mode_data failed: %s\n",
+				  ndr_errstr(ndr_err));
+			TALLOC_FREE(ltdb);
+			return ndr_map_error2ntstatus(ndr_err);
+		}
+
+		*have_share_entries = true;
+	}
+
+	ltdb->share_mode_data_buf = blob.data;
+	ltdb->share_mode_data_len = blob.length;
+
+	status = locking_tdb_data_store(key, ltdb, NULL, 0);
+	TALLOC_FREE(ltdb);
+	return status;
+}
+
+/*******************************************************************
+ Allocate a new share_mode_data struct, mark it unmodified.
+ fresh is set to note that currently there is no database entry.
+ All strings are duplicated onto d. Returns NULL if any required
+ argument is NULL or on allocation failure.
+********************************************************************/
+
+static struct share_mode_data *fresh_share_mode_lock(
+	TALLOC_CTX *mem_ctx, const char *servicepath,
+	const struct smb_filename *smb_fname,
+	const struct timespec *old_write_time)
+{
+	struct share_mode_data *d;
+
+	if ((servicepath == NULL) || (smb_fname == NULL) ||
+	    (old_write_time == NULL)) {
+		return NULL;
+	}
+
+	d = talloc_zero(mem_ctx, struct share_mode_data);
+	if (d == NULL) {
+		goto fail;
+	}
+	d->unique_content_epoch = generate_unique_u64(0);
+
+	d->base_name = talloc_strdup(d, smb_fname->base_name);
+	if (d->base_name == NULL) {
+		goto fail;
+	}
+	if (smb_fname->stream_name != NULL) {
+		d->stream_name = talloc_strdup(d, smb_fname->stream_name);
+		if (d->stream_name == NULL) {
+			goto fail;
+		}
+	}
+	d->servicepath = talloc_strdup(d, servicepath);
+	if (d->servicepath == NULL) {
+		goto fail;
+	}
+	d->old_write_time = full_timespec_to_nt_time(old_write_time);
+	/* Start fully shareable; opens will narrow these flags. */
+	d->flags = SHARE_MODE_SHARE_DELETE |
+		SHARE_MODE_SHARE_WRITE |
+		SHARE_MODE_SHARE_READ;
+	d->modified = false;
+	d->fresh = true;
+	return d;
+fail:
+	DEBUG(0, ("talloc failed\n"));
+	TALLOC_FREE(d);
+	return NULL;
+}
+
+/*
+ * Key that's locked with g_lock
+ */
+static uint8_t share_mode_lock_key_data[sizeof(struct file_id)];
+static TDB_DATA share_mode_lock_key = {
+	.dptr = share_mode_lock_key_data,
+	.dsize = sizeof(share_mode_lock_key_data),
+};
+/* How many nested holders of the g_lock on share_mode_lock_key. */
+static size_t share_mode_lock_key_refcount = 0;
+
+/*
+ * We can only ever have one share mode locked. Use a static
+ * share_mode_data pointer that is shared by multiple nested
+ * share_mode_lock structures, explicitly refcounted.
+ */
+static struct share_mode_data *static_share_mode_data = NULL;
+static size_t static_share_mode_data_refcount = 0;
+
+/*******************************************************************
+ Either fetch a share mode from the database, or allocate a fresh
+ one if the record doesn't exist.
+********************************************************************/
+
+/* Arguments/result carrier for get_static_share_mode_data_fn. */
+struct get_static_share_mode_data_state {
+	TALLOC_CTX *mem_ctx;
+	struct file_id id;
+	const char *servicepath;
+	const struct smb_filename *smb_fname;
+	const struct timespec *old_write_time;
+	NTSTATUS status;
+};
+
+/*
+ * g_lock_dump callback run under the held lock. Parses an existing
+ * record, or — when there is none and a filename was supplied —
+ * creates a fresh share_mode_data. On success it installs the result
+ * into static_share_mode_data; errors are reported via state->status.
+ */
+static void get_static_share_mode_data_fn(
+	struct server_id exclusive,
+	size_t num_shared,
+	const struct server_id *shared,
+	const uint8_t *data,
+	size_t datalen,
+	void *private_data)
+{
+	struct get_static_share_mode_data_state *state = private_data;
+	struct share_mode_data *d = NULL;
+	struct locking_tdb_data ltdb = { 0 };
+
+	if (datalen != 0) {
+		bool ok;
+
+		ok = locking_tdb_data_get(&ltdb, data, datalen);
+		if (!ok) {
+			DBG_DEBUG("locking_tdb_data_get failed\n");
+			state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
+			return;
+		}
+	}
+
+	if (ltdb.share_mode_data_len == 0) {
+		if (state->smb_fname == NULL) {
+			/* Caller expected an existing record. */
+			state->status = NT_STATUS_NOT_FOUND;
+			return;
+		}
+		d = fresh_share_mode_lock(
+			state->mem_ctx,
+			state->servicepath,
+			state->smb_fname,
+			state->old_write_time);
+		if (d == NULL) {
+			state->status = NT_STATUS_NO_MEMORY;
+			return;
+		}
+	} else {
+		d = parse_share_modes(
+			lock_ctx,
+			state->id,
+			ltdb.share_mode_data_buf,
+			ltdb.share_mode_data_len);
+		if (d == NULL) {
+			state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
+			return;
+		}
+	}
+
+	d->id = state->id;
+	static_share_mode_data = d;
+}
+
+/*
+ * Populate static_share_mode_data for "id" while the g_lock on
+ * share_mode_lock_key is already held by this process. Asserts that
+ * no static data is currently installed.
+ */
+static NTSTATUS get_static_share_mode_data(
+	struct file_id id,
+	const char *servicepath,
+	const struct smb_filename *smb_fname,
+	const struct timespec *old_write_time)
+{
+	struct get_static_share_mode_data_state state = {
+		.mem_ctx = lock_ctx,
+		.id = id,
+		.servicepath = servicepath,
+		.smb_fname = smb_fname,
+		.old_write_time = old_write_time,
+	};
+	NTSTATUS status;
+
+	SMB_ASSERT(static_share_mode_data == NULL);
+
+	status = g_lock_dump(
+		lock_ctx,
+		share_mode_lock_key,
+		get_static_share_mode_data_fn,
+		&state);
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_DEBUG("g_lock_dump failed: %s\n",
+			  nt_errstr(status));
+		return status;
+	}
+	if (!NT_STATUS_IS_OK(state.status)) {
+		DBG_DEBUG("get_static_share_mode_data_fn failed: %s\n",
+			  nt_errstr(state.status));
+		return state.status;
+	}
+
+	return NT_STATUS_OK;
+}
+
+/*******************************************************************
+ Get a share_mode_lock, Reference counted to allow nested calls.
+ Only one file_id may be locked at a time per process; attempting to
+ lock a second one is a hard programming error and panics.
+********************************************************************/
+
+static int share_mode_lock_destructor(struct share_mode_lock *lck);
+
+struct share_mode_lock *get_share_mode_lock(
+	TALLOC_CTX *mem_ctx,
+	struct file_id id,
+	const char *servicepath,
+	const struct smb_filename *smb_fname,
+	const struct timespec *old_write_time)
+{
+	TDB_DATA key = locking_key(&id);
+	struct share_mode_lock *lck = NULL;
+	NTSTATUS status;
+	int cmp;
+
+	lck = talloc(mem_ctx, struct share_mode_lock);
+	if (lck == NULL) {
+		DEBUG(1, ("talloc failed\n"));
+		return NULL;
+	}
+
+	if (static_share_mode_data != NULL) {
+		/* Nested call: must be for the same file. */
+		if (!file_id_equal(&static_share_mode_data->id, &id)) {
+			struct file_id_buf existing;
+			struct file_id_buf requested;
+
+			DBG_ERR("Can not lock two share modes "
+				"simultaneously: existing %s requested %s\n",
+				file_id_str_buf(static_share_mode_data->id, &existing),
+				file_id_str_buf(id, &requested));
+
+			smb_panic(__location__);
+			goto fail;
+		}
+		goto done;
+	}
+
+	if (share_mode_lock_key_refcount == 0) {
+		/* First holder: take the g_lock in the database. */
+		status = g_lock_lock(
+			lock_ctx,
+			key,
+			G_LOCK_WRITE,
+			(struct timeval) { .tv_sec = 3600 });
+		if (!NT_STATUS_IS_OK(status)) {
+			DBG_DEBUG("g_lock_lock failed: %s\n",
+				  nt_errstr(status));
+			goto fail;
+		}
+		memcpy(share_mode_lock_key_data, key.dptr, key.dsize);
+	}
+
+	cmp = tdb_data_cmp(share_mode_lock_key, key);
+	if (cmp != 0) {
+		DBG_WARNING("Can not lock two share modes simultaneously\n");
+		smb_panic(__location__);
+		goto fail;
+	}
+
+	SMB_ASSERT(share_mode_lock_key_refcount < SIZE_MAX);
+	share_mode_lock_key_refcount += 1;
+
+	SMB_ASSERT(static_share_mode_data_refcount == 0);
+
+	status = get_static_share_mode_data(
+		id,
+		servicepath,
+		smb_fname,
+		old_write_time);
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_DEBUG("get_static_share_mode_data failed: %s\n",
+			  nt_errstr(status));
+		share_mode_lock_key_refcount -= 1;
+		goto fail;
+	}
+done:
+	static_share_mode_data_refcount += 1;
+	lck->data = static_share_mode_data;
+
+	talloc_set_destructor(lck, share_mode_lock_destructor);
+
+	if (CHECK_DEBUGLVL(DBGLVL_DEBUG)) {
+		struct file_id_buf returned;
+
+		DBG_DEBUG("Returning %s (data_refcount=%zu key_refcount=%zu)\n",
+			  file_id_str_buf(id, &returned),
+			  static_share_mode_data_refcount,
+			  share_mode_lock_key_refcount);
+	}
+
+	return lck;
+fail:
+	TALLOC_FREE(lck);
+	/* Drop the g_lock only if we were the one who took it. */
+	if (share_mode_lock_key_refcount == 0) {
+		status = g_lock_unlock(lock_ctx, share_mode_lock_key);
+		if (!NT_STATUS_IS_OK(status)) {
+			DBG_ERR("g_lock_unlock failed: %s\n",
+				nt_errstr(status));
+		}
+	}
+	return NULL;
+}
+
+/*
+ * talloc destructor for a share_mode_lock. Drops one data reference;
+ * the last reference stores modified data back to locking.tdb,
+ * releases the g_lock, and donates the share_mode_data to the
+ * memcache when share entries remain (otherwise frees it). Store or
+ * unlock failures are fatal — we must not continue with inconsistent
+ * locking state.
+ */
+static int share_mode_lock_destructor(struct share_mode_lock *lck)
+{
+	bool have_share_entries = false;
+	NTSTATUS status;
+
+	SMB_ASSERT(static_share_mode_data_refcount > 0);
+	static_share_mode_data_refcount -= 1;
+
+	if (static_share_mode_data_refcount > 0) {
+		/* Nested holders remain; nothing to do yet. */
+		return 0;
+	}
+
+	status = share_mode_data_store(
+		static_share_mode_data, &have_share_entries);
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_ERR("share_mode_data_store failed: %s\n",
+			nt_errstr(status));
+		smb_panic("Could not store share mode data\n");
+	}
+
+	SMB_ASSERT(share_mode_lock_key_refcount > 0);
+	share_mode_lock_key_refcount -= 1;
+
+	if (share_mode_lock_key_refcount == 0) {
+		status = g_lock_unlock(lock_ctx, share_mode_lock_key);
+		if (!NT_STATUS_IS_OK(status)) {
+			DBG_ERR("g_lock_unlock failed: %s\n",
+				nt_errstr(status));
+			smb_panic("Could not unlock share mode\n");
+		}
+	}
+
+	if (have_share_entries) {
+		/*
+		 * This is worth keeping. Without share modes,
+		 * share_mode_data_store above has left nothing in the
+		 * database.
+		 */
+		share_mode_memcache_store(static_share_mode_data);
+		static_share_mode_data = NULL;
+	}
+
+	/* No-op if the cache took ownership above. */
+	TALLOC_FREE(static_share_mode_data);
+	return 0;
+}
+
+/*******************************************************************
+ Fetch a share mode where we know one MUST exist. This call reference
+ counts it internally to allow for nested lock fetches.
+ Passing NULL for the create arguments makes get_share_mode_lock
+ fail (NT_STATUS_NOT_FOUND) instead of creating a fresh record.
+********************************************************************/
+
+struct share_mode_lock *get_existing_share_mode_lock(TALLOC_CTX *mem_ctx,
+						     const struct file_id id)
+{
+	return get_share_mode_lock(mem_ctx, id, NULL, NULL, NULL);
+}
+
+/* Bundles the caller's callback for share_mode_do_locked_fn. */
+struct share_mode_do_locked_state {
+	TDB_DATA key;
+	void (*fn)(const uint8_t *buf,
+		   size_t buflen,
+		   bool *modified_dependent,
+		   void *private_data);
+	void *private_data;
+};
+
+/*
+ * g_lock_dump callback: parse the record and hand just the
+ * share_mode_data blob to the caller's function. If the callback
+ * flags a dependent modification, wake any watchers on the record.
+ */
+static void share_mode_do_locked_fn(
+	struct server_id exclusive,
+	size_t num_shared,
+	const struct server_id *shared,
+	const uint8_t *data,
+	size_t datalen,
+	void *private_data)
+{
+	struct share_mode_do_locked_state *state = private_data;
+	bool modified_dependent = false;
+	struct locking_tdb_data ltdb = { 0 };
+	bool ok;
+
+	ok = locking_tdb_data_get(
+		&ltdb, discard_const_p(uint8_t, data), datalen);
+	if (!ok) {
+		DBG_WARNING("locking_tdb_data_get failed\n");
+		return;
+	}
+
+	state->fn(ltdb.share_mode_data_buf,
+		  ltdb.share_mode_data_len,
+		  &modified_dependent,
+		  state->private_data);
+
+	if (modified_dependent) {
+		g_lock_wake_watchers(lock_ctx, state->key);
+	}
+}
+
+/*
+ * Run "fn" on the raw share_mode_data blob for "id" under the g_lock,
+ * without materializing a share_mode_lock. Nests correctly inside an
+ * existing get_share_mode_lock via the key refcount; the assertions
+ * verify that "fn" did not recursively change the refcounts.
+ */
+NTSTATUS share_mode_do_locked(
+	struct file_id id,
+	void (*fn)(const uint8_t *buf,
+		   size_t buflen,
+		   bool *modified_dependent,
+		   void *private_data),
+	void *private_data)
+{
+	TDB_DATA key = locking_key(&id);
+	size_t data_refcount, key_refcount;
+	struct share_mode_do_locked_state state = {
+		.key = key, .fn = fn, .private_data = private_data,
+	};
+	NTSTATUS status;
+
+	if (share_mode_lock_key_refcount == 0) {
+		/* Not already locked: take the g_lock ourselves. */
+		status = g_lock_lock(
+			lock_ctx,
+			key,
+			G_LOCK_WRITE,
+			(struct timeval) { .tv_sec = 3600 });
+		if (!NT_STATUS_IS_OK(status)) {
+			DBG_DEBUG("g_lock_lock failed: %s\n",
+				  nt_errstr(status));
+			return status;
+		}
+		memcpy(share_mode_lock_key_data, key.dptr, key.dsize);
+	}
+
+	SMB_ASSERT(share_mode_lock_key_refcount < SIZE_MAX);
+	share_mode_lock_key_refcount += 1;
+
+	key_refcount = share_mode_lock_key_refcount;
+	data_refcount = static_share_mode_data_refcount;
+
+	status = g_lock_dump(
+		lock_ctx, key, share_mode_do_locked_fn, &state);
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_DEBUG("g_lock_dump failed: %s\n",
+			  nt_errstr(status));
+	}
+
+	/* "fn" must not have taken or dropped locks itself. */
+	SMB_ASSERT(data_refcount == static_share_mode_data_refcount);
+	SMB_ASSERT(key_refcount == share_mode_lock_key_refcount);
+	share_mode_lock_key_refcount -= 1;
+
+	if (share_mode_lock_key_refcount == 0) {
+		status = g_lock_unlock(lock_ctx, key);
+		if (!NT_STATUS_IS_OK(status)) {
+			DBG_DEBUG("g_lock_unlock failed: %s\n",
+				  nt_errstr(status));
+		}
+	}
+
+	return status;
+}
+
+/*
+ * Callback that only flags "modified" so share_mode_do_locked_fn
+ * wakes every watcher of the record.
+ */
+static void share_mode_wakeup_waiters_fn(
+	const uint8_t *buf,
+	size_t buflen,
+	bool *modified_dependent,
+	void *private_data)
+{
+	*modified_dependent = true;
+}
+
+/* Wake all watchers of the locking.tdb record for "id". */
+NTSTATUS share_mode_wakeup_waiters(struct file_id id)
+{
+	return share_mode_do_locked(id, share_mode_wakeup_waiters_fn, NULL);
+}
+
+/* Accessor: the sticky "changed" write time stored in the record. */
+NTTIME share_mode_changed_write_time(struct share_mode_lock *lck)
+{
+	return lck->data->changed_write_time;
+}
+
+/* Accessor: the service path the record was created under. */
+const char *share_mode_servicepath(struct share_mode_lock *lck)
+{
+	return lck->data->servicepath;
+}
+
+/*
+ * Build the full name recorded for this file as "base[:stream]",
+ * talloc'ed on mem_ctx. Returns NULL on allocation failure.
+ */
+char *share_mode_filename(TALLOC_CTX *mem_ctx, struct share_mode_lock *lck)
+{
+	struct share_mode_data *d = lck->data;
+	const char *sep = "";
+	const char *stream = "";
+
+	if (d->stream_name != NULL) {
+		sep = ":";
+		stream = d->stream_name;
+	}
+
+	return talloc_asprintf(mem_ctx, "%s%s%s", d->base_name, sep, stream);
+}
+
+/*
+ * Render the share_mode_data of "lck" as a human-readable string via
+ * the NDR print helpers. The result is talloc'ed on mem_ctx; returns
+ * NULL on allocation failure.
+ */
+char *share_mode_data_dump(
+	TALLOC_CTX *mem_ctx, struct share_mode_lock *lck)
+{
+	struct ndr_print *p = talloc(mem_ctx, struct ndr_print);
+	char *ret = NULL;
+
+	if (p == NULL) {
+		return NULL;
+	}
+
+	/* Accumulate the output into a growing string on mem_ctx. */
+	*p = (struct ndr_print) {
+		.print = ndr_print_string_helper,
+		.depth = 1,
+		.private_data = talloc_strdup(mem_ctx, ""),
+	};
+
+	if (p->private_data == NULL) {
+		TALLOC_FREE(p);
+		return NULL;
+	}
+
+	ndr_print_share_mode_data(p, "SHARE_MODE_DATA", lck->data);
+
+	ret = p->private_data;
+
+	TALLOC_FREE(p);
+
+	return ret;
+}
+
+/*
+ * Expand the compact uint16 flags stored in the record into the
+ * three NT-level views callers use: an access mask, a share mode and
+ * an SMB2 lease type. Each output pointer may be NULL if the caller
+ * does not need that view.
+ */
+void share_mode_flags_get(
+	struct share_mode_lock *lck,
+	uint32_t *access_mask,
+	uint32_t *share_mode,
+	uint32_t *lease_type)
+{
+	const uint16_t flags = lck->data->flags;
+
+	if (access_mask != NULL) {
+		uint32_t mask = 0;
+
+		if (flags & SHARE_MODE_ACCESS_READ) {
+			mask |= FILE_READ_DATA;
+		}
+		if (flags & SHARE_MODE_ACCESS_WRITE) {
+			mask |= FILE_WRITE_DATA;
+		}
+		if (flags & SHARE_MODE_ACCESS_DELETE) {
+			mask |= DELETE_ACCESS;
+		}
+		*access_mask = mask;
+	}
+	if (share_mode != NULL) {
+		uint32_t sharing = 0;
+
+		if (flags & SHARE_MODE_SHARE_READ) {
+			sharing |= FILE_SHARE_READ;
+		}
+		if (flags & SHARE_MODE_SHARE_WRITE) {
+			sharing |= FILE_SHARE_WRITE;
+		}
+		if (flags & SHARE_MODE_SHARE_DELETE) {
+			sharing |= FILE_SHARE_DELETE;
+		}
+		*share_mode = sharing;
+	}
+	if (lease_type != NULL) {
+		uint32_t lease = 0;
+
+		if (flags & SHARE_MODE_LEASE_READ) {
+			lease |= SMB2_LEASE_READ;
+		}
+		if (flags & SHARE_MODE_LEASE_WRITE) {
+			lease |= SMB2_LEASE_WRITE;
+		}
+		if (flags & SHARE_MODE_LEASE_HANDLE) {
+			lease |= SMB2_LEASE_HANDLE;
+		}
+		*lease_type = lease;
+	}
+}
+
+/*
+ * Compress the NT-level access mask, share mode and lease type into
+ * the record's uint16 flags. Only when the value actually changes is
+ * the record marked modified and *modified (if given) set to true;
+ * *modified is never cleared.
+ */
+void share_mode_flags_set(
+	struct share_mode_lock *lck,
+	uint32_t access_mask,
+	uint32_t share_mode,
+	uint32_t lease_type,
+	bool *modified)
+{
+	struct share_mode_data *d = lck->data;
+	uint16_t flags = 0;
+
+	flags |= (access_mask & (FILE_READ_DATA | FILE_EXECUTE)) ?
+		SHARE_MODE_ACCESS_READ : 0;
+	flags |= (access_mask & (FILE_WRITE_DATA | FILE_APPEND_DATA)) ?
+		SHARE_MODE_ACCESS_WRITE : 0;
+	flags |= (access_mask & (DELETE_ACCESS)) ?
+		SHARE_MODE_ACCESS_DELETE : 0;
+
+	flags |= (share_mode & FILE_SHARE_READ) ?
+		SHARE_MODE_SHARE_READ : 0;
+	flags |= (share_mode & FILE_SHARE_WRITE) ?
+		SHARE_MODE_SHARE_WRITE : 0;
+	flags |= (share_mode & FILE_SHARE_DELETE) ?
+		SHARE_MODE_SHARE_DELETE : 0;
+
+	flags |= (lease_type & SMB2_LEASE_READ) ?
+		SHARE_MODE_LEASE_READ : 0;
+	flags |= (lease_type & SMB2_LEASE_WRITE) ?
+		SHARE_MODE_LEASE_WRITE : 0;
+	flags |= (lease_type & SMB2_LEASE_HANDLE) ?
+		SHARE_MODE_LEASE_HANDLE : 0;
+
+	if (d->flags == flags) {
+		/* Unchanged: don't dirty the record. */
+		return;
+	}
+
+	if (modified != NULL) {
+		*modified = true;
+	}
+	d->flags = flags;
+	d->modified = true;
+}
+
+/* Result carried from the g_lock watch callback to the recv side. */
+struct share_mode_watch_state {
+	bool blockerdead;
+	struct server_id blocker;
+};
+
+static void share_mode_watch_done(struct tevent_req *subreq);
+
+/*
+ * Async request: watch the locking.tdb record belonging to "lck" for
+ * changes, or for the death of "blocker".  Thin wrapper around
+ * g_lock_watch_data_send() on the record's locking key.
+ */
+struct tevent_req *share_mode_watch_send(
+	TALLOC_CTX *mem_ctx,
+	struct tevent_context *ev,
+	struct share_mode_lock *lck,
+	struct server_id blocker)
+{
+	TDB_DATA key = locking_key(&lck->data->id);
+	struct tevent_req *req = NULL, *subreq = NULL;
+	struct share_mode_watch_state *state = NULL;
+
+	req = tevent_req_create(
+		mem_ctx, &state, struct share_mode_watch_state);
+	if (req == NULL) {
+		return NULL;
+	}
+
+	subreq = g_lock_watch_data_send(state, ev, lock_ctx, key, blocker);
+	if (tevent_req_nomem(subreq, req)) {
+		return tevent_req_post(req, ev);
+	}
+	tevent_req_set_callback(subreq, share_mode_watch_done, req);
+	return req;
+}
+
+/*
+ * Completion callback for the g_lock watch: collect whether the
+ * blocker died and which server id woke us, then finish the request.
+ */
+static void share_mode_watch_done(struct tevent_req *subreq)
+{
+	struct tevent_req *req = tevent_req_callback_data(
+		subreq, struct tevent_req);
+	struct share_mode_watch_state *state = tevent_req_data(
+		req, struct share_mode_watch_state);
+	NTSTATUS status;
+
+	status = g_lock_watch_data_recv(
+		subreq, &state->blockerdead, &state->blocker);
+	if (tevent_req_nterror(req, status)) {
+		return;
+	}
+	tevent_req_done(req);
+}
+
+/*
+ * Receive the result of share_mode_watch_send().  On success reports
+ * whether the watched blocker died and which server id triggered the
+ * wakeup; either out parameter may be NULL.
+ */
+NTSTATUS share_mode_watch_recv(
+	struct tevent_req *req, bool *blockerdead, struct server_id *blocker)
+{
+	struct share_mode_watch_state *watch_state = tevent_req_data(
+		req, struct share_mode_watch_state);
+	NTSTATUS status;
+
+	if (tevent_req_is_nterror(req, &status)) {
+		return status;
+	}
+
+	if (blockerdead != NULL) {
+		*blockerdead = watch_state->blockerdead;
+	}
+	if (blocker != NULL) {
+		*blocker = watch_state->blocker;
+	}
+
+	return NT_STATUS_OK;
+}
+
+struct fetch_share_mode_unlocked_state {
+	TALLOC_CTX *mem_ctx;
+	struct file_id id;
+	struct share_mode_lock *lck;	/* result, NULL on failure */
+};
+
+/*
+ * g_lock_dump() parser: unmarshal the locking.tdb record payload and
+ * build an unlocked share_mode_lock in state->lck.  Errors leave
+ * state->lck as NULL; the caller distinguishes "not found" from
+ * "parse failure" only by the debug log.
+ */
+static void fetch_share_mode_unlocked_parser(
+	struct server_id exclusive,
+	size_t num_shared,
+	const struct server_id *shared,
+	const uint8_t *data,
+	size_t datalen,
+	void *private_data)
+{
+	struct fetch_share_mode_unlocked_state *state = private_data;
+	struct locking_tdb_data ltdb = { 0 };
+
+	if (datalen != 0) {
+		bool ok = locking_tdb_data_get(&ltdb, data, datalen);
+		if (!ok) {
+			DBG_DEBUG("locking_tdb_data_get failed\n");
+			return;
+		}
+	}
+
+	if (ltdb.share_mode_data_len == 0) {
+		/* Likely a ctdb tombstone record, ignore it */
+		return;
+	}
+
+	state->lck = talloc(state->mem_ctx, struct share_mode_lock);
+	if (state->lck == NULL) {
+		DEBUG(0, ("talloc failed\n"));
+		return;
+	}
+
+	/* The parsed data hangs off the lock so one free releases both. */
+	state->lck->data = parse_share_modes(
+		state->lck,
+		state->id,
+		ltdb.share_mode_data_buf,
+		ltdb.share_mode_data_len);
+	if (state->lck->data == NULL) {
+		DBG_DEBUG("parse_share_modes failed\n");
+		TALLOC_FREE(state->lck);
+	}
+}
+
+/*******************************************************************
+ Get a share_mode_lock without locking the database or reference
+ counting. Used by smbstatus to display existing share modes.
+********************************************************************/
+
+struct share_mode_lock *fetch_share_mode_unlocked(TALLOC_CTX *mem_ctx,
+						  struct file_id id)
+{
+	struct fetch_share_mode_unlocked_state state = {
+		.mem_ctx = mem_ctx,
+		.id = id,
+	};
+	TDB_DATA key = locking_key(&id);
+	NTSTATUS status;
+
+	/* Dump the record read-only; the parser fills in state.lck. */
+	status = g_lock_dump(
+		lock_ctx, key, fetch_share_mode_unlocked_parser, &state);
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_DEBUG("g_lock_dump failed: %s\n", nt_errstr(status));
+		return NULL;
+	}
+	/* May be NULL if the record was absent or failed to parse. */
+	return state.lck;
+}
+
+struct fetch_share_mode_state {
+	struct file_id id;
+	struct share_mode_lock *lck;	/* result, NULL if record absent */
+	NTSTATUS status;		/* error raised inside the dump fn */
+};
+
+static void fetch_share_mode_fn(
+	struct server_id exclusive,
+	size_t num_shared,
+	const struct server_id *shared,
+	const uint8_t *data,
+	size_t datalen,
+	void *private_data);
+static void fetch_share_mode_done(struct tevent_req *subreq);
+
+/**
+ * @brief Get a share_mode_lock without locking or refcounting
+ *
+ * This can be used in a clustered Samba environment where the async dbwrap
+ * request is sent over a socket to the local ctdbd. If the send queue is full
+ * and the caller was issuing multiple async dbwrap requests in a loop, the
+ * caller knows it's probably time to stop sending requests for now and try
+ * again later.
+ *
+ * @param[in] mem_ctx The talloc memory context to use.
+ *
+ * @param[in] ev The event context to work on.
+ *
+ * @param[in] id The file id for the locking.tdb key
+ *
+ * @param[out] queued This boolean out parameter tells the caller whether the
+ * async request is blocked in a full send queue:
+ *
+ * false := request is dispatched
+ *
+ * true := send queue is full, request waiting to be
+ * dispatched
+ *
+ * @return The new async request, NULL on error.
+ **/
+struct tevent_req *fetch_share_mode_send(TALLOC_CTX *mem_ctx,
+					 struct tevent_context *ev,
+					 struct file_id id,
+					 bool *queued)
+{
+	struct tevent_req *req = NULL, *subreq = NULL;
+	struct fetch_share_mode_state *state = NULL;
+
+	/*
+	 * NOTE(review): *queued is initialized here but never set to
+	 * true anywhere in this implementation — confirm whether the
+	 * "send queue full" reporting described above is still wired up.
+	 */
+	*queued = false;
+
+	req = tevent_req_create(mem_ctx, &state,
+				struct fetch_share_mode_state);
+	if (req == NULL) {
+		return NULL;
+	}
+	state->id = id;
+
+	subreq = g_lock_dump_send(
+		state,
+		ev,
+		lock_ctx,
+		locking_key(&id),
+		fetch_share_mode_fn,
+		state);
+	if (tevent_req_nomem(subreq, req)) {
+		return tevent_req_post(req, ev);
+	}
+	tevent_req_set_callback(subreq, fetch_share_mode_done, req);
+	return req;
+}
+
+/*
+ * Async variant of the unlocked parser: unmarshal the record payload
+ * and build state->lck.  Hard failures (OOM, corrupt data) are
+ * recorded in state->status for fetch_share_mode_done() to raise;
+ * an absent/tombstone record simply leaves state->lck == NULL.
+ */
+static void fetch_share_mode_fn(
+	struct server_id exclusive,
+	size_t num_shared,
+	const struct server_id *shared,
+	const uint8_t *data,
+	size_t datalen,
+	void *private_data)
+{
+	struct fetch_share_mode_state *state = talloc_get_type_abort(
+		private_data, struct fetch_share_mode_state);
+	struct locking_tdb_data ltdb = { 0 };
+
+	if (datalen != 0) {
+		bool ok = locking_tdb_data_get(&ltdb, data, datalen);
+		if (!ok) {
+			DBG_DEBUG("locking_tdb_data_get failed\n");
+			return;
+		}
+	}
+
+	if (ltdb.share_mode_data_len == 0) {
+		/* Likely a ctdb tombstone record, ignore it */
+		return;
+	}
+
+	state->lck = talloc(state, struct share_mode_lock);
+	if (state->lck == NULL) {
+		DBG_WARNING("talloc failed\n");
+		state->status = NT_STATUS_NO_MEMORY;
+		return;
+	}
+
+	state->lck->data = parse_share_modes(
+		state->lck,
+		state->id,
+		ltdb.share_mode_data_buf,
+		ltdb.share_mode_data_len);
+	if (state->lck->data == NULL) {
+		DBG_DEBUG("parse_share_modes failed\n");
+		state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
+		TALLOC_FREE(state->lck);
+		return;
+	}
+}
+
+/*
+ * Completion callback: propagate first the g_lock_dump transport
+ * error, then any error the parser recorded in state->status.
+ */
+static void fetch_share_mode_done(struct tevent_req *subreq)
+{
+	struct tevent_req *req = tevent_req_callback_data(
+		subreq, struct tevent_req);
+	struct fetch_share_mode_state *state = tevent_req_data(
+		req, struct fetch_share_mode_state);
+	NTSTATUS status;
+
+	status = g_lock_dump_recv(subreq);
+	TALLOC_FREE(subreq);
+	if (tevent_req_nterror(req, status)) {
+		return;
+	}
+	if (tevent_req_nterror(req, state->status)) {
+		return;
+	}
+	tevent_req_done(req);
+}
+
+/*
+ * Receive the result of fetch_share_mode_send().  On success the
+ * share_mode_lock is reparented onto mem_ctx and returned in *_lck.
+ * Returns NT_STATUS_NOT_FOUND when the record did not exist.
+ */
+NTSTATUS fetch_share_mode_recv(struct tevent_req *req,
+			       TALLOC_CTX *mem_ctx,
+			       struct share_mode_lock **_lck)
+{
+	struct fetch_share_mode_state *state = tevent_req_data(
+		req, struct fetch_share_mode_state);
+	struct share_mode_lock *lck = NULL;
+
+	NTSTATUS status;
+
+	if (tevent_req_is_nterror(req, &status)) {
+		tevent_req_received(req);
+		return status;
+	}
+
+	if (state->lck == NULL) {
+		tevent_req_received(req);
+		return NT_STATUS_NOT_FOUND;
+	}
+
+	/* Hand ownership of the lock (and its data) to the caller. */
+	lck = talloc_move(mem_ctx, &state->lck);
+
+	if (DEBUGLEVEL >= 10) {
+		DBG_DEBUG("share_mode_data:\n");
+		NDR_PRINT_DEBUG(share_mode_data, lck->data);
+	}
+
+	*_lck = lck;
+	tevent_req_received(req);
+	return NT_STATUS_OK;
+}
+
+struct share_mode_forall_state {
+	TDB_DATA key;		/* current record key, set per record */
+	int (*fn)(struct file_id fid,
+		  const struct share_mode_data *data,
+		  void *private_data);
+	void *private_data;
+};
+
+/*
+ * Per-record dump callback for share_mode_forall(): recover the
+ * file_id from the record key, parse the share mode data and invoke
+ * the user callback.  Malformed records are skipped with a debug log.
+ */
+static void share_mode_forall_dump_fn(
+	struct server_id exclusive,
+	size_t num_shared,
+	const struct server_id *shared,
+	const uint8_t *data,
+	size_t datalen,
+	void *private_data)
+{
+	struct share_mode_forall_state *state = private_data;
+	struct file_id fid;
+	struct locking_tdb_data ltdb = { 0 };
+	bool ok;
+	struct share_mode_data *d;
+
+	/* The key is the raw file_id; anything else is corrupt. */
+	if (state->key.dsize != sizeof(fid)) {
+		DBG_DEBUG("Got invalid key length %zu\n", state->key.dsize);
+		return;
+	}
+	memcpy(&fid, state->key.dptr, sizeof(fid));
+
+	ok = locking_tdb_data_get(&ltdb, data, datalen);
+	if (!ok) {
+		DBG_DEBUG("locking_tdb_data_get() failed\n");
+		return;
+	}
+
+	d = parse_share_modes(
+		talloc_tos(),
+		fid,
+		ltdb.share_mode_data_buf,
+		ltdb.share_mode_data_len);
+	if (d == NULL) {
+		DBG_DEBUG("parse_share_modes() failed\n");
+		return;
+	}
+
+	state->fn(fid, d, state->private_data);
+	TALLOC_FREE(d);
+}
+
+/*
+ * g_lock_locks() iterator: remember the record key, then dump the
+ * record through share_mode_forall_dump_fn().  Always returns 0 so
+ * the outer traversal keeps going even if one record fails to dump.
+ */
+static int share_mode_forall_fn(TDB_DATA key, void *private_data)
+{
+	struct share_mode_forall_state *state = private_data;
+	NTSTATUS status;
+
+	state->key = key;
+
+	status = g_lock_dump(
+		lock_ctx, key, share_mode_forall_dump_fn, private_data);
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_DEBUG("g_lock_dump failed: %s\n",
+			  nt_errstr(status));
+	}
+	return 0;
+}
+
+/*
+ * Iterate over every record in locking.tdb, invoking "fn" with the
+ * parsed share mode data of each.  Returns the g_lock_locks() result,
+ * or 0 when the locking context has not been initialized yet.
+ */
+int share_mode_forall(int (*fn)(struct file_id fid,
+				const struct share_mode_data *data,
+				void *private_data),
+		      void *private_data)
+{
+	struct share_mode_forall_state state = {
+		.fn = fn,
+		.private_data = private_data,
+	};
+	int result;
+
+	if (lock_ctx == NULL) {
+		/* Locking subsystem not up: nothing to traverse. */
+		return 0;
+	}
+
+	result = g_lock_locks(lock_ctx, share_mode_forall_fn, &state);
+	if (result < 0) {
+		DBG_DEBUG("g_lock_locks failed\n");
+	}
+	return result;
+}
+
+struct share_entry_forall_state {
+	struct file_id fid;			/* current record */
+	const struct share_mode_data *data;	/* current record's data */
+	int (*fn)(struct file_id fid,
+		  const struct share_mode_data *data,
+		  const struct share_mode_entry *entry,
+		  void *private_data);
+	void *private_data;
+	int ret;				/* last callback result */
+};
+
+/*
+ * Per-entry walker: forward one share mode entry to the user callback.
+ * A nonzero callback result stops the entry walk for this record.
+ */
+static bool share_entry_traverse_walker(
+	struct share_mode_entry *e,
+	bool *modified,
+	void *private_data)
+{
+	struct share_entry_forall_state *state = private_data;
+
+	state->ret = state->fn(
+		state->fid, state->data, e, state->private_data);
+	return (state->ret != 0);
+}
+
+/*
+ * Per-record callback for share_entry_forall(): wrap the const data
+ * in a temporary stack share_mode_lock (the const is cast away, but
+ * the walker only reads it) and walk the record's share entries.
+ */
+static int share_entry_traverse_fn(struct file_id fid,
+				   const struct share_mode_data *data,
+				   void *private_data)
+{
+	struct share_entry_forall_state *state = private_data;
+	struct share_mode_lock lck = {
+		.data = discard_const_p(struct share_mode_data, data)
+	};
+	bool ok;
+
+	state->fid = fid;
+	state->data = data;
+
+	ok = share_mode_forall_entries(
+		&lck, share_entry_traverse_walker, state);
+	if (!ok) {
+		DBG_DEBUG("share_mode_forall_entries failed\n");
+		/*
+		 * NOTE(review): this is an int-returning function, so
+		 * "return false" is 0, which share_mode_forall() treats
+		 * as "continue" — confirm this is the intended policy.
+		 */
+		return false;
+	}
+
+	return state->ret;
+}
+
+/*******************************************************************
+ Call the specified function on each entry under management by the
+ share mode system.
+********************************************************************/
+
+int share_entry_forall(int (*fn)(struct file_id fid,
+				 const struct share_mode_data *data,
+				 const struct share_mode_entry *entry,
+				 void *private_data),
+		       void *private_data)
+{
+	struct share_entry_forall_state state = {
+		.fn = fn,
+		.private_data = private_data,
+	};
+
+	/* Walk all records; the traverse fn walks each record's entries. */
+	return share_mode_forall(share_entry_traverse_fn, &state);
+}
+
+/*
+ * Total order on share mode entries: primary key is the owning server
+ * id, secondary key the share_file_id.  Returns <0, 0 or >0 like
+ * memcmp; this order is what share_mode_entry_find() binary-searches.
+ */
+static int share_mode_entry_cmp(
+	struct server_id pid1,
+	uint64_t share_file_id1,
+	struct server_id pid2,
+	uint64_t share_file_id2)
+{
+	int by_pid = server_id_cmp(&pid1, &pid2);
+
+	if (by_pid != 0) {
+		return by_pid;
+	}
+	if (share_file_id1 < share_file_id2) {
+		return -1;
+	}
+	if (share_file_id1 > share_file_id2) {
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Binary search the marshalled, sorted array of share mode entries
+ * for (pid, share_file_id).  On a hit *match is true, the entry is
+ * unmarshalled into *e and its index returned.  On a miss *match is
+ * false and the returned index is the insertion point that keeps the
+ * array sorted.  NOTE(review): if unmarshalling fails mid-search the
+ * function returns 0 with *match == false, indistinguishable from
+ * "insert at the front" — confirm callers tolerate this.
+ */
+static size_t share_mode_entry_find(
+	const uint8_t *data,
+	size_t num_share_modes,
+	struct server_id pid,
+	uint64_t share_file_id,
+	struct share_mode_entry *e,
+	bool *match)
+{
+	ssize_t left, right, middle;
+
+	*match = false;
+
+	if (num_share_modes == 0) {
+		return 0;
+	}
+
+	left = 0;
+	right = (num_share_modes-1);
+
+	while (left <= right) {
+		const uint8_t *middle_ptr = NULL;
+		int cmp;
+		bool ok;
+
+		middle = left + ((right - left) / 2);
+		middle_ptr = data + middle * SHARE_MODE_ENTRY_SIZE;
+
+		DBG_DEBUG("left=%zu, right=%zu, middle=%zu, middle_ptr=%p\n",
+			  left,
+			  right,
+			  middle,
+			  middle_ptr);
+
+		ok = share_mode_entry_get(middle_ptr, e);
+		if (!ok) {
+			DBG_DEBUG("share_mode_entry_get failed\n");
+			return 0;
+		}
+
+		cmp = share_mode_entry_cmp(
+			e->pid, e->share_file_id, pid, share_file_id);
+		if (cmp == 0) {
+			*match = true;
+			return middle;
+		}
+
+		if (cmp < 0) {
+			right = middle-1;
+		} else {
+			left = middle+1;
+		}
+	}
+
+	return left;
+}
+
+/*
+ * Add a new share mode entry for fsp to the locking.tdb record,
+ * keeping the entry array sorted by (pid, share_file_id).  The new
+ * entry is spliced in without unmarshalling the existing array: the
+ * store is handed up to three TDB_DATA chunks (prefix, new entry,
+ * suffix).  Returns false on any failure, including a pre-existing
+ * entry for the same (pid, share_file_id).
+ */
+bool set_share_mode(struct share_mode_lock *lck,
+		    struct files_struct *fsp,
+		    uid_t uid,
+		    uint64_t mid,
+		    uint16_t op_type,
+		    const struct smb2_lease_key *lease_key,
+		    uint32_t share_access,
+		    uint32_t access_mask)
+{
+	struct share_mode_data *d = lck->data;
+	TDB_DATA key = locking_key(&d->id);
+	struct server_id my_pid = messaging_server_id(
+		fsp->conn->sconn->msg_ctx);
+	struct locking_tdb_data *ltdb = NULL;
+	size_t idx;
+	struct share_mode_entry e = { .pid.pid = 0 };
+	struct share_mode_entry_buf e_buf;
+	NTSTATUS status;
+	bool ok, found;
+
+	TDB_DATA dbufs[3];
+	size_t num_dbufs = 0;
+
+	status = locking_tdb_data_fetch(key, talloc_tos(), &ltdb);
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_DEBUG("locking_tdb_data_fetch failed: %s\n",
+			  nt_errstr(status));
+		return false;
+	}
+	DBG_DEBUG("num_share_modes=%zu\n", ltdb->num_share_entries);
+
+	/* Locate the sorted insertion point; "found" means duplicate. */
+	idx = share_mode_entry_find(
+		ltdb->share_entries,
+		ltdb->num_share_entries,
+		my_pid,
+		fh_get_gen_id(fsp->fh),
+		&e,
+		&found);
+	if (found) {
+		DBG_WARNING("Found duplicate share mode\n");
+		status = NT_STATUS_INTERNAL_DB_CORRUPTION;
+		goto done;
+	}
+
+	e = (struct share_mode_entry) {
+		.pid = my_pid,
+		.share_access = share_access,
+		.private_options = fh_get_private_options(fsp->fh),
+		.access_mask = access_mask,
+		.op_mid = mid,
+		.op_type = op_type,
+		.time.tv_sec = fsp->open_time.tv_sec,
+		.time.tv_usec = fsp->open_time.tv_usec,
+		.share_file_id = fh_get_gen_id(fsp->fh),
+		.uid = (uint32_t)uid,
+		.flags = (fsp->posix_flags & FSP_POSIX_FLAGS_OPEN) ?
+			SHARE_MODE_FLAG_POSIX_OPEN : 0,
+		.name_hash = fsp->name_hash,
+	};
+
+	if (op_type == LEASE_OPLOCK) {
+		const struct GUID *client_guid = fsp_client_guid(fsp);
+		e.client_guid = *client_guid;
+		e.lease_key = *lease_key;
+	}
+
+	/* Marshal the new entry into its fixed-size wire form. */
+	ok = share_mode_entry_put(&e, &e_buf);
+	if (!ok) {
+		DBG_DEBUG("share_mode_entry_put failed\n");
+		status = NT_STATUS_INTERNAL_ERROR;
+		goto done;
+	}
+
+	DBG_DEBUG("idx=%zu, found=%d\n", idx, (int)found);
+
+	/* Chunk 1: existing entries sorting before the new one. */
+	if (idx > 0) {
+		dbufs[num_dbufs] = (TDB_DATA) {
+			.dptr = discard_const_p(uint8_t, ltdb->share_entries),
+			.dsize = idx * SHARE_MODE_ENTRY_SIZE,
+		};
+		num_dbufs += 1;
+	}
+
+	/* Chunk 2: the new entry itself. */
+	dbufs[num_dbufs] = (TDB_DATA) {
+		.dptr = e_buf.buf, .dsize = SHARE_MODE_ENTRY_SIZE,
+	};
+	num_dbufs += 1;
+
+	/* Chunk 3: existing entries sorting after the new one. */
+	if (idx < ltdb->num_share_entries) {
+		size_t num_after_idx = (ltdb->num_share_entries-idx);
+		dbufs[num_dbufs] = (TDB_DATA) {
+			.dptr = discard_const_p(uint8_t, ltdb->share_entries) +
+				idx * SHARE_MODE_ENTRY_SIZE,
+			.dsize = num_after_idx * SHARE_MODE_ENTRY_SIZE,
+		};
+		num_dbufs += 1;
+	}
+
+	{
+		size_t i;
+		for (i=0; i<num_dbufs; i++) {
+			DBG_DEBUG("dbufs[%zu]=(%p, %zu)\n",
+				  i,
+				  dbufs[i].dptr,
+				  dbufs[i].dsize);
+		}
+	}
+
+	if (num_dbufs == 1) {
+		/*
+		 * Storing a fresh record with just one share entry
+		 */
+		d->modified = true;
+	}
+
+	/*
+	 * If there was any existing data in
+	 * ltdb->share_entries, it's now been
+	 * moved and we've split it into:
+	 *
+	 * num_dbufs = 3
+	 * dbufs[0] -> old sorted data less than new_entry
+	 * dbufs[1] -> new_share_mode_entry
+	 * dbufs[2] -> old sorted_data greater than new entry.
+	 *
+	 * So the old data inside ltdb->share_entries is
+	 * no longer valid.
+	 *
+	 * If we're storing a brand new entry the
+	 * dbufs look like:
+	 *
+	 * num_dbufs = 1
+	 * dbufs[0] -> new_share_mode_entry
+	 *
+	 * Either way we must set ltdb->share_entries = NULL
+	 * and ltdb->num_share_entries = 0 so that
+	 * locking_tdb_data_store() doesn't use it to
+	 * store any data. It's no longer there.
+	 */
+
+	ltdb->share_entries = NULL;
+	ltdb->num_share_entries = 0;
+
+	status = locking_tdb_data_store(key, ltdb, dbufs, num_dbufs);
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_DEBUG("locking_tdb_data_store failed: %s\n",
+			  nt_errstr(status));
+	}
+done:
+	TALLOC_FREE(ltdb);
+	return NT_STATUS_IS_OK(status);
+}
+
+/*
+ * Visit the entry at index *i of the marshalled entry array "data":
+ * unmarshal it, call "fn", then either delete it in place (fn set
+ * e.stale: later entries are memmove'd down, *num_share_modes
+ * shrinks, *i stays), re-marshal it in place (fn set *modified), or
+ * leave it untouched.  Sets *writeback when the array changed.
+ * Returns fn's stop verdict so the caller can abort the walk.
+ */
+static bool share_mode_for_one_entry(
+	bool (*fn)(struct share_mode_entry *e,
+		   bool *modified,
+		   void *private_data),
+	void *private_data,
+	size_t *i,
+	uint8_t *data,
+	size_t *num_share_modes,
+	bool *writeback)
+{
+	DATA_BLOB blob = {
+		.data = data + (*i) * SHARE_MODE_ENTRY_SIZE,
+		.length = SHARE_MODE_ENTRY_SIZE,
+	};
+	struct share_mode_entry e = {.pid.pid=0};
+	enum ndr_err_code ndr_err = NDR_ERR_SUCCESS;
+	bool modified = false;
+	bool stop = false;
+	struct server_id e_pid;
+	uint64_t e_share_file_id;
+
+	ndr_err = ndr_pull_struct_blob_all_noalloc(
+		&blob,
+		&e,
+		(ndr_pull_flags_fn_t)ndr_pull_share_mode_entry);
+	if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
+		DBG_WARNING("ndr_pull_share_mode_entry failed\n");
+		/* Skip the unparseable entry, keep walking. */
+		*i += 1;
+		return false;
+	}
+	if (DEBUGLEVEL >= 10) {
+		DBG_DEBUG("entry[%zu]:\n", *i);
+		NDR_PRINT_DEBUG(share_mode_entry, &e);
+	}
+
+	/* Remember the sort key to verify fn didn't change it. */
+	e_pid = e.pid;
+	e_share_file_id = e.share_file_id;
+
+	stop = fn(&e, &modified, private_data);
+
+	DBG_DEBUG("entry[%zu]: modified=%d, e.stale=%d\n",
+		  *i,
+		  (int)modified,
+		  (int)e.stale);
+
+	if (e.stale) {
+		if (DEBUGLEVEL>=10) {
+			DBG_DEBUG("share_mode_entry:\n");
+			NDR_PRINT_DEBUG(share_mode_entry, &e);
+		}
+
+		/* Compact the array: shift the tail down one slot. */
+		if (*i < *num_share_modes) {
+			memmove(blob.data,
+				blob.data + SHARE_MODE_ENTRY_SIZE,
+				(*num_share_modes - *i - 1) *
+				SHARE_MODE_ENTRY_SIZE);
+		}
+		*num_share_modes -= 1;
+		*writeback = true;
+		/* Don't advance *i: a new entry now occupies this slot. */
+		return stop;
+	}
+
+	if (modified) {
+		if (DEBUGLEVEL>=10) {
+			DBG_DEBUG("share_mode_entry:\n");
+			NDR_PRINT_DEBUG(share_mode_entry, &e);
+		}
+
+		/*
+		 * Make sure sorting order is kept intact
+		 */
+		SMB_ASSERT(server_id_equal(&e_pid, &e.pid));
+		SMB_ASSERT(e_share_file_id == e.share_file_id);
+
+		ndr_err = ndr_push_struct_into_fixed_blob(
+			&blob,
+			&e,
+			(ndr_push_flags_fn_t)
+			ndr_push_share_mode_entry);
+		if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) {
+			DBG_WARNING("ndr_push_share_mode_entry "
+				    "failed: %s\n",
+				    ndr_errstr(ndr_err));
+			/*
+			 * Not much we can do, just ignore it
+			 */
+		}
+		*i += 1;
+		*writeback = true;
+		return stop;
+	}
+
+	if (stop) {
+		return true;
+	}
+
+	*i += 1;
+	return false;
+}
+
+/*
+ * Walk every share mode entry of lck's record, applying "fn" to each.
+ * fn may mark an entry modified (rewritten in place) or stale
+ * (removed), and may stop the walk early by returning true.  If any
+ * entry changed, the compacted array is written back; wiping the last
+ * entry marks the data modified so the record itself gets deleted.
+ * Returns false only on fetch/store failure.
+ */
+bool share_mode_forall_entries(
+	struct share_mode_lock *lck,
+	bool (*fn)(struct share_mode_entry *e,
+		   bool *modified,
+		   void *private_data),
+	void *private_data)
+{
+	struct share_mode_data *d = lck->data;
+	TDB_DATA key = locking_key(&d->id);
+	struct locking_tdb_data *ltdb = NULL;
+	uint8_t *share_entries = NULL;
+	size_t num_share_entries;
+	bool writeback = false;
+	NTSTATUS status;
+	bool stop = false;
+	size_t i;
+
+	status = locking_tdb_data_fetch(key, talloc_tos(), &ltdb);
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_DEBUG("locking_tdb_data_fetch failed: %s\n",
+			  nt_errstr(status));
+		return false;
+	}
+	DBG_DEBUG("num_share_modes=%zu\n", ltdb->num_share_entries);
+
+	num_share_entries = ltdb->num_share_entries;
+	share_entries = discard_const_p(uint8_t, ltdb->share_entries);
+
+	/* i is advanced inside the helper (not on deletion). */
+	i = 0;
+	while (i<num_share_entries) {
+		stop = share_mode_for_one_entry(
+			fn,
+			private_data,
+			&i,
+			share_entries,
+			&num_share_entries,
+			&writeback);
+		if (stop) {
+			break;
+		}
+	}
+
+	DBG_DEBUG("num_share_entries=%zu, writeback=%d\n",
+		  num_share_entries,
+		  (int)writeback);
+
+	if (!writeback) {
+		return true;
+	}
+
+	if ((ltdb->num_share_entries != 0 ) && (num_share_entries == 0)) {
+		/*
+		 * This routine wiped all share entries, let
+		 * share_mode_data_store() delete the record
+		 */
+		d->modified = true;
+	}
+
+	ltdb->num_share_entries = num_share_entries;
+	ltdb->share_entries = share_entries;
+
+	status = locking_tdb_data_store(key, ltdb, NULL, 0);
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_DEBUG("locking_tdb_data_store failed: %s\n",
+			  nt_errstr(status));
+		return false;
+	}
+
+	return true;
+}
+
+struct share_mode_count_entries_state {
+	size_t num_share_modes;	/* result: entry count of the record */
+	NTSTATUS status;	/* NT_STATUS_OK once the parser ran */
+};
+
+/*
+ * g_lock_dump() callback: read only the entry count out of the
+ * marshalled record, without parsing the individual entries.
+ */
+static void share_mode_count_entries_fn(
+	struct server_id exclusive,
+	size_t num_shared,
+	const struct server_id *shared,
+	const uint8_t *data,
+	size_t datalen,
+	void *private_data)
+{
+	struct share_mode_count_entries_state *state = private_data;
+	struct locking_tdb_data ltdb = { 0 };
+	bool ok;
+
+	ok = locking_tdb_data_get(&ltdb, data, datalen);
+	if (!ok) {
+		DBG_WARNING("locking_tdb_data_get failed for %zu\n", datalen);
+		state->status = NT_STATUS_INTERNAL_DB_CORRUPTION;
+		return;
+	}
+	state->num_share_modes = ltdb.num_share_entries;
+	state->status = NT_STATUS_OK;
+}
+
+/*
+ * Count the share mode entries of the record for "fid" without
+ * taking a lock.  Returns NT_STATUS_NOT_FOUND if the record does
+ * not exist (the callback never ran and the preset status stands).
+ */
+NTSTATUS share_mode_count_entries(struct file_id fid, size_t *num_share_modes)
+{
+	struct share_mode_count_entries_state state = {
+		.status = NT_STATUS_NOT_FOUND,
+	};
+	NTSTATUS status;
+
+	status = g_lock_dump(
+		lock_ctx,
+		locking_key(&fid),
+		share_mode_count_entries_fn,
+		&state);
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_DEBUG("g_lock_dump failed: %s\n",
+			  nt_errstr(status));
+		return status;
+	}
+	if (!NT_STATUS_IS_OK(state.status)) {
+		DBG_DEBUG("share_mode_count_entries_fn failed: %s\n",
+			  nt_errstr(state.status));
+		return state.status;
+	}
+
+	*num_share_modes = state.num_share_modes;
+	return NT_STATUS_OK;
+}
+
+/*
+ * Find the single entry keyed by (pid, share_file_id), apply "fn" to
+ * it, and persist the outcome: a stale entry is removed (and, when it
+ * was the last one, the data is marked modified so the record gets
+ * deleted), a modified entry is re-marshalled in place.  Common
+ * backend for del_share_mode(), remove_share_oplock() and
+ * downgrade_share_oplock().  Returns false if the entry was not
+ * found or any fetch/marshal/store step failed.
+ */
+static bool share_mode_entry_do(
+	struct share_mode_lock *lck,
+	struct server_id pid,
+	uint64_t share_file_id,
+	void (*fn)(struct share_mode_entry *e,
+		   size_t num_share_modes,
+		   bool *modified,
+		   void *private_data),
+	void *private_data)
+{
+	struct share_mode_data *d = lck->data;
+	TDB_DATA key = locking_key(&d->id);
+	struct locking_tdb_data *ltdb = NULL;
+	size_t idx;
+	bool found = false;
+	bool modified = false;
+	struct share_mode_entry e;
+	uint8_t *e_ptr = NULL;
+	NTSTATUS status;
+	bool ret = false;
+
+	status = locking_tdb_data_fetch(key, talloc_tos(), &ltdb);
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_DEBUG("locking_tdb_data_fetch failed: %s\n",
+			  nt_errstr(status));
+		return false;
+	}
+	DBG_DEBUG("num_share_modes=%zu\n", ltdb->num_share_entries);
+
+	idx = share_mode_entry_find(
+		ltdb->share_entries,
+		ltdb->num_share_entries,
+		pid,
+		share_file_id,
+		&e,
+		&found);
+	if (!found) {
+		DBG_WARNING("Did not find share mode entry for %"PRIu64"\n",
+			    share_file_id);
+		goto done;
+	}
+
+	if (DEBUGLEVEL>=10) {
+		DBG_DEBUG("entry[%zu]:\n", idx);
+		NDR_PRINT_DEBUG(share_mode_entry, &e);
+	}
+
+	fn(&e, ltdb->num_share_entries, &modified, private_data);
+
+	DBG_DEBUG("entry[%zu]: modified=%d, e.stale=%d\n",
+		  idx,
+		  (int)modified,
+		  (int)e.stale);
+
+	if (!e.stale && !modified) {
+		/* Nothing changed, no store needed. */
+		ret = true;
+		goto done;
+	}
+
+	e_ptr = discard_const_p(uint8_t, ltdb->share_entries) +
+		idx * SHARE_MODE_ENTRY_SIZE;
+
+	if (e.stale) {
+		/*
+		 * Move the rest down one entry
+		 */
+		size_t behind = ltdb->num_share_entries - idx - 1;
+		if (behind != 0) {
+			memmove(e_ptr,
+				e_ptr + SHARE_MODE_ENTRY_SIZE,
+				behind * SHARE_MODE_ENTRY_SIZE);
+		}
+		ltdb->num_share_entries -= 1;
+
+		if (ltdb->num_share_entries == 0) {
+			/*
+			 * Tell share_mode_lock_destructor() to delete
+			 * the whole record
+			 */
+			d->modified = true;
+		}
+
+		if (DEBUGLEVEL>=10) {
+			DBG_DEBUG("share_mode_entry:\n");
+			NDR_PRINT_DEBUG(share_mode_entry, &e);
+		}
+	} else {
+		struct share_mode_entry_buf buf;
+		bool ok;
+
+		if (ltdb->num_share_entries != 1) {
+			/*
+			 * Make sure the sorting order stays intact
+			 */
+			SMB_ASSERT(server_id_equal(&e.pid, &pid));
+			SMB_ASSERT(e.share_file_id == share_file_id);
+		}
+
+		ok = share_mode_entry_put(&e, &buf);
+		if (!ok) {
+			DBG_DEBUG("share_mode_entry_put failed\n");
+			goto done;
+		}
+		memcpy(e_ptr, buf.buf, SHARE_MODE_ENTRY_SIZE);
+	}
+
+	status = locking_tdb_data_store(key, ltdb, NULL, 0);
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_DEBUG("locking_tdb_data_store failed: %s\n",
+			  nt_errstr(status));
+		goto done;
+	}
+
+	ret = true;
+done:
+	TALLOC_FREE(ltdb);
+	return ret;
+}
+
+struct del_share_mode_state {
+	bool ok;	/* set once the callback has actually run */
+};
+
+/* Mark the located entry stale so share_mode_entry_do() removes it. */
+static void del_share_mode_fn(
+	struct share_mode_entry *e,
+	size_t num_share_modes,
+	bool *modified,
+	void *private_data)
+{
+	struct del_share_mode_state *state = private_data;
+
+	e->stale = true;
+	state->ok = true;
+}
+
+/*
+ * Remove fsp's share mode entry from lck's locking.tdb record.
+ * Returns false if the entry could not be found or stored.
+ */
+bool del_share_mode(struct share_mode_lock *lck, files_struct *fsp)
+{
+	struct del_share_mode_state state = { .ok = false };
+	bool did_run;
+
+	did_run = share_mode_entry_do(
+		lck,
+		messaging_server_id(fsp->conn->sconn->msg_ctx),
+		fh_get_gen_id(fsp->fh),
+		del_share_mode_fn,
+		&state);
+	if (!did_run) {
+		DBG_DEBUG("share_mode_entry_do failed\n");
+		return false;
+	}
+	if (!state.ok) {
+		DBG_DEBUG("del_share_mode_fn failed\n");
+		return false;
+	}
+	return true;
+}
+
+struct remove_share_oplock_state {
+	bool ok;	/* set once the callback has actually run */
+};
+
+/* Clear the oplock on the located entry and mark it for rewrite. */
+static void remove_share_oplock_fn(
+	struct share_mode_entry *e,
+	size_t num_share_modes,
+	bool *modified,
+	void *private_data)
+{
+	struct remove_share_oplock_state *state = private_data;
+
+	e->op_type = NO_OPLOCK;
+	*modified = true;
+	state->ok = true;
+}
+
+/*
+ * Drop the oplock held on fsp's share mode entry.  For lease-type
+ * oplocks also garbage-collects the lease record if no other opens
+ * reference it, then wakes any waiters blocked on this file.
+ */
+bool remove_share_oplock(struct share_mode_lock *lck, files_struct *fsp)
+{
+	struct remove_share_oplock_state state = { .ok = false };
+	bool ok;
+
+	ok = share_mode_entry_do(
+		lck,
+		messaging_server_id(fsp->conn->sconn->msg_ctx),
+		fh_get_gen_id(fsp->fh),
+		remove_share_oplock_fn,
+		&state);
+	if (!ok) {
+		DBG_DEBUG("share_mode_entry_do failed\n");
+		return false;
+	}
+	if (!state.ok) {
+		DBG_DEBUG("remove_share_oplock_fn failed\n");
+		return false;
+	}
+
+	if (fsp->oplock_type == LEASE_OPLOCK) {
+		remove_lease_if_stale(
+			lck,
+			fsp_client_guid(fsp),
+			&fsp->lease->lease.lease_key);
+	}
+
+	share_mode_wakeup_waiters(fsp->file_id);
+
+	return true;
+}
+
+struct downgrade_share_oplock_state {
+	bool ok;	/* set once the callback has actually run */
+};
+
+/* Downgrade the located entry to a level II oplock and mark it. */
+static void downgrade_share_oplock_fn(
+	struct share_mode_entry *e,
+	size_t num_share_modes,
+	bool *modified,
+	void *private_data)
+{
+	struct downgrade_share_oplock_state *state = private_data;
+
+	e->op_type = LEVEL_II_OPLOCK;
+	*modified = true;
+	state->ok = true;
+}
+
+/*
+ * Downgrade fsp's oplock to level II, and record the read-lease flag
+ * on the share mode data (level II is equivalent to a read lease).
+ */
+bool downgrade_share_oplock(struct share_mode_lock *lck, files_struct *fsp)
+{
+	struct downgrade_share_oplock_state state = { .ok = false };
+	bool ok;
+
+	ok = share_mode_entry_do(
+		lck,
+		messaging_server_id(fsp->conn->sconn->msg_ctx),
+		fh_get_gen_id(fsp->fh),
+		downgrade_share_oplock_fn,
+		&state);
+	if (!ok) {
+		DBG_DEBUG("share_mode_entry_do failed\n");
+		return false;
+	}
+	if (!state.ok) {
+		DBG_DEBUG("downgrade_share_oplock_fn failed\n");
+		return false;
+	}
+
+	lck->data->flags |= SHARE_MODE_LEASE_READ;
+	lck->data->modified = true;
+
+	return true;
+}
+
+/*
+ * Flag fsp's share mode entry as disconnected for a durable handle:
+ * the owner becomes the special "disconnected" server id and the
+ * share_file_id becomes the persistent open id.  Only durable opens
+ * qualify; anything else returns false.
+ */
+bool mark_share_mode_disconnected(struct share_mode_lock *lck,
+				  struct files_struct *fsp)
+{
+	struct server_id disconnected_pid = { .pid = 0 };
+
+	if ((fsp->op == NULL) || !fsp->op->global->durable) {
+		return false;
+	}
+
+	server_id_set_disconnected(&disconnected_pid);
+
+	return reset_share_mode_entry(
+		lck,
+		messaging_server_id(fsp->conn->sconn->msg_ctx),
+		fh_get_gen_id(fsp->fh),
+		disconnected_pid,
+		UINT64_MAX,
+		fsp->op->global->open_persistent_id);
+}
+
+/*
+ * Replace the owner of the record's single share mode entry: the
+ * entry currently keyed (old_pid, old_share_file_id) is rewritten to
+ * (new_pid, new_share_file_id), optionally updating op_mid (skipped
+ * when new_mid == UINT64_MAX).  Used for durable handle disconnect
+ * and reconnect.  Only valid when exactly one entry exists.
+ * Returns true on success, false on any failure.
+ */
+bool reset_share_mode_entry(
+	struct share_mode_lock *lck,
+	struct server_id old_pid,
+	uint64_t old_share_file_id,
+	struct server_id new_pid,
+	uint64_t new_mid,
+	uint64_t new_share_file_id)
+{
+	struct share_mode_data *d = lck->data;
+	TDB_DATA key = locking_key(&d->id);
+	struct locking_tdb_data *ltdb = NULL;
+	struct share_mode_entry e;
+	struct share_mode_entry_buf e_buf;
+	NTSTATUS status;
+	int cmp;
+	bool ret = false;
+	bool ok;
+
+	status = locking_tdb_data_fetch(key, talloc_tos(), &ltdb);
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_DEBUG("locking_tdb_data_fetch failed: %s\n",
+			  nt_errstr(status));
+		return false;
+	}
+
+	if (ltdb->num_share_entries != 1) {
+		DBG_DEBUG("num_share_modes=%zu\n", ltdb->num_share_entries);
+		goto done;
+	}
+
+	ok = share_mode_entry_get(ltdb->share_entries, &e);
+	if (!ok) {
+		DBG_WARNING("share_mode_entry_get failed\n");
+		goto done;
+	}
+
+	/*
+	 * Keep the three-way result in an int: share_mode_entry_cmp()
+	 * returns -1/0/1, and storing that in a bool loses the sign.
+	 */
+	cmp = share_mode_entry_cmp(
+		old_pid, old_share_file_id, e.pid, e.share_file_id);
+	if (cmp != 0) {
+		struct server_id_buf tmp1, tmp2;
+		DBG_WARNING("Expected pid=%s, file_id=%"PRIu64", "
+			    "got pid=%s, file_id=%"PRIu64"\n",
+			    server_id_str_buf(old_pid, &tmp1),
+			    old_share_file_id,
+			    server_id_str_buf(e.pid, &tmp2),
+			    e.share_file_id);
+		goto done;
+	}
+
+	e.pid = new_pid;
+	if (new_mid != UINT64_MAX) {
+		e.op_mid = new_mid;
+	}
+	e.share_file_id = new_share_file_id;
+
+	ok = share_mode_entry_put(&e, &e_buf);
+	if (!ok) {
+		DBG_WARNING("share_mode_entry_put failed\n");
+		goto done;
+	}
+
+	ltdb->share_entries = e_buf.buf;
+
+	status = locking_tdb_data_store(key, ltdb, NULL, 0);
+	if (!NT_STATUS_IS_OK(status)) {
+		DBG_DEBUG("locking_tdb_data_store failed: %s\n",
+			  nt_errstr(status));
+		goto done;
+	}
+
+	d->modified = true;
+	ret = true;
+done:
+	TALLOC_FREE(ltdb);
+	/* Fix: previously "return true;" reported success on all the
+	 * failure paths that jump to done. */
+	return ret;
+}
diff --git a/source3/locking/share_mode_lock.h b/source3/locking/share_mode_lock.h
new file mode 100644
index 0000000..4f47b6f
--- /dev/null
+++ b/source3/locking/share_mode_lock.h
@@ -0,0 +1,140 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LOCKING_SHARE_MODE_LOCK_H__
+#define __LOCKING_SHARE_MODE_LOCK_H__
+
+#include "replace.h"
+#include <tevent.h>
+#include "librpc/gen_ndr/file_id.h"
+#include "lib/util/time.h"
+
+struct share_mode_data;
+struct share_mode_lock;
+struct share_mode_entry;
+struct smb_filename;
+struct files_struct;
+struct smb2_lease_key;
+
+bool locking_init(void);
+bool locking_init_readonly(void);
+bool locking_end(void);
+
+struct share_mode_lock *get_share_mode_lock(
+ TALLOC_CTX *mem_ctx,
+ struct file_id id,
+ const char *servicepath,
+ const struct smb_filename *smb_fname,
+ const struct timespec *old_write_time);
+struct share_mode_lock *get_existing_share_mode_lock(TALLOC_CTX *mem_ctx,
+ struct file_id id);
+
+bool del_share_mode(struct share_mode_lock *lck,
+ struct files_struct *fsp);
+bool downgrade_share_oplock(struct share_mode_lock *lck,
+ struct files_struct *fsp);
+bool remove_share_oplock(struct share_mode_lock *lck,
+ struct files_struct *fsp);
+bool file_has_read_lease(struct files_struct *fsp);
+
+bool set_share_mode(
+ struct share_mode_lock *lck,
+ struct files_struct *fsp,
+ uid_t uid,
+ uint64_t mid,
+ uint16_t op_type,
+ const struct smb2_lease_key *lease_key,
+ uint32_t share_access,
+ uint32_t access_mask);
+bool reset_share_mode_entry(
+ struct share_mode_lock *lck,
+ struct server_id old_pid,
+ uint64_t old_share_file_id,
+ struct server_id new_pid,
+ uint64_t new_mid,
+ uint64_t new_share_file_id);
+
+bool mark_share_mode_disconnected(
+ struct share_mode_lock *lck, struct files_struct *fsp);
+
+struct share_mode_lock *fetch_share_mode_unlocked(
+ TALLOC_CTX *mem_ctx,
+ struct file_id id);
+
+struct tevent_req *fetch_share_mode_send(
+ TALLOC_CTX *mem_ctx,
+ struct tevent_context *ev,
+ struct file_id id,
+ bool *queued);
+NTSTATUS fetch_share_mode_recv(
+ struct tevent_req *req,
+ TALLOC_CTX *mem_ctx,
+ struct share_mode_lock **_lck);
+
+int share_entry_forall(
+ int (*fn)(struct file_id fid,
+ const struct share_mode_data *data,
+ const struct share_mode_entry *entry,
+ void *private_data),
+ void *private_data);
+
+NTSTATUS share_mode_count_entries(struct file_id fid, size_t *num_share_modes);
+NTSTATUS share_mode_do_locked(
+ struct file_id id,
+ void (*fn)(const uint8_t *buf,
+ size_t buflen,
+ bool *modified_dependent,
+ void *private_data),
+ void *private_data);
+int share_mode_forall(
+ int (*fn)(struct file_id fid,
+ const struct share_mode_data *data,
+ void *private_data),
+ void *private_data);
+bool share_mode_forall_entries(
+ struct share_mode_lock *lck,
+ bool (*fn)(struct share_mode_entry *e,
+ bool *modified,
+ void *private_data),
+ void *private_data);
+
+NTTIME share_mode_changed_write_time(struct share_mode_lock *lck);
+const char *share_mode_servicepath(struct share_mode_lock *lck);
+char *share_mode_filename(TALLOC_CTX *mem_ctx, struct share_mode_lock *lck);
+char *share_mode_data_dump(
+ TALLOC_CTX *mem_ctx, struct share_mode_lock *lck);
+
+void share_mode_flags_get(
+ struct share_mode_lock *lck,
+ uint32_t *access_mask,
+ uint32_t *share_mode,
+ uint32_t *lease_type);
+void share_mode_flags_set(
+ struct share_mode_lock *lck,
+ uint32_t access_mask,
+ uint32_t share_mode,
+ uint32_t lease_type,
+ bool *modified);
+
+struct tevent_req *share_mode_watch_send(
+ TALLOC_CTX *mem_ctx,
+ struct tevent_context *ev,
+ struct share_mode_lock *lck,
+ struct server_id blocker);
+NTSTATUS share_mode_watch_recv(
+ struct tevent_req *req, bool *blockerdead, struct server_id *blocker);
+NTSTATUS share_mode_wakeup_waiters(struct file_id id);
+
+#endif
diff --git a/source3/locking/share_mode_lock_private.h b/source3/locking/share_mode_lock_private.h
new file mode 100644
index 0000000..14a3c1b
--- /dev/null
+++ b/source3/locking/share_mode_lock_private.h
@@ -0,0 +1,24 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LOCKING_SHARE_MODE_LOCK_PRIVATE_H__
+#define __LOCKING_SHARE_MODE_LOCK_PRIVATE_H__
+
+struct share_mode_data;
+
+/*
+ * Private layout of a share mode lock handle: just a pointer to the
+ * parsed share mode data.  Kept out of the public header so callers
+ * outside the locking code treat the lock as opaque.
+ */
+struct share_mode_lock {
+	struct share_mode_data *data;
+};
+
+#endif