Diffstat:
 fs/ocfs2/dlm/Makefile      |    5
 fs/ocfs2/dlm/dlmapi.h      |  203
 fs/ocfs2/dlm/dlmast.c      |  475
 fs/ocfs2/dlm/dlmcommon.h   | 1132
 fs/ocfs2/dlm/dlmconvert.c  |  555
 fs/ocfs2/dlm/dlmconvert.h  |   18
 fs/ocfs2/dlm/dlmdebug.c    |  894
 fs/ocfs2/dlm/dlmdebug.h    |   49
 fs/ocfs2/dlm/dlmdomain.c   | 2362
 fs/ocfs2/dlm/dlmdomain.h   |   41
 fs/ocfs2/dlm/dlmlock.c     |  743
 fs/ocfs2/dlm/dlmmaster.c   | 3564
 fs/ocfs2/dlm/dlmrecovery.c | 2955
 fs/ocfs2/dlm/dlmthread.c   |  809
 fs/ocfs2/dlm/dlmunlock.c   |  695
 fs/ocfs2/dlmfs/Makefile    |    4
 fs/ocfs2/dlmfs/dlmfs.c     |  630
 fs/ocfs2/dlmfs/userdlm.c   |  682
 fs/ocfs2/dlmfs/userdlm.h   |   95
 fs/ocfs2/dlmglue.c         | 4473
 fs/ocfs2/dlmglue.h         |  209
 21 files changed, 20593 insertions, 0 deletions
diff --git a/fs/ocfs2/dlm/Makefile b/fs/ocfs2/dlm/Makefile
new file mode 100644
index 0000000000..5e700b45d3
--- /dev/null
+++ b/fs/ocfs2/dlm/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o
+
+ocfs2_dlm-objs := dlmdomain.o dlmdebug.o dlmthread.o dlmrecovery.o \
+ dlmmaster.o dlmast.o dlmconvert.o dlmlock.o dlmunlock.o
diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h
new file mode 100644
index 0000000000..bae60ca267
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmapi.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * dlmapi.h
+ *
+ * externally exported dlm interfaces
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ */
+
+#ifndef DLMAPI_H
+#define DLMAPI_H
+
+struct dlm_lock;
+struct dlm_ctxt;
+
+/* NOTE: changes made to this enum should be reflected in dlmdebug.c */
+enum dlm_status {
+ DLM_NORMAL = 0, /* 0: request in progress */
+ DLM_GRANTED, /* 1: request granted */
+ DLM_DENIED, /* 2: request denied */
+ DLM_DENIED_NOLOCKS, /* 3: request denied, out of system resources */
+ DLM_WORKING, /* 4: async request in progress */
+ DLM_BLOCKED, /* 5: lock request blocked */
+ DLM_BLOCKED_ORPHAN, /* 6: lock request blocked by an orphan lock */
+ DLM_DENIED_GRACE_PERIOD, /* 7: topological change in progress */
+ DLM_SYSERR, /* 8: system error */
+ DLM_NOSUPPORT, /* 9: unsupported */
+ DLM_CANCELGRANT, /* 10: can't cancel convert: already granted */
+ DLM_IVLOCKID, /* 11: bad lockid */
+ DLM_SYNC, /* 12: synchronous request granted */
+ DLM_BADTYPE, /* 13: bad resource type */
+ DLM_BADRESOURCE, /* 14: bad resource handle */
+ DLM_MAXHANDLES, /* 15: no more resource handles */
+ DLM_NOCLINFO, /* 16: can't contact cluster manager */
+ DLM_NOLOCKMGR, /* 17: can't contact lock manager */
+ DLM_NOPURGED, /* 18: can't contact purge daemon */
+ DLM_BADARGS, /* 19: bad api args */
+ DLM_VOID, /* 20: no status */
+ DLM_NOTQUEUED, /* 21: NOQUEUE was specified and request failed */
+ DLM_IVBUFLEN, /* 22: invalid resource name length */
+ DLM_CVTUNGRANT, /* 23: attempted to convert ungranted lock */
+ DLM_BADPARAM, /* 24: invalid lock mode specified */
+ DLM_VALNOTVALID, /* 25: value block has been invalidated */
+ DLM_REJECTED, /* 26: request rejected, unrecognized client */
+ DLM_ABORT, /* 27: blocked lock request cancelled */
+ DLM_CANCEL, /* 28: conversion request cancelled */
+ DLM_IVRESHANDLE, /* 29: invalid resource handle */
+ DLM_DEADLOCK, /* 30: deadlock recovery refused this request */
+ DLM_DENIED_NOASTS, /* 31: failed to allocate AST */
+ DLM_FORWARD, /* 32: request must wait for primary's response */
+ DLM_TIMEOUT, /* 33: timeout value for lock has expired */
+ DLM_IVGROUPID, /* 34: invalid group specification */
+ DLM_VERS_CONFLICT, /* 35: version conflicts prevent request handling */
+ DLM_BAD_DEVICE_PATH, /* 36: Locks device does not exist or path wrong */
+ DLM_NO_DEVICE_PERMISSION, /* 37: Client has insufficient perms for device */
+ DLM_NO_CONTROL_DEVICE, /* 38: Cannot set options on opened device */
+
+ DLM_RECOVERING, /* 39: extension, allows caller to fail a lock
+ request if it is being recovered */
+ DLM_MIGRATING, /* 40: extension, allows caller to fail a lock
+ request if it is being migrated */
+ DLM_MAXSTATS, /* 41: upper limit for return code validation */
+};
+
+/* for pretty-printing dlm_status error messages */
+const char *dlm_errmsg(enum dlm_status err);
+/* for pretty-printing dlm_status error names */
+const char *dlm_errname(enum dlm_status err);
+
+/* Eventually the DLM will use standard errno values, but in the
+ * meantime this lets us track dlm errors as they bubble up. When we
+ * bring its error reporting into line with the rest of the stack,
+ * these can just be replaced with calls to mlog_errno. */
+#define dlm_error(st) do { \
+ if ((st) != DLM_RECOVERING && \
+ (st) != DLM_MIGRATING && \
+ (st) != DLM_FORWARD) \
+ mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st))); \
+} while (0)
+
+#define DLM_LKSB_UNUSED1 0x01
+#define DLM_LKSB_PUT_LVB 0x02
+#define DLM_LKSB_GET_LVB 0x04
+#define DLM_LKSB_UNUSED2 0x08
+#define DLM_LKSB_UNUSED3 0x10
+#define DLM_LKSB_UNUSED4 0x20
+#define DLM_LKSB_UNUSED5 0x40
+#define DLM_LKSB_UNUSED6 0x80
+
+#define DLM_LVB_LEN 64
+
+/* Callers are only allowed access to the lvb and status members of
+ * this struct. */
+struct dlm_lockstatus {
+ enum dlm_status status;
+ u32 flags;
+ struct dlm_lock *lockid;
+ char lvb[DLM_LVB_LEN];
+};
+
+/* Valid lock modes. */
+#define LKM_IVMODE (-1) /* invalid mode */
+#define LKM_NLMODE 0 /* null lock */
+#define LKM_CRMODE 1 /* concurrent read unsupported */
+#define LKM_CWMODE 2 /* concurrent write unsupported */
+#define LKM_PRMODE 3 /* protected read */
+#define LKM_PWMODE 4 /* protected write unsupported */
+#define LKM_EXMODE 5 /* exclusive */
+#define LKM_MAXMODE 5
+#define LKM_MODEMASK 0xff
+
+/* Flags passed to dlmlock and dlmunlock:
+ * reserved: flags used by the "real" dlm
+ * only a few are supported by this dlm
+ * (U) = unsupported by ocfs2 dlm */
+#define LKM_ORPHAN 0x00000010 /* this lock is orphanable (U) */
+#define LKM_PARENTABLE 0x00000020 /* this lock was orphaned (U) */
+#define LKM_BLOCK 0x00000040 /* blocking lock request (U) */
+#define LKM_LOCAL 0x00000080 /* local lock request */
+#define LKM_VALBLK 0x00000100 /* lock value block request */
+#define LKM_NOQUEUE 0x00000200 /* non blocking request */
+#define LKM_CONVERT 0x00000400 /* conversion request */
+#define LKM_NODLCKWT 0x00000800 /* this lock won't deadlock (U) */
+#define LKM_UNLOCK 0x00001000 /* deallocate this lock */
+#define LKM_CANCEL 0x00002000 /* cancel conversion request */
+#define LKM_DEQALL 0x00004000 /* remove all locks held by proc (U) */
+#define LKM_INVVALBLK 0x00008000 /* invalidate lock value block */
+#define LKM_SYNCSTS 0x00010000 /* return synchronous status if poss (U) */
+#define LKM_TIMEOUT 0x00020000 /* lock request contains timeout (U) */
+#define LKM_SNGLDLCK 0x00040000 /* request can self-deadlock (U) */
+#define LKM_FINDLOCAL 0x00080000 /* find local lock request (U) */
+#define LKM_PROC_OWNED 0x00100000 /* owned by process, not group (U) */
+#define LKM_XID 0x00200000 /* use transaction id for deadlock (U) */
+#define LKM_XID_CONFLICT 0x00400000 /* do not allow lock inheritance (U) */
+#define LKM_FORCE 0x00800000 /* force unlock flag */
+#define LKM_REVVALBLK 0x01000000 /* temporary solution: re-validate
+ lock value block (U) */
+/* unused */
+#define LKM_UNUSED1 0x00000001 /* unused */
+#define LKM_UNUSED2 0x00000002 /* unused */
+#define LKM_UNUSED3 0x00000004 /* unused */
+#define LKM_UNUSED4 0x00000008 /* unused */
+#define LKM_UNUSED5 0x02000000 /* unused */
+#define LKM_UNUSED6 0x04000000 /* unused */
+#define LKM_UNUSED7 0x08000000 /* unused */
+
+/* ocfs2 extensions: internal only
+ * should never be used by caller */
+#define LKM_MIGRATION 0x10000000 /* extension: lockres is to be migrated
+ to another node */
+#define LKM_PUT_LVB 0x20000000 /* extension: lvb is being passed
+ should be applied to lockres */
+#define LKM_GET_LVB 0x40000000 /* extension: lvb should be copied
+ from lockres when lock is granted */
+#define LKM_RECOVERY 0x80000000 /* extension: flag for recovery lock
+ used to avoid recovery rwsem */
+
+
+typedef void (dlm_astlockfunc_t)(void *);
+typedef void (dlm_bastlockfunc_t)(void *, int);
+typedef void (dlm_astunlockfunc_t)(void *, enum dlm_status);
+
+enum dlm_status dlmlock(struct dlm_ctxt *dlm,
+ int mode,
+ struct dlm_lockstatus *lksb,
+ int flags,
+ const char *name,
+ int namelen,
+ dlm_astlockfunc_t *ast,
+ void *data,
+ dlm_bastlockfunc_t *bast);
+
+enum dlm_status dlmunlock(struct dlm_ctxt *dlm,
+ struct dlm_lockstatus *lksb,
+ int flags,
+ dlm_astunlockfunc_t *unlockast,
+ void *data);
+
+struct dlm_protocol_version {
+ u8 pv_major;
+ u8 pv_minor;
+};
+struct dlm_ctxt * dlm_register_domain(const char *domain, u32 key,
+ struct dlm_protocol_version *fs_proto);
+
+void dlm_unregister_domain(struct dlm_ctxt *dlm);
+
+void dlm_print_one_lock(struct dlm_lock *lockid);
+
+typedef void (dlm_eviction_func)(int, void *);
+struct dlm_eviction_cb {
+ struct list_head ec_item;
+ dlm_eviction_func *ec_func;
+ void *ec_data;
+};
+void dlm_setup_eviction_cb(struct dlm_eviction_cb *cb,
+ dlm_eviction_func *f,
+ void *data);
+void dlm_register_eviction_cb(struct dlm_ctxt *dlm,
+ struct dlm_eviction_cb *cb);
+void dlm_unregister_eviction_cb(struct dlm_eviction_cb *cb);
+
+#endif /* DLMAPI_H */
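The dlmlock()/dlmunlock() calls declared above are asynchronous: the return value only reports whether the request was accepted (DLM_NORMAL means "request in progress"), and completion is delivered through the ast/bast/unlockast callbacks. Below is a minimal caller sketch, assuming a domain handle already obtained from dlm_register_domain(); the lock name, callback bodies, and flags are made up for illustration and are not part of this patch.

/* Illustrative only, not part of this patch. */
static struct dlm_lockstatus example_lksb;

static void example_ast(void *astdata)
{
	/* lock granted or converted; example_lksb.status holds the result */
}

static void example_bast(void *astdata, int blocked_type)
{
	/* another node is blocked at blocked_type; downconvert or unlock */
}

static void example_unlock_ast(void *astdata, enum dlm_status st)
{
	/* unlock or cancel completed with status st */
}

static void example_take_and_drop(struct dlm_ctxt *dlm)
{
	enum dlm_status st;

	st = dlmlock(dlm, LKM_EXMODE, &example_lksb, LKM_VALBLK,
		     "example_lock", sizeof("example_lock") - 1,
		     example_ast, &example_lksb, example_bast);
	if (st != DLM_NORMAL)
		dlm_error(st);

	/* ... later, once example_ast has fired and the lock is unneeded ... */
	st = dlmunlock(dlm, &example_lksb, 0, example_unlock_ast, &example_lksb);
	if (st != DLM_NORMAL)
		dlm_error(st);
}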
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
new file mode 100644
index 0000000000..c681ba9579
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * dlmast.c
+ *
+ * AST and BAST functionality for local and remote nodes
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ */
+
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/random.h>
+#include <linux/blkdev.h>
+#include <linux/socket.h>
+#include <linux/inet.h>
+#include <linux/spinlock.h>
+
+
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
+
+#include "dlmapi.h"
+#include "dlmcommon.h"
+
+#define MLOG_MASK_PREFIX ML_DLM
+#include "../cluster/masklog.h"
+
+static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+ struct dlm_lock *lock);
+static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
+
+/* Should be called as an ast gets queued to see if the new
+ * lock level will obsolete a pending bast.
+ * For example, if dlm_thread queued a bast for an EX lock that
+ * was blocking another EX, but before sending the bast the
+ * lock owner downconverted to NL, the bast is now obsolete.
+ * Only the ast should be sent.
+ * This is needed because the lock and convert paths can queue
+ * asts out-of-band (not waiting for dlm_thread) in order to
+ * allow for LKM_NOQUEUE to get immediate responses. */
+static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
+{
+ assert_spin_locked(&dlm->ast_lock);
+ assert_spin_locked(&lock->spinlock);
+
+ if (lock->ml.highest_blocked == LKM_IVMODE)
+ return 0;
+ BUG_ON(lock->ml.highest_blocked == LKM_NLMODE);
+
+ if (lock->bast_pending &&
+ list_empty(&lock->bast_list))
+ /* old bast already sent, ok */
+ return 0;
+
+ if (lock->ml.type == LKM_EXMODE)
+ /* EX blocks anything left, any bast still valid */
+ return 0;
+ else if (lock->ml.type == LKM_NLMODE)
+ /* NL blocks nothing, no reason to send any bast, cancel it */
+ return 1;
+ else if (lock->ml.highest_blocked != LKM_EXMODE)
+ /* PR only blocks EX */
+ return 1;
+
+ return 0;
+}
+
+void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
+{
+ struct dlm_lock_resource *res;
+
+ BUG_ON(!dlm);
+ BUG_ON(!lock);
+
+ res = lock->lockres;
+
+ assert_spin_locked(&dlm->ast_lock);
+
+ if (!list_empty(&lock->ast_list)) {
+ mlog(ML_ERROR, "%s: res %.*s, lock %u:%llu, "
+ "AST list not empty, pending %d, newlevel %d\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+ lock->ast_pending, lock->ml.type);
+ BUG();
+ }
+ if (lock->ast_pending)
+ mlog(0, "%s: res %.*s, lock %u:%llu, AST getting flushed\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
+
+ /* putting lock on list, add a ref */
+ dlm_lock_get(lock);
+ spin_lock(&lock->spinlock);
+
+ /* check to see if this ast obsoletes the bast */
+ if (dlm_should_cancel_bast(dlm, lock)) {
+ mlog(0, "%s: res %.*s, lock %u:%llu, Cancelling BAST\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
+ lock->bast_pending = 0;
+ list_del_init(&lock->bast_list);
+ lock->ml.highest_blocked = LKM_IVMODE;
+ /* removing lock from list, remove a ref. guaranteed
+ * this won't be the last ref because of the get above,
+ * so res->spinlock will not be taken here */
+ dlm_lock_put(lock);
+ /* free up the reserved bast that we are cancelling.
+ * guaranteed that this will not be the last reserved
+ * ast because *both* an ast and a bast were reserved
+ * to get to this point. the res->spinlock will not be
+ * taken here */
+ dlm_lockres_release_ast(dlm, res);
+ }
+ list_add_tail(&lock->ast_list, &dlm->pending_asts);
+ lock->ast_pending = 1;
+ spin_unlock(&lock->spinlock);
+}
+
+void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
+{
+ BUG_ON(!dlm);
+ BUG_ON(!lock);
+
+ spin_lock(&dlm->ast_lock);
+ __dlm_queue_ast(dlm, lock);
+ spin_unlock(&dlm->ast_lock);
+}
+
+
+void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
+{
+ struct dlm_lock_resource *res;
+
+ BUG_ON(!dlm);
+ BUG_ON(!lock);
+
+ assert_spin_locked(&dlm->ast_lock);
+
+ res = lock->lockres;
+
+ BUG_ON(!list_empty(&lock->bast_list));
+ if (lock->bast_pending)
+ mlog(0, "%s: res %.*s, lock %u:%llu, BAST getting flushed\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
+
+ /* putting lock on list, add a ref */
+ dlm_lock_get(lock);
+ spin_lock(&lock->spinlock);
+ list_add_tail(&lock->bast_list, &dlm->pending_basts);
+ lock->bast_pending = 1;
+ spin_unlock(&lock->spinlock);
+}
+
+static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+ struct dlm_lock *lock)
+{
+ struct dlm_lockstatus *lksb = lock->lksb;
+ BUG_ON(!lksb);
+
+ /* only updates if this node masters the lockres */
+ spin_lock(&res->spinlock);
+ if (res->owner == dlm->node_num) {
+ /* check the lksb flags for the direction */
+ if (lksb->flags & DLM_LKSB_GET_LVB) {
+ mlog(0, "getting lvb from lockres for %s node\n",
+ lock->ml.node == dlm->node_num ? "master" :
+ "remote");
+ memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN);
+ }
+ /* Do nothing for lvb put requests - they should be done in
+ * place when the lock is downconverted - otherwise we risk
+ * racing gets and puts which could result in old lvb data
+ * being propagated. We leave the put flag set and clear it
+ * here. In the future we might want to clear it at the time
+ * the put is actually done.
+ */
+ }
+ spin_unlock(&res->spinlock);
+
+ /* reset any lvb flags on the lksb */
+ lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB);
+}
+
+void dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+ struct dlm_lock *lock)
+{
+ dlm_astlockfunc_t *fn;
+
+ mlog(0, "%s: res %.*s, lock %u:%llu, Local AST\n", dlm->name,
+ res->lockname.len, res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
+
+ fn = lock->ast;
+ BUG_ON(lock->ml.node != dlm->node_num);
+
+ dlm_update_lvb(dlm, res, lock);
+ (*fn)(lock->astdata);
+}
+
+
+int dlm_do_remote_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+ struct dlm_lock *lock)
+{
+ int ret;
+ struct dlm_lockstatus *lksb;
+ int lksbflags;
+
+ mlog(0, "%s: res %.*s, lock %u:%llu, Remote AST\n", dlm->name,
+ res->lockname.len, res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
+
+ lksb = lock->lksb;
+ BUG_ON(lock->ml.node == dlm->node_num);
+
+ lksbflags = lksb->flags;
+ dlm_update_lvb(dlm, res, lock);
+
+ /* lock request came from another node
+ * go do the ast over there */
+ ret = dlm_send_proxy_ast(dlm, res, lock, lksbflags);
+ return ret;
+}
+
+void dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int blocked_type)
+{
+ dlm_bastlockfunc_t *fn = lock->bast;
+
+ BUG_ON(lock->ml.node != dlm->node_num);
+
+ mlog(0, "%s: res %.*s, lock %u:%llu, Local BAST, blocked %d\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+ blocked_type);
+
+ (*fn)(lock->astdata, blocked_type);
+}
+
+
+
+int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data)
+{
+ int ret;
+ unsigned int locklen;
+ struct dlm_ctxt *dlm = data;
+ struct dlm_lock_resource *res = NULL;
+ struct dlm_lock *lock = NULL;
+ struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf;
+ char *name;
+ struct list_head *head = NULL;
+ __be64 cookie;
+ u32 flags;
+ u8 node;
+
+ if (!dlm_grab(dlm)) {
+ dlm_error(DLM_REJECTED);
+ return DLM_REJECTED;
+ }
+
+ mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
+ "Domain %s not fully joined!\n", dlm->name);
+
+ name = past->name;
+ locklen = past->namelen;
+ cookie = past->cookie;
+ flags = be32_to_cpu(past->flags);
+ node = past->node_idx;
+
+ if (locklen > DLM_LOCKID_NAME_MAX) {
+ ret = DLM_IVBUFLEN;
+ mlog(ML_ERROR, "Invalid name length (%d) in proxy ast "
+ "handler!\n", locklen);
+ goto leave;
+ }
+
+ if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) ==
+ (LKM_PUT_LVB|LKM_GET_LVB)) {
+ mlog(ML_ERROR, "Both PUT and GET lvb specified, (0x%x)\n",
+ flags);
+ ret = DLM_BADARGS;
+ goto leave;
+ }
+
+ mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
+ (flags & LKM_GET_LVB ? "get lvb" : "none"));
+
+ mlog(0, "type=%d, blocked_type=%d\n", past->type, past->blocked_type);
+
+ if (past->type != DLM_AST &&
+ past->type != DLM_BAST) {
+ mlog(ML_ERROR, "Unknown ast type! %d, cookie=%u:%llu, "
+ "name=%.*s, node=%u\n", past->type,
+ dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
+ locklen, name, node);
+ ret = DLM_IVLOCKID;
+ goto leave;
+ }
+
+ res = dlm_lookup_lockres(dlm, name, locklen);
+ if (!res) {
+ mlog(0, "Got %sast for unknown lockres! cookie=%u:%llu, "
+ "name=%.*s, node=%u\n", (past->type == DLM_AST ? "" : "b"),
+ dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
+ locklen, name, node);
+ ret = DLM_IVLOCKID;
+ goto leave;
+ }
+
+ /* cannot get a proxy ast message if this node owns it */
+ BUG_ON(res->owner == dlm->node_num);
+
+ mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
+ res->lockname.name);
+
+ spin_lock(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_RECOVERING) {
+ mlog(0, "Responding with DLM_RECOVERING!\n");
+ ret = DLM_RECOVERING;
+ goto unlock_out;
+ }
+ if (res->state & DLM_LOCK_RES_MIGRATING) {
+ mlog(0, "Responding with DLM_MIGRATING!\n");
+ ret = DLM_MIGRATING;
+ goto unlock_out;
+ }
+ /* try convert queue for both ast/bast */
+ head = &res->converting;
+ lock = NULL;
+ list_for_each_entry(lock, head, list) {
+ if (lock->ml.cookie == cookie)
+ goto do_ast;
+ }
+
+ /* if not on convert, try blocked for ast, granted for bast */
+ if (past->type == DLM_AST)
+ head = &res->blocked;
+ else
+ head = &res->granted;
+
+ list_for_each_entry(lock, head, list) {
+ /* if lock is found but unlock is pending ignore the bast */
+ if (lock->ml.cookie == cookie) {
+ if (lock->unlock_pending)
+ break;
+ goto do_ast;
+ }
+ }
+
+ mlog(0, "Got %sast for unknown lock! cookie=%u:%llu, name=%.*s, "
+ "node=%u\n", past->type == DLM_AST ? "" : "b",
+ dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
+ locklen, name, node);
+
+ ret = DLM_NORMAL;
+unlock_out:
+ spin_unlock(&res->spinlock);
+ goto leave;
+
+do_ast:
+ ret = DLM_NORMAL;
+ if (past->type == DLM_AST) {
+ /* do not alter lock refcount. switching lists. */
+ list_move_tail(&lock->list, &res->granted);
+ mlog(0, "%s: res %.*s, lock %u:%llu, Granted type %d => %d\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
+ lock->ml.type, lock->ml.convert_type);
+
+ if (lock->ml.convert_type != LKM_IVMODE) {
+ lock->ml.type = lock->ml.convert_type;
+ lock->ml.convert_type = LKM_IVMODE;
+ } else {
+ // should already be there....
+ }
+
+ lock->lksb->status = DLM_NORMAL;
+
+ /* if we requested the lvb, fetch it into our lksb now */
+ if (flags & LKM_GET_LVB) {
+ BUG_ON(!(lock->lksb->flags & DLM_LKSB_GET_LVB));
+ memcpy(lock->lksb->lvb, past->lvb, DLM_LVB_LEN);
+ }
+ }
+ spin_unlock(&res->spinlock);
+
+ if (past->type == DLM_AST)
+ dlm_do_local_ast(dlm, res, lock);
+ else
+ dlm_do_local_bast(dlm, res, lock, past->blocked_type);
+
+leave:
+ if (res)
+ dlm_lockres_put(res);
+
+ dlm_put(dlm);
+ return ret;
+}
+
+
+
+int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int msg_type,
+ int blocked_type, int flags)
+{
+ int ret = 0;
+ struct dlm_proxy_ast past;
+ struct kvec vec[2];
+ size_t veclen = 1;
+ int status;
+
+ mlog(0, "%s: res %.*s, to %u, type %d, blocked_type %d\n", dlm->name,
+ res->lockname.len, res->lockname.name, lock->ml.node, msg_type,
+ blocked_type);
+
+ memset(&past, 0, sizeof(struct dlm_proxy_ast));
+ past.node_idx = dlm->node_num;
+ past.type = msg_type;
+ past.blocked_type = blocked_type;
+ past.namelen = res->lockname.len;
+ memcpy(past.name, res->lockname.name, past.namelen);
+ past.cookie = lock->ml.cookie;
+
+ vec[0].iov_len = sizeof(struct dlm_proxy_ast);
+ vec[0].iov_base = &past;
+ if (flags & DLM_LKSB_GET_LVB) {
+ be32_add_cpu(&past.flags, LKM_GET_LVB);
+ vec[1].iov_len = DLM_LVB_LEN;
+ vec[1].iov_base = lock->lksb->lvb;
+ veclen++;
+ }
+
+ ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen,
+ lock->ml.node, &status);
+ if (ret < 0)
+ mlog(ML_ERROR, "%s: res %.*s, error %d send AST to node %u\n",
+ dlm->name, res->lockname.len, res->lockname.name, ret,
+ lock->ml.node);
+ else {
+ if (status == DLM_RECOVERING) {
+ mlog(ML_ERROR, "sent AST to node %u, it thinks this "
+ "node is dead!\n", lock->ml.node);
+ BUG();
+ } else if (status == DLM_MIGRATING) {
+ mlog(ML_ERROR, "sent AST to node %u, it returned "
+ "DLM_MIGRATING!\n", lock->ml.node);
+ BUG();
+ } else if (status != DLM_NORMAL && status != DLM_IVLOCKID) {
+ mlog(ML_ERROR, "AST to node %u returned %d!\n",
+ lock->ml.node, status);
+ /* ignore it */
+ }
+ ret = 0;
+ }
+ return ret;
+}
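For reference, dlm_send_proxy_ast_msg() above always sends the fixed dlm_proxy_ast header in vec[0] and appends the 64-byte LVB as vec[1] only when DLM_LKSB_GET_LVB is set. The sketch below (the helper name is made up, not part of this patch) shows the resulting payload size, which is bounded by DLM_PROXY_AST_MAX_LEN from dlmcommon.h.

/* Illustrative only, not part of this patch: wire length produced by
 * dlm_send_proxy_ast_msg() for a given set of lksb flags. */
static size_t example_proxy_ast_wire_len(int lksb_flags)
{
	size_t len = sizeof(struct dlm_proxy_ast);	/* vec[0]: header */

	if (lksb_flags & DLM_LKSB_GET_LVB)
		len += DLM_LVB_LEN;			/* vec[1]: the LVB */
	return len;	/* at most DLM_PROXY_AST_MAX_LEN */
}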
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
new file mode 100644
index 0000000000..20f790a474
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -0,0 +1,1132 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * dlmcommon.h
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ */
+
+#ifndef DLMCOMMON_H
+#define DLMCOMMON_H
+
+#include <linux/kref.h>
+
+#define DLM_HB_NODE_DOWN_PRI (0xf000000)
+#define DLM_HB_NODE_UP_PRI (0x8000000)
+
+#define DLM_LOCKID_NAME_MAX 32
+
+#define DLM_LOCK_RES_OWNER_UNKNOWN O2NM_MAX_NODES
+
+#define DLM_HASH_SIZE_DEFAULT (1 << 17)
+#if DLM_HASH_SIZE_DEFAULT < PAGE_SIZE
+# define DLM_HASH_PAGES 1
+#else
+# define DLM_HASH_PAGES (DLM_HASH_SIZE_DEFAULT / PAGE_SIZE)
+#endif
+#define DLM_BUCKETS_PER_PAGE (PAGE_SIZE / sizeof(struct hlist_head))
+#define DLM_HASH_BUCKETS (DLM_HASH_PAGES * DLM_BUCKETS_PER_PAGE)
+
+/* Intended to make it easier for us to switch out hash functions */
+#define dlm_lockid_hash(_n, _l) full_name_hash(NULL, _n, _l)
+
+enum dlm_mle_type {
+ DLM_MLE_BLOCK = 0,
+ DLM_MLE_MASTER = 1,
+ DLM_MLE_MIGRATION = 2,
+ DLM_MLE_NUM_TYPES = 3,
+};
+
+struct dlm_master_list_entry {
+ struct hlist_node master_hash_node;
+ struct list_head hb_events;
+ struct dlm_ctxt *dlm;
+ spinlock_t spinlock;
+ wait_queue_head_t wq;
+ atomic_t woken;
+ struct kref mle_refs;
+ int inuse;
+ unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ u8 master;
+ u8 new_master;
+ enum dlm_mle_type type;
+ struct o2hb_callback_func mle_hb_up;
+ struct o2hb_callback_func mle_hb_down;
+ struct dlm_lock_resource *mleres;
+ unsigned char mname[DLM_LOCKID_NAME_MAX];
+ unsigned int mnamelen;
+ unsigned int mnamehash;
+};
+
+enum dlm_ast_type {
+ DLM_AST = 0,
+ DLM_BAST = 1,
+ DLM_ASTUNLOCK = 2,
+};
+
+
+#define LKM_VALID_FLAGS (LKM_VALBLK | LKM_CONVERT | LKM_UNLOCK | \
+ LKM_CANCEL | LKM_INVVALBLK | LKM_FORCE | \
+ LKM_RECOVERY | LKM_LOCAL | LKM_NOQUEUE)
+
+#define DLM_RECOVERY_LOCK_NAME "$RECOVERY"
+#define DLM_RECOVERY_LOCK_NAME_LEN 9
+
+static inline int dlm_is_recovery_lock(const char *lock_name, int name_len)
+{
+ if (name_len == DLM_RECOVERY_LOCK_NAME_LEN &&
+ memcmp(lock_name, DLM_RECOVERY_LOCK_NAME, name_len)==0)
+ return 1;
+ return 0;
+}
+
+#define DLM_RECO_STATE_ACTIVE 0x0001
+#define DLM_RECO_STATE_FINALIZE 0x0002
+
+struct dlm_recovery_ctxt
+{
+ struct list_head resources;
+ struct list_head node_data;
+ u8 new_master;
+ u8 dead_node;
+ u16 state;
+ unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ wait_queue_head_t event;
+};
+
+enum dlm_ctxt_state {
+ DLM_CTXT_NEW = 0,
+ DLM_CTXT_JOINED = 1,
+ DLM_CTXT_IN_SHUTDOWN = 2,
+ DLM_CTXT_LEAVING = 3,
+};
+
+struct dlm_ctxt
+{
+ struct list_head list;
+ struct hlist_head **lockres_hash;
+ struct list_head dirty_list;
+ struct list_head purge_list;
+ struct list_head pending_asts;
+ struct list_head pending_basts;
+ struct list_head tracking_list;
+ unsigned int purge_count;
+ spinlock_t spinlock;
+ spinlock_t ast_lock;
+ spinlock_t track_lock;
+ char *name;
+ u8 node_num;
+ u32 key;
+ u8 joining_node;
+ u8 migrate_done; /* set to 1 means node has migrated all lock resources */
+ wait_queue_head_t dlm_join_events;
+ unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long exit_domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ struct dlm_recovery_ctxt reco;
+ spinlock_t master_lock;
+ struct hlist_head **master_hash;
+ struct list_head mle_hb_events;
+
+ /* these give a really vague idea of the system load */
+ atomic_t mle_tot_count[DLM_MLE_NUM_TYPES];
+ atomic_t mle_cur_count[DLM_MLE_NUM_TYPES];
+ atomic_t res_tot_count;
+ atomic_t res_cur_count;
+
+ struct dentry *dlm_debugfs_subroot;
+
+ /* NOTE: Next three are protected by dlm_domain_lock */
+ struct kref dlm_refs;
+ enum dlm_ctxt_state dlm_state;
+ unsigned int num_joins;
+
+ struct o2hb_callback_func dlm_hb_up;
+ struct o2hb_callback_func dlm_hb_down;
+ struct task_struct *dlm_thread_task;
+ struct task_struct *dlm_reco_thread_task;
+ struct workqueue_struct *dlm_worker;
+ wait_queue_head_t dlm_thread_wq;
+ wait_queue_head_t dlm_reco_thread_wq;
+ wait_queue_head_t ast_wq;
+ wait_queue_head_t migration_wq;
+
+ struct work_struct dispatched_work;
+ struct list_head work_list;
+ spinlock_t work_lock;
+ struct list_head dlm_domain_handlers;
+ struct list_head dlm_eviction_callbacks;
+
+ /* The filesystem specifies this at domain registration. We
+ * cache it here to know what to tell other nodes. */
+ struct dlm_protocol_version fs_locking_proto;
+ /* This is the inter-dlm communication version */
+ struct dlm_protocol_version dlm_locking_proto;
+};
+
+static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i)
+{
+ return dlm->lockres_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] + (i % DLM_BUCKETS_PER_PAGE);
+}
+
+static inline struct hlist_head *dlm_master_hash(struct dlm_ctxt *dlm,
+ unsigned i)
+{
+ return dlm->master_hash[(i / DLM_BUCKETS_PER_PAGE) % DLM_HASH_PAGES] +
+ (i % DLM_BUCKETS_PER_PAGE);
+}
+
+/* these keventd work queue items are for less-frequently
+ * called functions that cannot be directly called from the
+ * net message handlers for some reason, usually because
+ * they need to send net messages of their own. */
+void dlm_dispatch_work(struct work_struct *work);
+
+struct dlm_lock_resource;
+struct dlm_work_item;
+
+typedef void (dlm_workfunc_t)(struct dlm_work_item *, void *);
+
+struct dlm_request_all_locks_priv
+{
+ u8 reco_master;
+ u8 dead_node;
+};
+
+struct dlm_mig_lockres_priv
+{
+ struct dlm_lock_resource *lockres;
+ u8 real_master;
+ u8 extra_ref;
+};
+
+struct dlm_assert_master_priv
+{
+ struct dlm_lock_resource *lockres;
+ u8 request_from;
+ u32 flags;
+ unsigned ignore_higher:1;
+};
+
+struct dlm_deref_lockres_priv
+{
+ struct dlm_lock_resource *deref_res;
+ u8 deref_node;
+};
+
+struct dlm_work_item
+{
+ struct list_head list;
+ dlm_workfunc_t *func;
+ struct dlm_ctxt *dlm;
+ void *data;
+ union {
+ struct dlm_request_all_locks_priv ral;
+ struct dlm_mig_lockres_priv ml;
+ struct dlm_assert_master_priv am;
+ struct dlm_deref_lockres_priv dl;
+ } u;
+};
+
+static inline void dlm_init_work_item(struct dlm_ctxt *dlm,
+ struct dlm_work_item *i,
+ dlm_workfunc_t *f, void *data)
+{
+ memset(i, 0, sizeof(*i));
+ i->func = f;
+ INIT_LIST_HEAD(&i->list);
+ i->data = data;
+ i->dlm = dlm; /* must have already done a dlm_grab on this! */
+}
+
+
+
+static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
+ u8 node)
+{
+ assert_spin_locked(&dlm->spinlock);
+
+ dlm->joining_node = node;
+ wake_up(&dlm->dlm_join_events);
+}
+
+#define DLM_LOCK_RES_UNINITED 0x00000001
+#define DLM_LOCK_RES_RECOVERING 0x00000002
+#define DLM_LOCK_RES_READY 0x00000004
+#define DLM_LOCK_RES_DIRTY 0x00000008
+#define DLM_LOCK_RES_IN_PROGRESS 0x00000010
+#define DLM_LOCK_RES_MIGRATING 0x00000020
+#define DLM_LOCK_RES_DROPPING_REF 0x00000040
+#define DLM_LOCK_RES_BLOCK_DIRTY 0x00001000
+#define DLM_LOCK_RES_SETREF_INPROG 0x00002000
+#define DLM_LOCK_RES_RECOVERY_WAITING 0x00004000
+
+/* max milliseconds to wait to sync up a network failure with a node death */
+#define DLM_NODE_DEATH_WAIT_MAX (5 * 1000)
+
+#define DLM_PURGE_INTERVAL_MS (8 * 1000)
+
+struct dlm_lock_resource
+{
+ /* WARNING: Please see the comment in dlm_init_lockres before
+ * adding fields here. */
+ struct hlist_node hash_node;
+ struct qstr lockname;
+ struct kref refs;
+
+ /*
+ * Please keep granted, converting, and blocked in this order,
+ * as some funcs want to iterate over all lists.
+ *
+ * All four lists are protected by the hash's reference.
+ */
+ struct list_head granted;
+ struct list_head converting;
+ struct list_head blocked;
+ struct list_head purge;
+
+ /*
+ * These two lists require you to hold an additional reference
+ * while they are on the list.
+ */
+ struct list_head dirty;
+ struct list_head recovering; // dlm_recovery_ctxt.resources list
+
+ /* Added during init and removed during release */
+ struct list_head tracking; /* dlm->tracking_list */
+
+ /* unused lock resources have their last_used stamped and are
+ * put on a list for the dlm thread to run. */
+ unsigned long last_used;
+
+ struct dlm_ctxt *dlm;
+
+ unsigned migration_pending:1;
+ atomic_t asts_reserved;
+ spinlock_t spinlock;
+ wait_queue_head_t wq;
+ u8 owner; //node which owns the lock resource, or unknown
+ u16 state;
+ char lvb[DLM_LVB_LEN];
+ unsigned int inflight_locks;
+ unsigned int inflight_assert_workers;
+ unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+};
+
+struct dlm_migratable_lock
+{
+ __be64 cookie;
+
+ /* these 3 are just padding for the in-memory structure, but
+ * list and flags are actually used when sent over the wire */
+ __be16 pad1;
+ u8 list; // 0=granted, 1=converting, 2=blocked
+ u8 flags;
+
+ s8 type;
+ s8 convert_type;
+ s8 highest_blocked;
+ u8 node;
+}; // 16 bytes
+
+struct dlm_lock
+{
+ struct dlm_migratable_lock ml;
+
+ struct list_head list;
+ struct list_head ast_list;
+ struct list_head bast_list;
+ struct dlm_lock_resource *lockres;
+ spinlock_t spinlock;
+ struct kref lock_refs;
+
+ // ast and bast must be callable while holding a spinlock!
+ dlm_astlockfunc_t *ast;
+ dlm_bastlockfunc_t *bast;
+ void *astdata;
+ struct dlm_lockstatus *lksb;
+ unsigned ast_pending:1,
+ bast_pending:1,
+ convert_pending:1,
+ lock_pending:1,
+ cancel_pending:1,
+ unlock_pending:1,
+ lksb_kernel_allocated:1;
+};
+
+enum dlm_lockres_list {
+ DLM_GRANTED_LIST = 0,
+ DLM_CONVERTING_LIST = 1,
+ DLM_BLOCKED_LIST = 2,
+};
+
+static inline int dlm_lvb_is_empty(char *lvb)
+{
+ int i;
+ for (i=0; i<DLM_LVB_LEN; i++)
+ if (lvb[i])
+ return 0;
+ return 1;
+}
+
+static inline char *dlm_list_in_text(enum dlm_lockres_list idx)
+{
+ if (idx == DLM_GRANTED_LIST)
+ return "granted";
+ else if (idx == DLM_CONVERTING_LIST)
+ return "converting";
+ else if (idx == DLM_BLOCKED_LIST)
+ return "blocked";
+ else
+ return "unknown";
+}
+
+static inline struct list_head *
+dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx)
+{
+ struct list_head *ret = NULL;
+ if (idx == DLM_GRANTED_LIST)
+ ret = &res->granted;
+ else if (idx == DLM_CONVERTING_LIST)
+ ret = &res->converting;
+ else if (idx == DLM_BLOCKED_LIST)
+ ret = &res->blocked;
+ else
+ BUG();
+ return ret;
+}
+
+
+
+
+struct dlm_node_iter
+{
+ unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ int curnode;
+};
+
+
+enum {
+ DLM_MASTER_REQUEST_MSG = 500,
+ DLM_UNUSED_MSG1 = 501,
+ DLM_ASSERT_MASTER_MSG = 502,
+ DLM_CREATE_LOCK_MSG = 503,
+ DLM_CONVERT_LOCK_MSG = 504,
+ DLM_PROXY_AST_MSG = 505,
+ DLM_UNLOCK_LOCK_MSG = 506,
+ DLM_DEREF_LOCKRES_MSG = 507,
+ DLM_MIGRATE_REQUEST_MSG = 508,
+ DLM_MIG_LOCKRES_MSG = 509,
+ DLM_QUERY_JOIN_MSG = 510,
+ DLM_ASSERT_JOINED_MSG = 511,
+ DLM_CANCEL_JOIN_MSG = 512,
+ DLM_EXIT_DOMAIN_MSG = 513,
+ DLM_MASTER_REQUERY_MSG = 514,
+ DLM_LOCK_REQUEST_MSG = 515,
+ DLM_RECO_DATA_DONE_MSG = 516,
+ DLM_BEGIN_RECO_MSG = 517,
+ DLM_FINALIZE_RECO_MSG = 518,
+ DLM_QUERY_REGION = 519,
+ DLM_QUERY_NODEINFO = 520,
+ DLM_BEGIN_EXIT_DOMAIN_MSG = 521,
+ DLM_DEREF_LOCKRES_DONE = 522,
+};
+
+struct dlm_reco_node_data
+{
+ int state;
+ u8 node_num;
+ struct list_head list;
+};
+
+enum {
+ DLM_RECO_NODE_DATA_DEAD = -1,
+ DLM_RECO_NODE_DATA_INIT = 0,
+ DLM_RECO_NODE_DATA_REQUESTING = 1,
+ DLM_RECO_NODE_DATA_REQUESTED = 2,
+ DLM_RECO_NODE_DATA_RECEIVING = 3,
+ DLM_RECO_NODE_DATA_DONE = 4,
+ DLM_RECO_NODE_DATA_FINALIZE_SENT = 5,
+};
+
+
+enum {
+ DLM_MASTER_RESP_NO = 0,
+ DLM_MASTER_RESP_YES = 1,
+ DLM_MASTER_RESP_MAYBE = 2,
+ DLM_MASTER_RESP_ERROR = 3,
+};
+
+
+struct dlm_master_request
+{
+ u8 node_idx;
+ u8 namelen;
+ __be16 pad1;
+ __be32 flags;
+
+ u8 name[O2NM_MAX_NAME_LEN];
+};
+
+#define DLM_ASSERT_RESPONSE_REASSERT 0x00000001
+#define DLM_ASSERT_RESPONSE_MASTERY_REF 0x00000002
+
+#define DLM_ASSERT_MASTER_MLE_CLEANUP 0x00000001
+#define DLM_ASSERT_MASTER_REQUERY 0x00000002
+#define DLM_ASSERT_MASTER_FINISH_MIGRATION 0x00000004
+struct dlm_assert_master
+{
+ u8 node_idx;
+ u8 namelen;
+ __be16 pad1;
+ __be32 flags;
+
+ u8 name[O2NM_MAX_NAME_LEN];
+};
+
+#define DLM_MIGRATE_RESPONSE_MASTERY_REF 0x00000001
+
+struct dlm_migrate_request
+{
+ u8 master;
+ u8 new_master;
+ u8 namelen;
+ u8 pad1;
+ __be32 pad2;
+ u8 name[O2NM_MAX_NAME_LEN];
+};
+
+struct dlm_master_requery
+{
+ u8 pad1;
+ u8 pad2;
+ u8 node_idx;
+ u8 namelen;
+ __be32 pad3;
+ u8 name[O2NM_MAX_NAME_LEN];
+};
+
+#define DLM_MRES_RECOVERY 0x01
+#define DLM_MRES_MIGRATION 0x02
+#define DLM_MRES_ALL_DONE 0x04
+
+/*
+ * We would like to get one whole lockres into a single network
+ * message whenever possible. Generally speaking, there will be
+ * at most one dlm_lock on a lockres for each node in the cluster,
+ * plus (infrequently) any additional locks coming in from userdlm.
+ *
+ * struct _dlm_lockres_page
+ * {
+ * dlm_migratable_lockres mres;
+ * dlm_migratable_lock ml[DLM_MAX_MIGRATABLE_LOCKS];
+ * u8 pad[DLM_MIG_LOCKRES_RESERVED];
+ * };
+ *
+ * from ../cluster/tcp.h
+ * O2NET_MAX_PAYLOAD_BYTES (4096 - sizeof(net_msg))
+ * (roughly 4080 bytes)
+ * and sizeof(dlm_migratable_lockres) = 112 bytes
+ * and sizeof(dlm_migratable_lock) = 16 bytes
+ *
+ * Choosing DLM_MAX_MIGRATABLE_LOCKS=240 and
+ * DLM_MIG_LOCKRES_RESERVED=128 means we have this:
+ *
+ * (DLM_MAX_MIGRATABLE_LOCKS * sizeof(dlm_migratable_lock)) +
+ * sizeof(dlm_migratable_lockres) + DLM_MIG_LOCKRES_RESERVED =
+ * O2NET_MAX_PAYLOAD_BYTES
+ * (240 * 16) + 112 + 128 = 4080
+ *
+ * So a lockres would need more than 240 locks before it would
+ * use more than one network packet to recover. Not too bad.
+ */
+#define DLM_MAX_MIGRATABLE_LOCKS 240
+
+struct dlm_migratable_lockres
+{
+ u8 master;
+ u8 lockname_len;
+ u8 num_locks; // locks sent in this structure
+ u8 flags;
+ __be32 total_locks; // locks to be sent for this migration cookie
+ __be64 mig_cookie; // cookie for this lockres migration
+ // or zero if not needed
+ // 16 bytes
+ u8 lockname[DLM_LOCKID_NAME_MAX];
+ // 48 bytes
+ u8 lvb[DLM_LVB_LEN];
+ // 112 bytes
+ struct dlm_migratable_lock ml[]; // 16 bytes each, begins at byte 112
+};
+#define DLM_MIG_LOCKRES_MAX_LEN \
+ (sizeof(struct dlm_migratable_lockres) + \
+ (sizeof(struct dlm_migratable_lock) * \
+ DLM_MAX_MIGRATABLE_LOCKS) )
+
+/* from above, 128 bytes
+ * for some undetermined future use */
+#define DLM_MIG_LOCKRES_RESERVED (O2NET_MAX_PAYLOAD_BYTES - \
+ DLM_MIG_LOCKRES_MAX_LEN)
+
+struct dlm_create_lock
+{
+ __be64 cookie;
+
+ __be32 flags;
+ u8 pad1;
+ u8 node_idx;
+ s8 requested_type;
+ u8 namelen;
+
+ u8 name[O2NM_MAX_NAME_LEN];
+};
+
+struct dlm_convert_lock
+{
+ __be64 cookie;
+
+ __be32 flags;
+ u8 pad1;
+ u8 node_idx;
+ s8 requested_type;
+ u8 namelen;
+
+ u8 name[O2NM_MAX_NAME_LEN];
+
+ s8 lvb[];
+};
+#define DLM_CONVERT_LOCK_MAX_LEN (sizeof(struct dlm_convert_lock)+DLM_LVB_LEN)
+
+struct dlm_unlock_lock
+{
+ __be64 cookie;
+
+ __be32 flags;
+ __be16 pad1;
+ u8 node_idx;
+ u8 namelen;
+
+ u8 name[O2NM_MAX_NAME_LEN];
+
+ s8 lvb[];
+};
+#define DLM_UNLOCK_LOCK_MAX_LEN (sizeof(struct dlm_unlock_lock)+DLM_LVB_LEN)
+
+struct dlm_proxy_ast
+{
+ __be64 cookie;
+
+ __be32 flags;
+ u8 node_idx;
+ u8 type;
+ u8 blocked_type;
+ u8 namelen;
+
+ u8 name[O2NM_MAX_NAME_LEN];
+
+ s8 lvb[];
+};
+#define DLM_PROXY_AST_MAX_LEN (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN)
+
+#define DLM_MOD_KEY (0x666c6172)
+enum dlm_query_join_response_code {
+ JOIN_DISALLOW = 0,
+ JOIN_OK = 1,
+ JOIN_OK_NO_MAP = 2,
+ JOIN_PROTOCOL_MISMATCH = 3,
+};
+
+struct dlm_query_join_packet {
+ u8 code; /* Response code. dlm_minor and fs_minor
+ are only valid if this is JOIN_OK */
+ u8 dlm_minor; /* The minor version of the protocol the
+ dlm is speaking. */
+ u8 fs_minor; /* The minor version of the protocol the
+ filesystem is speaking. */
+ u8 reserved;
+};
+
+union dlm_query_join_response {
+ __be32 intval;
+ struct dlm_query_join_packet packet;
+};
+
+struct dlm_lock_request
+{
+ u8 node_idx;
+ u8 dead_node;
+ __be16 pad1;
+ __be32 pad2;
+};
+
+struct dlm_reco_data_done
+{
+ u8 node_idx;
+ u8 dead_node;
+ __be16 pad1;
+ __be32 pad2;
+
+ /* unused for now */
+ /* eventually we can use this to attempt
+ * lvb recovery based on each node's info */
+ u8 reco_lvb[DLM_LVB_LEN];
+};
+
+struct dlm_begin_reco
+{
+ u8 node_idx;
+ u8 dead_node;
+ __be16 pad1;
+ __be32 pad2;
+};
+
+struct dlm_query_join_request
+{
+ u8 node_idx;
+ u8 pad1[2];
+ u8 name_len;
+ struct dlm_protocol_version dlm_proto;
+ struct dlm_protocol_version fs_proto;
+ u8 domain[O2NM_MAX_NAME_LEN];
+ u8 node_map[BITS_TO_BYTES(O2NM_MAX_NODES)];
+};
+
+struct dlm_assert_joined
+{
+ u8 node_idx;
+ u8 pad1[2];
+ u8 name_len;
+ u8 domain[O2NM_MAX_NAME_LEN];
+};
+
+struct dlm_cancel_join
+{
+ u8 node_idx;
+ u8 pad1[2];
+ u8 name_len;
+ u8 domain[O2NM_MAX_NAME_LEN];
+};
+
+struct dlm_query_region {
+ u8 qr_node;
+ u8 qr_numregions;
+ u8 qr_namelen;
+ u8 pad1;
+ u8 qr_domain[O2NM_MAX_NAME_LEN];
+ u8 qr_regions[O2HB_MAX_REGION_NAME_LEN * O2NM_MAX_REGIONS];
+};
+
+struct dlm_node_info {
+ u8 ni_nodenum;
+ u8 pad1;
+ __be16 ni_ipv4_port;
+ __be32 ni_ipv4_address;
+};
+
+struct dlm_query_nodeinfo {
+ u8 qn_nodenum;
+ u8 qn_numnodes;
+ u8 qn_namelen;
+ u8 pad1;
+ u8 qn_domain[O2NM_MAX_NAME_LEN];
+ struct dlm_node_info qn_nodes[O2NM_MAX_NODES];
+};
+
+struct dlm_exit_domain
+{
+ u8 node_idx;
+ u8 pad1[3];
+};
+
+struct dlm_finalize_reco
+{
+ u8 node_idx;
+ u8 dead_node;
+ u8 flags;
+ u8 pad1;
+ __be32 pad2;
+};
+
+struct dlm_deref_lockres
+{
+ u32 pad1;
+ u16 pad2;
+ u8 node_idx;
+ u8 namelen;
+
+ u8 name[O2NM_MAX_NAME_LEN];
+};
+
+enum {
+ DLM_DEREF_RESPONSE_DONE = 0,
+ DLM_DEREF_RESPONSE_INPROG = 1,
+};
+
+struct dlm_deref_lockres_done {
+ u32 pad1;
+ u16 pad2;
+ u8 node_idx;
+ u8 namelen;
+
+ u8 name[O2NM_MAX_NAME_LEN];
+};
+
+static inline enum dlm_status
+__dlm_lockres_state_to_status(struct dlm_lock_resource *res)
+{
+ enum dlm_status status = DLM_NORMAL;
+
+ assert_spin_locked(&res->spinlock);
+
+ if (res->state & (DLM_LOCK_RES_RECOVERING|
+ DLM_LOCK_RES_RECOVERY_WAITING))
+ status = DLM_RECOVERING;
+ else if (res->state & DLM_LOCK_RES_MIGRATING)
+ status = DLM_MIGRATING;
+ else if (res->state & DLM_LOCK_RES_IN_PROGRESS)
+ status = DLM_FORWARD;
+
+ return status;
+}
+
+static inline u8 dlm_get_lock_cookie_node(u64 cookie)
+{
+ u8 ret;
+ cookie >>= 56;
+ ret = (u8)(cookie & 0xffULL);
+ return ret;
+}
+
+static inline unsigned long long dlm_get_lock_cookie_seq(u64 cookie)
+{
+ unsigned long long ret;
+ ret = ((unsigned long long)cookie) & 0x00ffffffffffffffULL;
+ return ret;
+}
+
+struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
+ struct dlm_lockstatus *lksb);
+void dlm_lock_get(struct dlm_lock *lock);
+void dlm_lock_put(struct dlm_lock *lock);
+
+void dlm_lock_attach_lockres(struct dlm_lock *lock,
+ struct dlm_lock_resource *res);
+
+int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data);
+int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data);
+int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data);
+
+void dlm_revert_pending_convert(struct dlm_lock_resource *res,
+ struct dlm_lock *lock);
+void dlm_revert_pending_lock(struct dlm_lock_resource *res,
+ struct dlm_lock *lock);
+
+int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data);
+void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
+ struct dlm_lock *lock);
+void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
+ struct dlm_lock *lock);
+
+int dlm_launch_thread(struct dlm_ctxt *dlm);
+void dlm_complete_thread(struct dlm_ctxt *dlm);
+int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
+void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
+void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
+void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
+int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);
+void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout);
+void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout);
+
+void dlm_put(struct dlm_ctxt *dlm);
+struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
+int dlm_domain_fully_joined(struct dlm_ctxt *dlm);
+
+void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
+void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
+static inline void dlm_lockres_get(struct dlm_lock_resource *res)
+{
+ /* This is called on every lookup, so it might be worth
+ * inlining. */
+ kref_get(&res->refs);
+}
+void dlm_lockres_put(struct dlm_lock_resource *res);
+void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
+void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
+struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
+ const char *name,
+ unsigned int len,
+ unsigned int hash);
+struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
+ const char *name,
+ unsigned int len,
+ unsigned int hash);
+struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
+ const char *name,
+ unsigned int len);
+
+int dlm_is_host_down(int errno);
+
+struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
+ const char *lockid,
+ int namelen,
+ int flags);
+struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
+ const char *name,
+ unsigned int namelen);
+
+void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res, int bit);
+void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res, int bit);
+
+void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
+void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
+
+void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
+
+void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
+void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
+void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
+void dlm_do_local_ast(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock);
+int dlm_do_remote_ast(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock);
+void dlm_do_local_bast(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ int blocked_type);
+int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ int msg_type,
+ int blocked_type, int flags);
+static inline int dlm_send_proxy_bast(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ int blocked_type)
+{
+ return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_BAST,
+ blocked_type, 0);
+}
+
+static inline int dlm_send_proxy_ast(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ int flags)
+{
+ return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_AST,
+ 0, flags);
+}
+
+void dlm_print_one_lock_resource(struct dlm_lock_resource *res);
+void __dlm_print_one_lock_resource(struct dlm_lock_resource *res);
+
+void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
+void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
+
+
+void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
+void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);
+
+int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
+int dlm_finish_migration(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ u8 old_master);
+void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
+void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res);
+
+int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data);
+int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data);
+void dlm_assert_master_post_handler(int status, void *data, void *ret_data);
+int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data);
+int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data);
+int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data);
+int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data);
+int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data);
+int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data);
+int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data);
+int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data);
+int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data);
+int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+ u8 nodenum, u8 *real_master);
+
+void __dlm_do_purge_lockres(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
+
+int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ int ignore_higher,
+ u8 request_from,
+ u32 flags);
+
+
+int dlm_send_one_lockres(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_migratable_lockres *mres,
+ u8 send_to,
+ u8 flags);
+void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
+
+/* will exit holding res->spinlock, but may drop in function */
+void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags);
+
+/* will exit holding res->spinlock, but may drop in function */
+static inline void __dlm_wait_on_lockres(struct dlm_lock_resource *res)
+{
+ __dlm_wait_on_lockres_flags(res, (DLM_LOCK_RES_IN_PROGRESS|
+ DLM_LOCK_RES_RECOVERING|
+ DLM_LOCK_RES_RECOVERY_WAITING|
+ DLM_LOCK_RES_MIGRATING));
+}
+
+void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle);
+void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle);
+
+/* create/destroy slab caches */
+int dlm_init_master_caches(void);
+void dlm_destroy_master_caches(void);
+
+int dlm_init_lock_cache(void);
+void dlm_destroy_lock_cache(void);
+
+int dlm_init_mle_cache(void);
+void dlm_destroy_mle_cache(void);
+
+void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up);
+int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
+void dlm_clean_master_list(struct dlm_ctxt *dlm,
+ u8 dead_node);
+void dlm_force_free_mles(struct dlm_ctxt *dlm);
+int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
+int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
+int __dlm_lockres_unused(struct dlm_lock_resource *res);
+
+static inline const char * dlm_lock_mode_name(int mode)
+{
+ switch (mode) {
+ case LKM_EXMODE:
+ return "EX";
+ case LKM_PRMODE:
+ return "PR";
+ case LKM_NLMODE:
+ return "NL";
+ }
+ return "UNKNOWN";
+}
+
+
+static inline int dlm_lock_compatible(int existing, int request)
+{
+ /* NO_LOCK compatible with all */
+ if (request == LKM_NLMODE ||
+ existing == LKM_NLMODE)
+ return 1;
+
+ /* EX incompatible with all non-NO_LOCK */
+ if (request == LKM_EXMODE)
+ return 0;
+
+ /* request must be PR, which is compatible with PR */
+ if (existing == LKM_PRMODE)
+ return 1;
+
+ return 0;
+}
+
+static inline int dlm_lock_on_list(struct list_head *head,
+ struct dlm_lock *lock)
+{
+ struct dlm_lock *tmplock;
+
+ list_for_each_entry(tmplock, head, list) {
+ if (tmplock == lock)
+ return 1;
+ }
+ return 0;
+}
+
+
+static inline enum dlm_status dlm_err_to_dlm_status(int err)
+{
+ enum dlm_status ret;
+ if (err == -ENOMEM)
+ ret = DLM_SYSERR;
+ else if (err == -ETIMEDOUT || o2net_link_down(err, NULL))
+ ret = DLM_NOLOCKMGR;
+ else if (err == -EINVAL)
+ ret = DLM_BADPARAM;
+ else if (err == -ENAMETOOLONG)
+ ret = DLM_IVBUFLEN;
+ else
+ ret = DLM_BADARGS;
+ return ret;
+}
+
+
+static inline void dlm_node_iter_init(unsigned long *map,
+ struct dlm_node_iter *iter)
+{
+ bitmap_copy(iter->node_map, map, O2NM_MAX_NODES);
+ iter->curnode = -1;
+}
+
+static inline int dlm_node_iter_next(struct dlm_node_iter *iter)
+{
+ int bit;
+ bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1);
+ if (bit >= O2NM_MAX_NODES) {
+ iter->curnode = O2NM_MAX_NODES;
+ return -ENOENT;
+ }
+ iter->curnode = bit;
+ return bit;
+}
+
+static inline void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ u8 owner)
+{
+ assert_spin_locked(&res->spinlock);
+
+ res->owner = owner;
+}
+
+static inline void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ u8 owner)
+{
+ assert_spin_locked(&res->spinlock);
+
+ if (owner != res->owner)
+ dlm_set_lockres_owner(dlm, res, owner);
+}
+
+#endif /* DLMCOMMON_H */
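The cookie helpers above split a 64-bit lock cookie into the owning node number (top 8 bits) and a per-node sequence number (low 56 bits). A short worked example, with made-up values and a hypothetical function name, not part of this patch:

/* Illustrative only, not part of this patch: how a lock cookie decomposes
 * according to dlm_get_lock_cookie_node() and dlm_get_lock_cookie_seq(). */
static void example_cookie_decode(void)
{
	u64 cookie = ((u64)7 << 56) | 0x1234ULL;	/* node 7, sequence 0x1234 */
	u8 node = dlm_get_lock_cookie_node(cookie);
	unsigned long long seq = dlm_get_lock_cookie_seq(cookie);

	/* node is 7, seq is 0x1234 */
	BUG_ON(node != 7 || seq != 0x1234ULL);
}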
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
new file mode 100644
index 0000000000..450d46eefa
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -0,0 +1,555 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * dlmconvert.c
+ *
+ * underlying calls for lock conversion
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ */
+
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/random.h>
+#include <linux/blkdev.h>
+#include <linux/socket.h>
+#include <linux/inet.h>
+#include <linux/spinlock.h>
+
+
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
+
+#include "dlmapi.h"
+#include "dlmcommon.h"
+
+#include "dlmconvert.h"
+
+#define MLOG_MASK_PREFIX ML_DLM
+#include "../cluster/masklog.h"
+
+/* NOTE: __dlmconvert_master is the only function in here that
+ * needs a spinlock held on entry (res->spinlock) and it is the
+ * only one that holds a lock on exit (res->spinlock).
+ * All other functions in here need no locks and drop all of
+ * the locks that they acquire. */
+static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags,
+ int type, int *call_ast,
+ int *kick_thread);
+static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags, int type);
+
+/*
+ * this is only called directly by dlmlock(), and only when the
+ * local node is the owner of the lockres
+ * locking:
+ * caller needs: none
+ * taken: takes and drops res->spinlock
+ * held on exit: none
+ * returns: see __dlmconvert_master
+ */
+enum dlm_status dlmconvert_master(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags, int type)
+{
+ int call_ast = 0, kick_thread = 0;
+ enum dlm_status status;
+
+ spin_lock(&res->spinlock);
+ /* we are not in a network handler, this is fine */
+ __dlm_wait_on_lockres(res);
+ __dlm_lockres_reserve_ast(res);
+ res->state |= DLM_LOCK_RES_IN_PROGRESS;
+
+ status = __dlmconvert_master(dlm, res, lock, flags, type,
+ &call_ast, &kick_thread);
+
+ res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+ if (status != DLM_NORMAL && status != DLM_NOTQUEUED)
+ dlm_error(status);
+
+ /* either queue the ast or release it */
+ if (call_ast)
+ dlm_queue_ast(dlm, lock);
+ else
+ dlm_lockres_release_ast(dlm, res);
+
+ if (kick_thread)
+ dlm_kick_thread(dlm, res);
+
+ return status;
+}
+
+/* performs lock conversion at the lockres master site
+ * locking:
+ * caller needs: res->spinlock
+ * taken: takes and drops lock->spinlock
+ * held on exit: res->spinlock
+ * returns: DLM_NORMAL, DLM_NOTQUEUED, DLM_DENIED
+ * call_ast: whether ast should be called for this lock
+ * kick_thread: whether dlm_kick_thread should be called
+ */
+static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags,
+ int type, int *call_ast,
+ int *kick_thread)
+{
+ enum dlm_status status = DLM_NORMAL;
+ struct dlm_lock *tmplock=NULL;
+
+ assert_spin_locked(&res->spinlock);
+
+ mlog(0, "type=%d, convert_type=%d, new convert_type=%d\n",
+ lock->ml.type, lock->ml.convert_type, type);
+
+ spin_lock(&lock->spinlock);
+
+ /* already converting? */
+ if (lock->ml.convert_type != LKM_IVMODE) {
+ mlog(ML_ERROR, "attempted to convert a lock with a lock "
+ "conversion pending\n");
+ status = DLM_DENIED;
+ goto unlock_exit;
+ }
+
+ /* must be on grant queue to convert */
+ if (!dlm_lock_on_list(&res->granted, lock)) {
+ mlog(ML_ERROR, "attempted to convert a lock not on grant "
+ "queue\n");
+ status = DLM_DENIED;
+ goto unlock_exit;
+ }
+
+ if (flags & LKM_VALBLK) {
+ switch (lock->ml.type) {
+ case LKM_EXMODE:
+ /* EX + LKM_VALBLK + convert == set lvb */
+ mlog(0, "will set lvb: converting %s->%s\n",
+ dlm_lock_mode_name(lock->ml.type),
+ dlm_lock_mode_name(type));
+ lock->lksb->flags |= DLM_LKSB_PUT_LVB;
+ break;
+ case LKM_PRMODE:
+ case LKM_NLMODE:
+ /* refetch if new level is not NL */
+ if (type > LKM_NLMODE) {
+ mlog(0, "will fetch new value into "
+ "lvb: converting %s->%s\n",
+ dlm_lock_mode_name(lock->ml.type),
+ dlm_lock_mode_name(type));
+ lock->lksb->flags |= DLM_LKSB_GET_LVB;
+ } else {
+ mlog(0, "will NOT fetch new value "
+ "into lvb: converting %s->%s\n",
+ dlm_lock_mode_name(lock->ml.type),
+ dlm_lock_mode_name(type));
+ flags &= ~(LKM_VALBLK);
+ }
+ break;
+ }
+ }
+
+
+ /* in-place downconvert? */
+ if (type <= lock->ml.type)
+ goto grant;
+
+ /* upconvert from here on */
+ status = DLM_NORMAL;
+ list_for_each_entry(tmplock, &res->granted, list) {
+ if (tmplock == lock)
+ continue;
+ if (!dlm_lock_compatible(tmplock->ml.type, type))
+ goto switch_queues;
+ }
+
+ list_for_each_entry(tmplock, &res->converting, list) {
+ if (!dlm_lock_compatible(tmplock->ml.type, type))
+ goto switch_queues;
+ /* existing conversion requests take precedence */
+ if (!dlm_lock_compatible(tmplock->ml.convert_type, type))
+ goto switch_queues;
+ }
+
+ /* fall thru to grant */
+
+grant:
+ mlog(0, "res %.*s, granting %s lock\n", res->lockname.len,
+ res->lockname.name, dlm_lock_mode_name(type));
+ /* immediately grant the new lock type */
+ lock->lksb->status = DLM_NORMAL;
+ if (lock->ml.node == dlm->node_num)
+ mlog(0, "doing in-place convert for nonlocal lock\n");
+ lock->ml.type = type;
+ if (lock->lksb->flags & DLM_LKSB_PUT_LVB)
+ memcpy(res->lvb, lock->lksb->lvb, DLM_LVB_LEN);
+
+ /*
+ * Move the lock to the tail because it may be the only lock which has
+ * an invalid lvb.
+ */
+ list_move_tail(&lock->list, &res->granted);
+
+ status = DLM_NORMAL;
+ *call_ast = 1;
+ goto unlock_exit;
+
+switch_queues:
+ if (flags & LKM_NOQUEUE) {
+ mlog(0, "failed to convert NOQUEUE lock %.*s from "
+ "%d to %d...\n", res->lockname.len, res->lockname.name,
+ lock->ml.type, type);
+ status = DLM_NOTQUEUED;
+ goto unlock_exit;
+ }
+ mlog(0, "res %.*s, queueing...\n", res->lockname.len,
+ res->lockname.name);
+
+ lock->ml.convert_type = type;
+ /* do not alter lock refcount. switching lists. */
+ list_move_tail(&lock->list, &res->converting);
+
+unlock_exit:
+ spin_unlock(&lock->spinlock);
+ if (status == DLM_DENIED) {
+ __dlm_print_one_lock_resource(res);
+ }
+ if (status == DLM_NORMAL)
+ *kick_thread = 1;
+ return status;
+}
+
+void dlm_revert_pending_convert(struct dlm_lock_resource *res,
+ struct dlm_lock *lock)
+{
+ /* do not alter lock refcount. switching lists. */
+ list_move_tail(&lock->list, &res->granted);
+ lock->ml.convert_type = LKM_IVMODE;
+ lock->lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB);
+}
+
+/* messages the master site to do lock conversion
+ * locking:
+ * caller needs: none
+ * taken: takes and drops res->spinlock, uses DLM_LOCK_RES_IN_PROGRESS
+ * held on exit: none
+ * returns: DLM_NORMAL, DLM_RECOVERING, status from remote node
+ */
+enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags, int type)
+{
+ enum dlm_status status;
+
+ mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
+ lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
+
+ spin_lock(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_RECOVERING) {
+ mlog(0, "bailing out early since res is RECOVERING "
+ "on secondary queue\n");
+ /* __dlm_print_one_lock_resource(res); */
+ status = DLM_RECOVERING;
+ goto bail;
+ }
+ /* will exit this call with spinlock held */
+ __dlm_wait_on_lockres(res);
+
+ if (lock->ml.convert_type != LKM_IVMODE) {
+ __dlm_print_one_lock_resource(res);
+ mlog(ML_ERROR, "converting a remote lock that is already "
+ "converting! (cookie=%u:%llu, conv=%d)\n",
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+ lock->ml.convert_type);
+ status = DLM_DENIED;
+ goto bail;
+ }
+
+ if (lock->ml.type == type && lock->ml.convert_type == LKM_IVMODE) {
+ mlog(0, "last convert request returned DLM_RECOVERING, but "
+ "owner has already queued and sent ast to me. res %.*s, "
+ "(cookie=%u:%llu, type=%d, conv=%d)\n",
+ res->lockname.len, res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+ lock->ml.type, lock->ml.convert_type);
+ status = DLM_NORMAL;
+ goto bail;
+ }
+
+ res->state |= DLM_LOCK_RES_IN_PROGRESS;
+ /* move lock to local convert queue */
+ /* do not alter lock refcount. switching lists. */
+ list_move_tail(&lock->list, &res->converting);
+ lock->convert_pending = 1;
+ lock->ml.convert_type = type;
+
+ if (flags & LKM_VALBLK) {
+ if (lock->ml.type == LKM_EXMODE) {
+ flags |= LKM_PUT_LVB;
+ lock->lksb->flags |= DLM_LKSB_PUT_LVB;
+ } else {
+ if (lock->ml.convert_type == LKM_NLMODE)
+ flags &= ~LKM_VALBLK;
+ else {
+ flags |= LKM_GET_LVB;
+ lock->lksb->flags |= DLM_LKSB_GET_LVB;
+ }
+ }
+ }
+ spin_unlock(&res->spinlock);
+
+ /* no locks held here.
+ * need to wait for a reply as to whether it got queued or not. */
+ status = dlm_send_remote_convert_request(dlm, res, lock, flags, type);
+
+ spin_lock(&res->spinlock);
+ res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+ /* if it failed, move it back to the granted queue.
+ * if the master returned DLM_NORMAL and then went down before sending
+ * the ast, the lock may have already been moved to the granted queue;
+ * reset status to DLM_RECOVERING and retry the convert */
+ if (status != DLM_NORMAL) {
+ if (status != DLM_NOTQUEUED)
+ dlm_error(status);
+ dlm_revert_pending_convert(res, lock);
+ } else if (!lock->convert_pending) {
+ mlog(0, "%s: res %.*s, owner died and lock has been moved back "
+ "to granted list, retry convert.\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ status = DLM_RECOVERING;
+ }
+
+ lock->convert_pending = 0;
+bail:
+ spin_unlock(&res->spinlock);
+
+ /* TODO: should this be a wake_one? */
+ /* wake up any IN_PROGRESS waiters */
+ wake_up(&res->wq);
+
+ return status;
+}
+
+/* sends DLM_CONVERT_LOCK_MSG to master site
+ * locking:
+ * caller needs: none
+ * taken: none
+ * held on exit: none
+ * returns: DLM_NOLOCKMGR, status from remote node
+ */
+static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags, int type)
+{
+ struct dlm_convert_lock convert;
+ int tmpret;
+ enum dlm_status ret;
+ int status = 0;
+ struct kvec vec[2];
+ size_t veclen = 1;
+
+ mlog(0, "%.*s\n", res->lockname.len, res->lockname.name);
+
+ memset(&convert, 0, sizeof(struct dlm_convert_lock));
+ convert.node_idx = dlm->node_num;
+ convert.requested_type = type;
+ convert.cookie = lock->ml.cookie;
+ convert.namelen = res->lockname.len;
+ convert.flags = cpu_to_be32(flags);
+ memcpy(convert.name, res->lockname.name, convert.namelen);
+
+ vec[0].iov_len = sizeof(struct dlm_convert_lock);
+ vec[0].iov_base = &convert;
+
+ if (flags & LKM_PUT_LVB) {
+ /* extra data to send if we are updating lvb */
+ vec[1].iov_len = DLM_LVB_LEN;
+ vec[1].iov_base = lock->lksb->lvb;
+ veclen++;
+ }
+
+ tmpret = o2net_send_message_vec(DLM_CONVERT_LOCK_MSG, dlm->key,
+ vec, veclen, res->owner, &status);
+ if (tmpret >= 0) {
+ // successfully sent and received
+ ret = status; // this is already a dlm_status
+ if (ret == DLM_RECOVERING) {
+ mlog(0, "node %u returned DLM_RECOVERING from convert "
+ "message!\n", res->owner);
+ } else if (ret == DLM_MIGRATING) {
+ mlog(0, "node %u returned DLM_MIGRATING from convert "
+ "message!\n", res->owner);
+ } else if (ret == DLM_FORWARD) {
+ mlog(0, "node %u returned DLM_FORWARD from convert "
+ "message!\n", res->owner);
+ } else if (ret != DLM_NORMAL && ret != DLM_NOTQUEUED)
+ dlm_error(ret);
+ } else {
+ mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+ "node %u\n", tmpret, DLM_CONVERT_LOCK_MSG, dlm->key,
+ res->owner);
+ if (dlm_is_host_down(tmpret)) {
+ /* instead of logging the same network error over
+ * and over, sleep here and wait for the heartbeat
+ * to notice the node is dead. times out after 5s. */
+ dlm_wait_for_node_death(dlm, res->owner,
+ DLM_NODE_DEATH_WAIT_MAX);
+ ret = DLM_RECOVERING;
+ mlog(0, "node %u died so returning DLM_RECOVERING "
+ "from convert message!\n", res->owner);
+ } else {
+ ret = dlm_err_to_dlm_status(tmpret);
+ }
+ }
+
+ return ret;
+}
+
+/* handler for DLM_CONVERT_LOCK_MSG on master site
+ * locking:
+ * caller needs: none
+ * taken: takes and drop res->spinlock
+ * held on exit: none
+ * returns: DLM_NORMAL, DLM_IVLOCKID, DLM_BADARGS,
+ * status from __dlmconvert_master
+ */
+int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_convert_lock *cnv = (struct dlm_convert_lock *)msg->buf;
+ struct dlm_lock_resource *res = NULL;
+ struct dlm_lock *lock = NULL;
+ struct dlm_lock *tmp_lock;
+ struct dlm_lockstatus *lksb;
+ enum dlm_status status = DLM_NORMAL;
+ u32 flags;
+ int call_ast = 0, kick_thread = 0, ast_reserved = 0, wake = 0;
+
+ if (!dlm_grab(dlm)) {
+ dlm_error(DLM_REJECTED);
+ return DLM_REJECTED;
+ }
+
+ mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
+ "Domain %s not fully joined!\n", dlm->name);
+
+ if (cnv->namelen > DLM_LOCKID_NAME_MAX) {
+ status = DLM_IVBUFLEN;
+ dlm_error(status);
+ goto leave;
+ }
+
+ flags = be32_to_cpu(cnv->flags);
+
+ if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) ==
+ (LKM_PUT_LVB|LKM_GET_LVB)) {
+ mlog(ML_ERROR, "both PUT and GET lvb specified\n");
+ status = DLM_BADARGS;
+ goto leave;
+ }
+
+ mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
+ (flags & LKM_GET_LVB ? "get lvb" : "none"));
+
+ status = DLM_IVLOCKID;
+ res = dlm_lookup_lockres(dlm, cnv->name, cnv->namelen);
+ if (!res) {
+ dlm_error(status);
+ goto leave;
+ }
+
+ spin_lock(&res->spinlock);
+ status = __dlm_lockres_state_to_status(res);
+ if (status != DLM_NORMAL) {
+ spin_unlock(&res->spinlock);
+ dlm_error(status);
+ goto leave;
+ }
+ list_for_each_entry(tmp_lock, &res->granted, list) {
+ if (tmp_lock->ml.cookie == cnv->cookie &&
+ tmp_lock->ml.node == cnv->node_idx) {
+ lock = tmp_lock;
+ dlm_lock_get(lock);
+ break;
+ }
+ }
+ spin_unlock(&res->spinlock);
+ if (!lock) {
+ status = DLM_IVLOCKID;
+ mlog(ML_ERROR, "did not find lock to convert on grant queue! "
+ "cookie=%u:%llu\n",
+ dlm_get_lock_cookie_node(be64_to_cpu(cnv->cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(cnv->cookie)));
+ dlm_print_one_lock_resource(res);
+ goto leave;
+ }
+
+ /* found the lock */
+ lksb = lock->lksb;
+
+ /* see if caller needed to get/put lvb */
+ if (flags & LKM_PUT_LVB) {
+ BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
+ lksb->flags |= DLM_LKSB_PUT_LVB;
+ memcpy(&lksb->lvb[0], &cnv->lvb[0], DLM_LVB_LEN);
+ } else if (flags & LKM_GET_LVB) {
+ BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
+ lksb->flags |= DLM_LKSB_GET_LVB;
+ }
+
+ spin_lock(&res->spinlock);
+ status = __dlm_lockres_state_to_status(res);
+ if (status == DLM_NORMAL) {
+ __dlm_lockres_reserve_ast(res);
+ ast_reserved = 1;
+ res->state |= DLM_LOCK_RES_IN_PROGRESS;
+ status = __dlmconvert_master(dlm, res, lock, flags,
+ cnv->requested_type,
+ &call_ast, &kick_thread);
+ res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+ wake = 1;
+ }
+ spin_unlock(&res->spinlock);
+ if (wake)
+ wake_up(&res->wq);
+
+ if (status != DLM_NORMAL) {
+ if (status != DLM_NOTQUEUED)
+ dlm_error(status);
+ lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB);
+ }
+
+leave:
+ if (lock)
+ dlm_lock_put(lock);
+
+ /* either queue the ast or release it, if reserved */
+ if (call_ast)
+ dlm_queue_ast(dlm, lock);
+ else if (ast_reserved)
+ dlm_lockres_release_ast(dlm, res);
+
+ if (kick_thread)
+ dlm_kick_thread(dlm, res);
+
+ if (res)
+ dlm_lockres_put(res);
+
+ dlm_put(dlm);
+
+ return status;
+}
diff --git a/fs/ocfs2/dlm/dlmconvert.h b/fs/ocfs2/dlm/dlmconvert.h
new file mode 100644
index 0000000000..1f37171651
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmconvert.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * dlmconvert.h
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ */
+
+#ifndef DLMCONVERT_H
+#define DLMCONVERT_H
+
+enum dlm_status dlmconvert_master(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags, int type);
+enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags, int type);
+
+#endif
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
new file mode 100644
index 0000000000..be5e9ed7da
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -0,0 +1,894 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * dlmdebug.c
+ *
+ * debug functionality for the dlm
+ *
+ * Copyright (C) 2004, 2008 Oracle. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/sysctl.h>
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
+
+#include "dlmapi.h"
+#include "dlmcommon.h"
+#include "dlmdomain.h"
+#include "dlmdebug.h"
+
+#define MLOG_MASK_PREFIX ML_DLM
+#include "../cluster/masklog.h"
+
+static int stringify_lockname(const char *lockname, int locklen, char *buf,
+ int len);
+
+void dlm_print_one_lock_resource(struct dlm_lock_resource *res)
+{
+ spin_lock(&res->spinlock);
+ __dlm_print_one_lock_resource(res);
+ spin_unlock(&res->spinlock);
+}
+
+static void dlm_print_lockres_refmap(struct dlm_lock_resource *res)
+{
+ int bit;
+ assert_spin_locked(&res->spinlock);
+
+ printk(" refmap nodes: [ ");
+ bit = 0;
+ while (1) {
+ bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
+ if (bit >= O2NM_MAX_NODES)
+ break;
+ printk("%u ", bit);
+ bit++;
+ }
+ printk("], inflight=%u\n", res->inflight_locks);
+}
+
+static void __dlm_print_lock(struct dlm_lock *lock)
+{
+ spin_lock(&lock->spinlock);
+
+ printk(" type=%d, conv=%d, node=%u, cookie=%u:%llu, "
+ "ref=%u, ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c), "
+ "pending=(conv=%c,lock=%c,cancel=%c,unlock=%c)\n",
+ lock->ml.type, lock->ml.convert_type, lock->ml.node,
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+ kref_read(&lock->lock_refs),
+ (list_empty(&lock->ast_list) ? 'y' : 'n'),
+ (lock->ast_pending ? 'y' : 'n'),
+ (list_empty(&lock->bast_list) ? 'y' : 'n'),
+ (lock->bast_pending ? 'y' : 'n'),
+ (lock->convert_pending ? 'y' : 'n'),
+ (lock->lock_pending ? 'y' : 'n'),
+ (lock->cancel_pending ? 'y' : 'n'),
+ (lock->unlock_pending ? 'y' : 'n'));
+
+ spin_unlock(&lock->spinlock);
+}
+
+void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
+{
+ struct dlm_lock *lock;
+ char buf[DLM_LOCKID_NAME_MAX];
+
+ assert_spin_locked(&res->spinlock);
+
+ stringify_lockname(res->lockname.name, res->lockname.len,
+ buf, sizeof(buf));
+ printk("lockres: %s, owner=%u, state=%u\n",
+ buf, res->owner, res->state);
+ printk(" last used: %lu, refcnt: %u, on purge list: %s\n",
+ res->last_used, kref_read(&res->refs),
+ list_empty(&res->purge) ? "no" : "yes");
+ printk(" on dirty list: %s, on reco list: %s, "
+ "migrating pending: %s\n",
+ list_empty(&res->dirty) ? "no" : "yes",
+ list_empty(&res->recovering) ? "no" : "yes",
+ res->migration_pending ? "yes" : "no");
+ printk(" inflight locks: %d, asts reserved: %d\n",
+ res->inflight_locks, atomic_read(&res->asts_reserved));
+ dlm_print_lockres_refmap(res);
+ printk(" granted queue:\n");
+ list_for_each_entry(lock, &res->granted, list) {
+ __dlm_print_lock(lock);
+ }
+ printk(" converting queue:\n");
+ list_for_each_entry(lock, &res->converting, list) {
+ __dlm_print_lock(lock);
+ }
+ printk(" blocked queue:\n");
+ list_for_each_entry(lock, &res->blocked, list) {
+ __dlm_print_lock(lock);
+ }
+}
+
+void dlm_print_one_lock(struct dlm_lock *lockid)
+{
+ dlm_print_one_lock_resource(lockid->lockres);
+}
+EXPORT_SYMBOL_GPL(dlm_print_one_lock);
+
+static const char *dlm_errnames[] = {
+ [DLM_NORMAL] = "DLM_NORMAL",
+ [DLM_GRANTED] = "DLM_GRANTED",
+ [DLM_DENIED] = "DLM_DENIED",
+ [DLM_DENIED_NOLOCKS] = "DLM_DENIED_NOLOCKS",
+ [DLM_WORKING] = "DLM_WORKING",
+ [DLM_BLOCKED] = "DLM_BLOCKED",
+ [DLM_BLOCKED_ORPHAN] = "DLM_BLOCKED_ORPHAN",
+ [DLM_DENIED_GRACE_PERIOD] = "DLM_DENIED_GRACE_PERIOD",
+ [DLM_SYSERR] = "DLM_SYSERR",
+ [DLM_NOSUPPORT] = "DLM_NOSUPPORT",
+ [DLM_CANCELGRANT] = "DLM_CANCELGRANT",
+ [DLM_IVLOCKID] = "DLM_IVLOCKID",
+ [DLM_SYNC] = "DLM_SYNC",
+ [DLM_BADTYPE] = "DLM_BADTYPE",
+ [DLM_BADRESOURCE] = "DLM_BADRESOURCE",
+ [DLM_MAXHANDLES] = "DLM_MAXHANDLES",
+ [DLM_NOCLINFO] = "DLM_NOCLINFO",
+ [DLM_NOLOCKMGR] = "DLM_NOLOCKMGR",
+ [DLM_NOPURGED] = "DLM_NOPURGED",
+ [DLM_BADARGS] = "DLM_BADARGS",
+ [DLM_VOID] = "DLM_VOID",
+ [DLM_NOTQUEUED] = "DLM_NOTQUEUED",
+ [DLM_IVBUFLEN] = "DLM_IVBUFLEN",
+ [DLM_CVTUNGRANT] = "DLM_CVTUNGRANT",
+ [DLM_BADPARAM] = "DLM_BADPARAM",
+ [DLM_VALNOTVALID] = "DLM_VALNOTVALID",
+ [DLM_REJECTED] = "DLM_REJECTED",
+ [DLM_ABORT] = "DLM_ABORT",
+ [DLM_CANCEL] = "DLM_CANCEL",
+ [DLM_IVRESHANDLE] = "DLM_IVRESHANDLE",
+ [DLM_DEADLOCK] = "DLM_DEADLOCK",
+ [DLM_DENIED_NOASTS] = "DLM_DENIED_NOASTS",
+ [DLM_FORWARD] = "DLM_FORWARD",
+ [DLM_TIMEOUT] = "DLM_TIMEOUT",
+ [DLM_IVGROUPID] = "DLM_IVGROUPID",
+ [DLM_VERS_CONFLICT] = "DLM_VERS_CONFLICT",
+ [DLM_BAD_DEVICE_PATH] = "DLM_BAD_DEVICE_PATH",
+ [DLM_NO_DEVICE_PERMISSION] = "DLM_NO_DEVICE_PERMISSION",
+ [DLM_NO_CONTROL_DEVICE] = "DLM_NO_CONTROL_DEVICE",
+ [DLM_RECOVERING] = "DLM_RECOVERING",
+ [DLM_MIGRATING] = "DLM_MIGRATING",
+ [DLM_MAXSTATS] = "DLM_MAXSTATS",
+};
+
+static const char *dlm_errmsgs[] = {
+ [DLM_NORMAL] = "request in progress",
+ [DLM_GRANTED] = "request granted",
+ [DLM_DENIED] = "request denied",
+ [DLM_DENIED_NOLOCKS] = "request denied, out of system resources",
+ [DLM_WORKING] = "async request in progress",
+ [DLM_BLOCKED] = "lock request blocked",
+ [DLM_BLOCKED_ORPHAN] = "lock request blocked by a orphan lock",
+ [DLM_DENIED_GRACE_PERIOD] = "topological change in progress",
+ [DLM_SYSERR] = "system error",
+ [DLM_NOSUPPORT] = "unsupported",
+ [DLM_CANCELGRANT] = "can't cancel convert: already granted",
+ [DLM_IVLOCKID] = "bad lockid",
+ [DLM_SYNC] = "synchronous request granted",
+ [DLM_BADTYPE] = "bad resource type",
+ [DLM_BADRESOURCE] = "bad resource handle",
+ [DLM_MAXHANDLES] = "no more resource handles",
+ [DLM_NOCLINFO] = "can't contact cluster manager",
+ [DLM_NOLOCKMGR] = "can't contact lock manager",
+ [DLM_NOPURGED] = "can't contact purge daemon",
+ [DLM_BADARGS] = "bad api args",
+ [DLM_VOID] = "no status",
+ [DLM_NOTQUEUED] = "NOQUEUE was specified and request failed",
+ [DLM_IVBUFLEN] = "invalid resource name length",
+ [DLM_CVTUNGRANT] = "attempted to convert ungranted lock",
+ [DLM_BADPARAM] = "invalid lock mode specified",
+ [DLM_VALNOTVALID] = "value block has been invalidated",
+ [DLM_REJECTED] = "request rejected, unrecognized client",
+ [DLM_ABORT] = "blocked lock request cancelled",
+ [DLM_CANCEL] = "conversion request cancelled",
+ [DLM_IVRESHANDLE] = "invalid resource handle",
+ [DLM_DEADLOCK] = "deadlock recovery refused this request",
+ [DLM_DENIED_NOASTS] = "failed to allocate AST",
+ [DLM_FORWARD] = "request must wait for primary's response",
+ [DLM_TIMEOUT] = "timeout value for lock has expired",
+ [DLM_IVGROUPID] = "invalid group specification",
+ [DLM_VERS_CONFLICT] = "version conflicts prevent request handling",
+ [DLM_BAD_DEVICE_PATH] = "Locks device does not exist or path wrong",
+ [DLM_NO_DEVICE_PERMISSION] = "Client has insufficient perms for device",
+ [DLM_NO_CONTROL_DEVICE] = "Cannot set options on opened device",
+ [DLM_RECOVERING] = "lock resource being recovered",
+ [DLM_MIGRATING] = "lock resource being migrated",
+ [DLM_MAXSTATS] = "invalid error number",
+};
+
+const char *dlm_errmsg(enum dlm_status err)
+{
+ if (err >= DLM_MAXSTATS || err < 0)
+ return dlm_errmsgs[DLM_MAXSTATS];
+ return dlm_errmsgs[err];
+}
+EXPORT_SYMBOL_GPL(dlm_errmsg);
+
+const char *dlm_errname(enum dlm_status err)
+{
+ if (err >= DLM_MAXSTATS || err < 0)
+ return dlm_errnames[DLM_MAXSTATS];
+ return dlm_errnames[err];
+}
+EXPORT_SYMBOL_GPL(dlm_errname);
+
+/* NOTE: This function converts a lockname into a string. It uses knowledge
+ * of the format of the lockname that should be outside the purview of the dlm.
+ * We add it here only to make dlm debugging slightly easier.
+ *
+ * For more on lockname formats, please refer to dlmglue.c and ocfs2_lockid.h.
+ */
+static int stringify_lockname(const char *lockname, int locklen, char *buf,
+ int len)
+{
+ int out = 0;
+ __be64 inode_blkno_be;
+
+#define OCFS2_DENTRY_LOCK_INO_START 18
+ if (*lockname == 'N') {
+ memcpy((__be64 *)&inode_blkno_be,
+ (char *)&lockname[OCFS2_DENTRY_LOCK_INO_START],
+ sizeof(__be64));
+ out += scnprintf(buf + out, len - out, "%.*s%08x",
+ OCFS2_DENTRY_LOCK_INO_START - 1, lockname,
+ (unsigned int)be64_to_cpu(inode_blkno_be));
+ } else
+ out += scnprintf(buf + out, len - out, "%.*s",
+ locklen, lockname);
+ return out;
+}
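+
+/*
+ * Worked example (illustrative only): for a dentry lock name starting with
+ * 'N', the embedded big-endian inode block number at byte offset 18 is
+ * decoded, so the output is the first 17 bytes of the name followed by the
+ * low 32 bits of that block number as 8 hex digits. Any other lock name is
+ * printed verbatim.
+ */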
+
+static int stringify_nodemap(unsigned long *nodemap, int maxnodes,
+ char *buf, int len)
+{
+ int out = 0;
+ int i = -1;
+
+ while ((i = find_next_bit(nodemap, maxnodes, i + 1)) < maxnodes)
+ out += scnprintf(buf + out, len - out, "%d ", i);
+
+ return out;
+}
+
+static int dump_mle(struct dlm_master_list_entry *mle, char *buf, int len)
+{
+ int out = 0;
+ char *mle_type;
+
+ if (mle->type == DLM_MLE_BLOCK)
+ mle_type = "BLK";
+ else if (mle->type == DLM_MLE_MASTER)
+ mle_type = "MAS";
+ else
+ mle_type = "MIG";
+
+ out += stringify_lockname(mle->mname, mle->mnamelen, buf + out, len - out);
+ out += scnprintf(buf + out, len - out,
+ "\t%3s\tmas=%3u\tnew=%3u\tevt=%1d\tuse=%1d\tref=%3d\n",
+ mle_type, mle->master, mle->new_master,
+ !list_empty(&mle->hb_events),
+ !!mle->inuse,
+ kref_read(&mle->mle_refs));
+
+ out += scnprintf(buf + out, len - out, "Maybe=");
+ out += stringify_nodemap(mle->maybe_map, O2NM_MAX_NODES,
+ buf + out, len - out);
+ out += scnprintf(buf + out, len - out, "\n");
+
+ out += scnprintf(buf + out, len - out, "Vote=");
+ out += stringify_nodemap(mle->vote_map, O2NM_MAX_NODES,
+ buf + out, len - out);
+ out += scnprintf(buf + out, len - out, "\n");
+
+ out += scnprintf(buf + out, len - out, "Response=");
+ out += stringify_nodemap(mle->response_map, O2NM_MAX_NODES,
+ buf + out, len - out);
+ out += scnprintf(buf + out, len - out, "\n");
+
+ out += scnprintf(buf + out, len - out, "Node=");
+ out += stringify_nodemap(mle->node_map, O2NM_MAX_NODES,
+ buf + out, len - out);
+ out += scnprintf(buf + out, len - out, "\n");
+
+ out += scnprintf(buf + out, len - out, "\n");
+
+ return out;
+}
+
+void dlm_print_one_mle(struct dlm_master_list_entry *mle)
+{
+ char *buf;
+
+ buf = (char *) get_zeroed_page(GFP_ATOMIC);
+ if (buf) {
+ dump_mle(mle, buf, PAGE_SIZE - 1);
+ free_page((unsigned long)buf);
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static struct dentry *dlm_debugfs_root;
+
+#define DLM_DEBUGFS_DIR "o2dlm"
+#define DLM_DEBUGFS_DLM_STATE "dlm_state"
+#define DLM_DEBUGFS_LOCKING_STATE "locking_state"
+#define DLM_DEBUGFS_MLE_STATE "mle_state"
+#define DLM_DEBUGFS_PURGE_LIST "purge_list"
+
+/* begin - utils funcs */
+static int debug_release(struct inode *inode, struct file *file)
+{
+ free_page((unsigned long)file->private_data);
+ return 0;
+}
+
+static ssize_t debug_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
+ i_size_read(file->f_mapping->host));
+}
+/* end - util funcs */
+
+/* begin - purge list funcs */
+static int debug_purgelist_print(struct dlm_ctxt *dlm, char *buf, int len)
+{
+ struct dlm_lock_resource *res;
+ int out = 0;
+ unsigned long total = 0;
+
+ out += scnprintf(buf + out, len - out,
+ "Dumping Purgelist for Domain: %s\n", dlm->name);
+
+ spin_lock(&dlm->spinlock);
+ list_for_each_entry(res, &dlm->purge_list, purge) {
+ ++total;
+ if (len - out < 100)
+ continue;
+ spin_lock(&res->spinlock);
+ out += stringify_lockname(res->lockname.name,
+ res->lockname.len,
+ buf + out, len - out);
+ out += scnprintf(buf + out, len - out, "\t%ld\n",
+ (jiffies - res->last_used)/HZ);
+ spin_unlock(&res->spinlock);
+ }
+ spin_unlock(&dlm->spinlock);
+
+ out += scnprintf(buf + out, len - out, "Total on list: %lu\n", total);
+
+ return out;
+}
+
+static int debug_purgelist_open(struct inode *inode, struct file *file)
+{
+ struct dlm_ctxt *dlm = inode->i_private;
+ char *buf = NULL;
+
+ buf = (char *) get_zeroed_page(GFP_NOFS);
+ if (!buf)
+ goto bail;
+
+ i_size_write(inode, debug_purgelist_print(dlm, buf, PAGE_SIZE - 1));
+
+ file->private_data = buf;
+
+ return 0;
+bail:
+ return -ENOMEM;
+}
+
+static const struct file_operations debug_purgelist_fops = {
+ .open = debug_purgelist_open,
+ .release = debug_release,
+ .read = debug_read,
+ .llseek = generic_file_llseek,
+};
+/* end - purge list funcs */
+
+/* begin - debug mle funcs */
+static int debug_mle_print(struct dlm_ctxt *dlm, char *buf, int len)
+{
+ struct dlm_master_list_entry *mle;
+ struct hlist_head *bucket;
+ int i, out = 0;
+ unsigned long total = 0, longest = 0, bucket_count = 0;
+
+ out += scnprintf(buf + out, len - out,
+ "Dumping MLEs for Domain: %s\n", dlm->name);
+
+ spin_lock(&dlm->master_lock);
+ for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+ bucket = dlm_master_hash(dlm, i);
+ hlist_for_each_entry(mle, bucket, master_hash_node) {
+ ++total;
+ ++bucket_count;
+ if (len - out < 200)
+ continue;
+ out += dump_mle(mle, buf + out, len - out);
+ }
+ longest = max(longest, bucket_count);
+ bucket_count = 0;
+ }
+ spin_unlock(&dlm->master_lock);
+
+ out += scnprintf(buf + out, len - out,
+ "Total: %lu, Longest: %lu\n", total, longest);
+ return out;
+}
+
+static int debug_mle_open(struct inode *inode, struct file *file)
+{
+ struct dlm_ctxt *dlm = inode->i_private;
+ char *buf = NULL;
+
+ buf = (char *) get_zeroed_page(GFP_NOFS);
+ if (!buf)
+ goto bail;
+
+ i_size_write(inode, debug_mle_print(dlm, buf, PAGE_SIZE - 1));
+
+ file->private_data = buf;
+
+ return 0;
+bail:
+ return -ENOMEM;
+}
+
+static const struct file_operations debug_mle_fops = {
+ .open = debug_mle_open,
+ .release = debug_release,
+ .read = debug_read,
+ .llseek = generic_file_llseek,
+};
+
+/* end - debug mle funcs */
+
+/* begin - debug lockres funcs */
+static int dump_lock(struct dlm_lock *lock, int list_type, char *buf, int len)
+{
+ int out;
+
+#define DEBUG_LOCK_VERSION 1
+ spin_lock(&lock->spinlock);
+ out = scnprintf(buf, len, "LOCK:%d,%d,%d,%d,%d,%d:%lld,%d,%d,%d,%d,%d,"
+ "%d,%d,%d,%d\n",
+ DEBUG_LOCK_VERSION,
+ list_type, lock->ml.type, lock->ml.convert_type,
+ lock->ml.node,
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+ !list_empty(&lock->ast_list),
+ !list_empty(&lock->bast_list),
+ lock->ast_pending, lock->bast_pending,
+ lock->convert_pending, lock->lock_pending,
+ lock->cancel_pending, lock->unlock_pending,
+ kref_read(&lock->lock_refs));
+ spin_unlock(&lock->spinlock);
+
+ return out;
+}
+
+static int dump_lockres(struct dlm_lock_resource *res, char *buf, int len)
+{
+ struct dlm_lock *lock;
+ int i;
+ int out = 0;
+
+ out += scnprintf(buf + out, len - out, "NAME:");
+ out += stringify_lockname(res->lockname.name, res->lockname.len,
+ buf + out, len - out);
+ out += scnprintf(buf + out, len - out, "\n");
+
+#define DEBUG_LRES_VERSION 1
+ out += scnprintf(buf + out, len - out,
+ "LRES:%d,%d,%d,%ld,%d,%d,%d,%d,%d,%d,%d\n",
+ DEBUG_LRES_VERSION,
+ res->owner, res->state, res->last_used,
+ !list_empty(&res->purge),
+ !list_empty(&res->dirty),
+ !list_empty(&res->recovering),
+ res->inflight_locks, res->migration_pending,
+ atomic_read(&res->asts_reserved),
+ kref_read(&res->refs));
+
+ /* refmap */
+ out += scnprintf(buf + out, len - out, "RMAP:");
+ out += stringify_nodemap(res->refmap, O2NM_MAX_NODES,
+ buf + out, len - out);
+ out += scnprintf(buf + out, len - out, "\n");
+
+ /* lvb */
+ out += scnprintf(buf + out, len - out, "LVBX:");
+ for (i = 0; i < DLM_LVB_LEN; i++)
+ out += scnprintf(buf + out, len - out,
+ "%02x", (unsigned char)res->lvb[i]);
+ out += scnprintf(buf + out, len - out, "\n");
+
+ /* granted */
+ list_for_each_entry(lock, &res->granted, list)
+ out += dump_lock(lock, 0, buf + out, len - out);
+
+ /* converting */
+ list_for_each_entry(lock, &res->converting, list)
+ out += dump_lock(lock, 1, buf + out, len - out);
+
+ /* blocked */
+ list_for_each_entry(lock, &res->blocked, list)
+ out += dump_lock(lock, 2, buf + out, len - out);
+
+ out += scnprintf(buf + out, len - out, "\n");
+
+ return out;
+}
+
+static void *lockres_seq_start(struct seq_file *m, loff_t *pos)
+{
+ struct debug_lockres *dl = m->private;
+ struct dlm_ctxt *dlm = dl->dl_ctxt;
+ struct dlm_lock_resource *oldres = dl->dl_res;
+ struct dlm_lock_resource *res = NULL, *iter;
+ struct list_head *track_list;
+
+ spin_lock(&dlm->track_lock);
+ if (oldres)
+ track_list = &oldres->tracking;
+ else {
+ track_list = &dlm->tracking_list;
+ if (list_empty(track_list)) {
+ dl = NULL;
+ spin_unlock(&dlm->track_lock);
+ goto bail;
+ }
+ }
+
+ list_for_each_entry(iter, track_list, tracking) {
+ if (&iter->tracking != &dlm->tracking_list) {
+ dlm_lockres_get(iter);
+ res = iter;
+ }
+ break;
+ }
+ spin_unlock(&dlm->track_lock);
+
+ if (oldres)
+ dlm_lockres_put(oldres);
+
+ dl->dl_res = res;
+
+ if (res) {
+ spin_lock(&res->spinlock);
+ dump_lockres(res, dl->dl_buf, dl->dl_len - 1);
+ spin_unlock(&res->spinlock);
+ } else
+ dl = NULL;
+
+bail:
+ /* passed to seq_show */
+ return dl;
+}
+
+static void lockres_seq_stop(struct seq_file *m, void *v)
+{
+}
+
+static void *lockres_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ return NULL;
+}
+
+static int lockres_seq_show(struct seq_file *s, void *v)
+{
+ struct debug_lockres *dl = (struct debug_lockres *)v;
+
+ seq_printf(s, "%s", dl->dl_buf);
+
+ return 0;
+}
+
+static const struct seq_operations debug_lockres_ops = {
+ .start = lockres_seq_start,
+ .stop = lockres_seq_stop,
+ .next = lockres_seq_next,
+ .show = lockres_seq_show,
+};
+
+static int debug_lockres_open(struct inode *inode, struct file *file)
+{
+ struct dlm_ctxt *dlm = inode->i_private;
+ struct debug_lockres *dl;
+ void *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ goto bail;
+
+ dl = __seq_open_private(file, &debug_lockres_ops, sizeof(*dl));
+ if (!dl)
+ goto bailfree;
+
+ dl->dl_len = PAGE_SIZE;
+ dl->dl_buf = buf;
+
+ dlm_grab(dlm);
+ dl->dl_ctxt = dlm;
+
+ return 0;
+
+bailfree:
+ kfree(buf);
+bail:
+ mlog_errno(-ENOMEM);
+ return -ENOMEM;
+}
+
+static int debug_lockres_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct debug_lockres *dl = (struct debug_lockres *)seq->private;
+
+ if (dl->dl_res)
+ dlm_lockres_put(dl->dl_res);
+ dlm_put(dl->dl_ctxt);
+ kfree(dl->dl_buf);
+ return seq_release_private(inode, file);
+}
+
+static const struct file_operations debug_lockres_fops = {
+ .open = debug_lockres_open,
+ .release = debug_lockres_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+/* end - debug lockres funcs */
+
+/* begin - debug state funcs */
+static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len)
+{
+ int out = 0;
+ struct dlm_reco_node_data *node;
+ char *state;
+ int cur_mles = 0, tot_mles = 0;
+ int i;
+
+ spin_lock(&dlm->spinlock);
+
+ switch (dlm->dlm_state) {
+ case DLM_CTXT_NEW:
+ state = "NEW"; break;
+ case DLM_CTXT_JOINED:
+ state = "JOINED"; break;
+ case DLM_CTXT_IN_SHUTDOWN:
+ state = "SHUTDOWN"; break;
+ case DLM_CTXT_LEAVING:
+ state = "LEAVING"; break;
+ default:
+ state = "UNKNOWN"; break;
+ }
+
+ /* Domain: xxxxxxxxxx Key: 0xdfbac769 */
+ out += scnprintf(buf + out, len - out,
+ "Domain: %s Key: 0x%08x Protocol: %d.%d\n",
+ dlm->name, dlm->key, dlm->dlm_locking_proto.pv_major,
+ dlm->dlm_locking_proto.pv_minor);
+
+ /* Thread Pid: xxx Node: xxx State: xxxxx */
+ out += scnprintf(buf + out, len - out,
+ "Thread Pid: %d Node: %d State: %s\n",
+ task_pid_nr(dlm->dlm_thread_task), dlm->node_num, state);
+
+ /* Number of Joins: xxx Joining Node: xxx */
+ out += scnprintf(buf + out, len - out,
+ "Number of Joins: %d Joining Node: %d\n",
+ dlm->num_joins, dlm->joining_node);
+
+ /* Domain Map: xx xx xx */
+ out += scnprintf(buf + out, len - out, "Domain Map: ");
+ out += stringify_nodemap(dlm->domain_map, O2NM_MAX_NODES,
+ buf + out, len - out);
+ out += scnprintf(buf + out, len - out, "\n");
+
+ /* Exit Domain Map: xx xx xx */
+ out += scnprintf(buf + out, len - out, "Exit Domain Map: ");
+ out += stringify_nodemap(dlm->exit_domain_map, O2NM_MAX_NODES,
+ buf + out, len - out);
+ out += scnprintf(buf + out, len - out, "\n");
+
+ /* Live Map: xx xx xx */
+ out += scnprintf(buf + out, len - out, "Live Map: ");
+ out += stringify_nodemap(dlm->live_nodes_map, O2NM_MAX_NODES,
+ buf + out, len - out);
+ out += scnprintf(buf + out, len - out, "\n");
+
+ /* Lock Resources: xxx (xxx) */
+ out += scnprintf(buf + out, len - out,
+ "Lock Resources: %d (%d)\n",
+ atomic_read(&dlm->res_cur_count),
+ atomic_read(&dlm->res_tot_count));
+
+ for (i = 0; i < DLM_MLE_NUM_TYPES; ++i)
+ tot_mles += atomic_read(&dlm->mle_tot_count[i]);
+
+ for (i = 0; i < DLM_MLE_NUM_TYPES; ++i)
+ cur_mles += atomic_read(&dlm->mle_cur_count[i]);
+
+ /* MLEs: xxx (xxx) */
+ out += scnprintf(buf + out, len - out,
+ "MLEs: %d (%d)\n", cur_mles, tot_mles);
+
+ /* Blocking: xxx (xxx) */
+ out += scnprintf(buf + out, len - out,
+ " Blocking: %d (%d)\n",
+ atomic_read(&dlm->mle_cur_count[DLM_MLE_BLOCK]),
+ atomic_read(&dlm->mle_tot_count[DLM_MLE_BLOCK]));
+
+ /* Mastery: xxx (xxx) */
+ out += scnprintf(buf + out, len - out,
+ " Mastery: %d (%d)\n",
+ atomic_read(&dlm->mle_cur_count[DLM_MLE_MASTER]),
+ atomic_read(&dlm->mle_tot_count[DLM_MLE_MASTER]));
+
+ /* Migration: xxx (xxx) */
+ out += scnprintf(buf + out, len - out,
+ " Migration: %d (%d)\n",
+ atomic_read(&dlm->mle_cur_count[DLM_MLE_MIGRATION]),
+ atomic_read(&dlm->mle_tot_count[DLM_MLE_MIGRATION]));
+
+ /* Lists: Dirty=Empty Purge=InUse PendingASTs=Empty ... */
+ out += scnprintf(buf + out, len - out,
+ "Lists: Dirty=%s Purge=%s PendingASTs=%s "
+ "PendingBASTs=%s\n",
+ (list_empty(&dlm->dirty_list) ? "Empty" : "InUse"),
+ (list_empty(&dlm->purge_list) ? "Empty" : "InUse"),
+ (list_empty(&dlm->pending_asts) ? "Empty" : "InUse"),
+ (list_empty(&dlm->pending_basts) ? "Empty" : "InUse"));
+
+ /* Purge Count: xxx Refs: xxx */
+ out += scnprintf(buf + out, len - out,
+ "Purge Count: %d Refs: %d\n", dlm->purge_count,
+ kref_read(&dlm->dlm_refs));
+
+ /* Dead Node: xxx */
+ out += scnprintf(buf + out, len - out,
+ "Dead Node: %d\n", dlm->reco.dead_node);
+
+ /* What about DLM_RECO_STATE_FINALIZE? */
+ if (dlm->reco.state == DLM_RECO_STATE_ACTIVE)
+ state = "ACTIVE";
+ else
+ state = "INACTIVE";
+
+ /* Recovery Pid: xxxx Master: xxx State: xxxx */
+ out += scnprintf(buf + out, len - out,
+ "Recovery Pid: %d Master: %d State: %s\n",
+ task_pid_nr(dlm->dlm_reco_thread_task),
+ dlm->reco.new_master, state);
+
+ /* Recovery Map: xx xx */
+ out += scnprintf(buf + out, len - out, "Recovery Map: ");
+ out += stringify_nodemap(dlm->recovery_map, O2NM_MAX_NODES,
+ buf + out, len - out);
+ out += scnprintf(buf + out, len - out, "\n");
+
+ /* Recovery Node State: */
+ out += scnprintf(buf + out, len - out, "Recovery Node State:\n");
+ list_for_each_entry(node, &dlm->reco.node_data, list) {
+ switch (node->state) {
+ case DLM_RECO_NODE_DATA_INIT:
+ state = "INIT";
+ break;
+ case DLM_RECO_NODE_DATA_REQUESTING:
+ state = "REQUESTING";
+ break;
+ case DLM_RECO_NODE_DATA_DEAD:
+ state = "DEAD";
+ break;
+ case DLM_RECO_NODE_DATA_RECEIVING:
+ state = "RECEIVING";
+ break;
+ case DLM_RECO_NODE_DATA_REQUESTED:
+ state = "REQUESTED";
+ break;
+ case DLM_RECO_NODE_DATA_DONE:
+ state = "DONE";
+ break;
+ case DLM_RECO_NODE_DATA_FINALIZE_SENT:
+ state = "FINALIZE-SENT";
+ break;
+ default:
+ state = "BAD";
+ break;
+ }
+ out += scnprintf(buf + out, len - out, "\t%u - %s\n",
+ node->node_num, state);
+ }
+
+ spin_unlock(&dlm->spinlock);
+
+ return out;
+}
+
+static int debug_state_open(struct inode *inode, struct file *file)
+{
+ struct dlm_ctxt *dlm = inode->i_private;
+ char *buf = NULL;
+
+ buf = (char *) get_zeroed_page(GFP_NOFS);
+ if (!buf)
+ goto bail;
+
+ i_size_write(inode, debug_state_print(dlm, buf, PAGE_SIZE - 1));
+
+ file->private_data = buf;
+
+ return 0;
+bail:
+ return -ENOMEM;
+}
+
+static const struct file_operations debug_state_fops = {
+ .open = debug_state_open,
+ .release = debug_release,
+ .read = debug_read,
+ .llseek = generic_file_llseek,
+};
+/* end - debug state funcs */
+
+/* files in subroot */
+void dlm_debug_init(struct dlm_ctxt *dlm)
+{
+ /* for dumping dlm_ctxt */
+ debugfs_create_file(DLM_DEBUGFS_DLM_STATE, S_IFREG|S_IRUSR,
+ dlm->dlm_debugfs_subroot, dlm, &debug_state_fops);
+
+ /* for dumping lockres */
+ debugfs_create_file(DLM_DEBUGFS_LOCKING_STATE, S_IFREG|S_IRUSR,
+ dlm->dlm_debugfs_subroot, dlm, &debug_lockres_fops);
+
+ /* for dumping mles */
+ debugfs_create_file(DLM_DEBUGFS_MLE_STATE, S_IFREG|S_IRUSR,
+ dlm->dlm_debugfs_subroot, dlm, &debug_mle_fops);
+
+ /* for dumping lockres on the purge list */
+ debugfs_create_file(DLM_DEBUGFS_PURGE_LIST, S_IFREG|S_IRUSR,
+ dlm->dlm_debugfs_subroot, dlm,
+ &debug_purgelist_fops);
+}
+
+/* subroot - domain dir */
+void dlm_create_debugfs_subroot(struct dlm_ctxt *dlm)
+{
+ dlm->dlm_debugfs_subroot = debugfs_create_dir(dlm->name,
+ dlm_debugfs_root);
+}
+
+void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm)
+{
+ debugfs_remove_recursive(dlm->dlm_debugfs_subroot);
+}
+
+/* debugfs root */
+void dlm_create_debugfs_root(void)
+{
+ dlm_debugfs_root = debugfs_create_dir(DLM_DEBUGFS_DIR, NULL);
+}
+
+void dlm_destroy_debugfs_root(void)
+{
+ debugfs_remove(dlm_debugfs_root);
+}
+#endif /* CONFIG_DEBUG_FS */
diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h
new file mode 100644
index 0000000000..e08f7357e7
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmdebug.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * dlmdebug.h
+ *
+ * Copyright (C) 2008 Oracle. All rights reserved.
+ */
+
+#ifndef DLMDEBUG_H
+#define DLMDEBUG_H
+
+void dlm_print_one_mle(struct dlm_master_list_entry *mle);
+
+#ifdef CONFIG_DEBUG_FS
+
+struct debug_lockres {
+ int dl_len;
+ char *dl_buf;
+ struct dlm_ctxt *dl_ctxt;
+ struct dlm_lock_resource *dl_res;
+};
+
+void dlm_debug_init(struct dlm_ctxt *dlm);
+
+void dlm_create_debugfs_subroot(struct dlm_ctxt *dlm);
+void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm);
+
+void dlm_create_debugfs_root(void);
+void dlm_destroy_debugfs_root(void);
+
+#else
+
+static inline void dlm_debug_init(struct dlm_ctxt *dlm)
+{
+}
+static inline void dlm_create_debugfs_subroot(struct dlm_ctxt *dlm)
+{
+}
+static inline void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm)
+{
+}
+static inline void dlm_create_debugfs_root(void)
+{
+}
+static inline void dlm_destroy_debugfs_root(void)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
+#endif /* DLMDEBUG_H */
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
new file mode 100644
index 0000000000..5c04dde999
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -0,0 +1,2362 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * dlmdomain.c
+ *
+ * defines domain join / leave apis
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+#include <linux/sched/signal.h>
+
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
+
+#include "dlmapi.h"
+#include "dlmcommon.h"
+#include "dlmdomain.h"
+#include "dlmdebug.h"
+
+#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN)
+#include "../cluster/masklog.h"
+
+/*
+ * ocfs2 node maps are arrays of long ints, which cannot be sent freely
+ * across the wire due to endianness issues. To work around this, we convert
+ * the long ints to byte arrays. The following three routines are helpers to
+ * set/test/copy bits within those byte arrays.
+ */
+static inline void byte_set_bit(u8 nr, u8 map[])
+{
+ map[nr >> 3] |= (1UL << (nr & 7));
+}
+
+static inline int byte_test_bit(u8 nr, u8 map[])
+{
+ return ((1UL << (nr & 7)) & (map[nr >> 3])) != 0;
+}
+
+static inline void byte_copymap(u8 dmap[], unsigned long smap[],
+ unsigned int sz)
+{
+ unsigned int nn;
+
+ if (!sz)
+ return;
+
+ memset(dmap, 0, ((sz + 7) >> 3));
+ for (nn = 0 ; nn < sz; nn++)
+ if (test_bit(nn, smap))
+ byte_set_bit(nn, dmap);
+}
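+
+/*
+ * Minimal usage sketch for the helpers above (illustrative only): pack a
+ * local nodemap into a byte array before sending it on the wire, then test
+ * a bit on the packed copy.
+ *
+ *	unsigned long smap[BITS_TO_LONGS(O2NM_MAX_NODES)] = { 0 };
+ *	u8 dmap[(O2NM_MAX_NODES + 7) >> 3];
+ *
+ *	set_bit(3, smap);
+ *	byte_copymap(dmap, smap, O2NM_MAX_NODES);
+ *	BUG_ON(!byte_test_bit(3, dmap));
+ */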
+
+static void dlm_free_pagevec(void **vec, int pages)
+{
+ while (pages--)
+ free_page((unsigned long)vec[pages]);
+ kfree(vec);
+}
+
+static void **dlm_alloc_pagevec(int pages)
+{
+ void **vec = kmalloc_array(pages, sizeof(void *), GFP_KERNEL);
+ int i;
+
+ if (!vec)
+ return NULL;
+
+ for (i = 0; i < pages; i++)
+ if (!(vec[i] = (void *)__get_free_page(GFP_KERNEL)))
+ goto out_free;
+
+ mlog(0, "Allocated DLM hash pagevec; %d pages (%lu expected), %lu buckets per page\n",
+ pages, (unsigned long)DLM_HASH_PAGES,
+ (unsigned long)DLM_BUCKETS_PER_PAGE);
+ return vec;
+out_free:
+ dlm_free_pagevec(vec, i);
+ return NULL;
+}
+
+/*
+ *
+ * spinlock lock ordering: if multiple locks are needed, obey this ordering:
+ * dlm_domain_lock
+ * struct dlm_ctxt->spinlock
+ * struct dlm_lock_resource->spinlock
+ * struct dlm_ctxt->master_lock
+ * struct dlm_ctxt->ast_lock
+ * dlm_master_list_entry->spinlock
+ * dlm_lock->spinlock
+ *
+ */
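+
+/*
+ * Illustrative nesting sketch for the ordering above (mirroring how, e.g.,
+ * dlm_lookup_lockres() nests the domain and resource locks): take them
+ * outer to inner and release in reverse:
+ *
+ *	spin_lock(&dlm->spinlock);
+ *	spin_lock(&res->spinlock);
+ *	...
+ *	spin_unlock(&res->spinlock);
+ *	spin_unlock(&dlm->spinlock);
+ */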
+
+DEFINE_SPINLOCK(dlm_domain_lock);
+LIST_HEAD(dlm_domains);
+static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events);
+
+/*
+ * The supported protocol version for DLM communication. Running domains
+ * will have a negotiated version with the same major number and a minor
+ * number equal or smaller. The dlm_ctxt->dlm_locking_proto field should
+ * be used to determine what a running domain is actually using.
+ *
+ * New in version 1.1:
+ * - Message DLM_QUERY_REGION added to support global heartbeat
+ * - Message DLM_QUERY_NODEINFO added to allow online node removes
+ * New in version 1.2:
+ * - Message DLM_BEGIN_EXIT_DOMAIN_MSG added to mark start of exit domain
+ * New in version 1.3:
+ * - Message DLM_DEREF_LOCKRES_DONE added to inform non-master that the
+ * refmap is cleared
+ */
+static const struct dlm_protocol_version dlm_protocol = {
+ .pv_major = 1,
+ .pv_minor = 3,
+};
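+
+/*
+ * Negotiation example (illustrative only): a node built with 1.3 joining a
+ * domain currently running 1.1 talks 1.1 (same major, smaller minor wins);
+ * a node whose major number differs, say 2.0, is refused the join outright.
+ */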
+
+#define DLM_DOMAIN_BACKOFF_MS 200
+
+static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data);
+static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data);
+static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data);
+static int dlm_query_region_handler(struct o2net_msg *msg, u32 len,
+ void *data, void **ret_data);
+static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data);
+static int dlm_protocol_compare(struct dlm_protocol_version *existing,
+ struct dlm_protocol_version *request);
+
+static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);
+
+void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
+{
+ if (hlist_unhashed(&res->hash_node))
+ return;
+
+ mlog(0, "%s: Unhash res %.*s\n", dlm->name, res->lockname.len,
+ res->lockname.name);
+ hlist_del_init(&res->hash_node);
+ dlm_lockres_put(res);
+}
+
+void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
+{
+ struct hlist_head *bucket;
+
+ assert_spin_locked(&dlm->spinlock);
+
+ bucket = dlm_lockres_hash(dlm, res->lockname.hash);
+
+ /* get a reference for our hashtable */
+ dlm_lockres_get(res);
+
+ hlist_add_head(&res->hash_node, bucket);
+
+ mlog(0, "%s: Hash res %.*s\n", dlm->name, res->lockname.len,
+ res->lockname.name);
+}
+
+struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
+ const char *name,
+ unsigned int len,
+ unsigned int hash)
+{
+ struct hlist_head *bucket;
+ struct dlm_lock_resource *res;
+
+ mlog(0, "%.*s\n", len, name);
+
+ assert_spin_locked(&dlm->spinlock);
+
+ bucket = dlm_lockres_hash(dlm, hash);
+
+ hlist_for_each_entry(res, bucket, hash_node) {
+ if (res->lockname.name[0] != name[0])
+ continue;
+ if (unlikely(res->lockname.len != len))
+ continue;
+ if (memcmp(res->lockname.name + 1, name + 1, len - 1))
+ continue;
+ dlm_lockres_get(res);
+ return res;
+ }
+ return NULL;
+}
+
+/* intended to be called by functions which do not care about lock
+ * resources which are being purged (most net _handler functions).
+ * this will return NULL for any lock resource which is found but
+ * currently in the process of dropping its mastery reference.
+ * use __dlm_lookup_lockres_full when you need the lock resource
+ * regardless (e.g. dlm_get_lock_resource) */
+struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
+ const char *name,
+ unsigned int len,
+ unsigned int hash)
+{
+ struct dlm_lock_resource *res = NULL;
+
+ mlog(0, "%.*s\n", len, name);
+
+ assert_spin_locked(&dlm->spinlock);
+
+ res = __dlm_lookup_lockres_full(dlm, name, len, hash);
+ if (res) {
+ spin_lock(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_DROPPING_REF) {
+ spin_unlock(&res->spinlock);
+ dlm_lockres_put(res);
+ return NULL;
+ }
+ spin_unlock(&res->spinlock);
+ }
+
+ return res;
+}
+
+struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
+ const char *name,
+ unsigned int len)
+{
+ struct dlm_lock_resource *res;
+ unsigned int hash = dlm_lockid_hash(name, len);
+
+ spin_lock(&dlm->spinlock);
+ res = __dlm_lookup_lockres(dlm, name, len, hash);
+ spin_unlock(&dlm->spinlock);
+ return res;
+}
+
+static struct dlm_ctxt * __dlm_lookup_domain_full(const char *domain, int len)
+{
+ struct dlm_ctxt *tmp;
+
+ assert_spin_locked(&dlm_domain_lock);
+
+ /* tmp->name here is always NULL terminated,
+ * but domain may not be! */
+ list_for_each_entry(tmp, &dlm_domains, list) {
+ if (strlen(tmp->name) == len &&
+ memcmp(tmp->name, domain, len)==0)
+ return tmp;
+ }
+
+ return NULL;
+}
+
+/* For null terminated domain strings ONLY */
+static struct dlm_ctxt * __dlm_lookup_domain(const char *domain)
+{
+ assert_spin_locked(&dlm_domain_lock);
+
+ return __dlm_lookup_domain_full(domain, strlen(domain));
+}
+
+
+/* returns true on one of two conditions:
+ * 1) the domain does not exist
+ * 2) the domain exists and its state is "joined" */
+static int dlm_wait_on_domain_helper(const char *domain)
+{
+ int ret = 0;
+ struct dlm_ctxt *tmp = NULL;
+
+ spin_lock(&dlm_domain_lock);
+
+ tmp = __dlm_lookup_domain(domain);
+ if (!tmp)
+ ret = 1;
+ else if (tmp->dlm_state == DLM_CTXT_JOINED)
+ ret = 1;
+
+ spin_unlock(&dlm_domain_lock);
+ return ret;
+}
+
+static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm)
+{
+ dlm_destroy_debugfs_subroot(dlm);
+
+ if (dlm->lockres_hash)
+ dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES);
+
+ if (dlm->master_hash)
+ dlm_free_pagevec((void **)dlm->master_hash, DLM_HASH_PAGES);
+
+ kfree(dlm->name);
+ kfree(dlm);
+}
+
+/* A little strange - this function will be called while holding
+ * dlm_domain_lock and is expected to be holding it on the way out. We
+ * will however drop and reacquire it multiple times */
+static void dlm_ctxt_release(struct kref *kref)
+{
+ struct dlm_ctxt *dlm;
+
+ dlm = container_of(kref, struct dlm_ctxt, dlm_refs);
+
+ BUG_ON(dlm->num_joins);
+ BUG_ON(dlm->dlm_state == DLM_CTXT_JOINED);
+
+ /* we may still be in the list if we hit an error during join. */
+ list_del_init(&dlm->list);
+
+ spin_unlock(&dlm_domain_lock);
+
+ mlog(0, "freeing memory from domain %s\n", dlm->name);
+
+ wake_up(&dlm_domain_events);
+
+ dlm_free_ctxt_mem(dlm);
+
+ spin_lock(&dlm_domain_lock);
+}
+
+void dlm_put(struct dlm_ctxt *dlm)
+{
+ spin_lock(&dlm_domain_lock);
+ kref_put(&dlm->dlm_refs, dlm_ctxt_release);
+ spin_unlock(&dlm_domain_lock);
+}
+
+static void __dlm_get(struct dlm_ctxt *dlm)
+{
+ kref_get(&dlm->dlm_refs);
+}
+
+/* given a questionable reference to a dlm object, gets a reference if
+ * it can find it in the list, otherwise returns NULL in which case
+ * you shouldn't trust your pointer. */
+struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm)
+{
+ struct dlm_ctxt *target;
+ struct dlm_ctxt *ret = NULL;
+
+ spin_lock(&dlm_domain_lock);
+
+ list_for_each_entry(target, &dlm_domains, list) {
+ if (target == dlm) {
+ __dlm_get(target);
+ ret = target;
+ break;
+ }
+ }
+
+ spin_unlock(&dlm_domain_lock);
+
+ return ret;
+}
+
+int dlm_domain_fully_joined(struct dlm_ctxt *dlm)
+{
+ int ret;
+
+ spin_lock(&dlm_domain_lock);
+ ret = (dlm->dlm_state == DLM_CTXT_JOINED) ||
+ (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN);
+ spin_unlock(&dlm_domain_lock);
+
+ return ret;
+}
+
+static void dlm_destroy_dlm_worker(struct dlm_ctxt *dlm)
+{
+ if (dlm->dlm_worker) {
+ destroy_workqueue(dlm->dlm_worker);
+ dlm->dlm_worker = NULL;
+ }
+}
+
+static void dlm_complete_dlm_shutdown(struct dlm_ctxt *dlm)
+{
+ dlm_unregister_domain_handlers(dlm);
+ dlm_complete_thread(dlm);
+ dlm_complete_recovery_thread(dlm);
+ dlm_destroy_dlm_worker(dlm);
+
+ /* We've left the domain. Now we can take ourselves out of the
+ * list and allow the kref stuff to help us free the
+ * memory. */
+ spin_lock(&dlm_domain_lock);
+ list_del_init(&dlm->list);
+ spin_unlock(&dlm_domain_lock);
+
+ /* Wake up anyone waiting for us to remove this domain */
+ wake_up(&dlm_domain_events);
+}
+
+static int dlm_migrate_all_locks(struct dlm_ctxt *dlm)
+{
+ int i, num, n, ret = 0;
+ struct dlm_lock_resource *res;
+ struct hlist_node *iter;
+ struct hlist_head *bucket;
+ int dropped;
+
+ mlog(0, "Migrating locks from domain %s\n", dlm->name);
+
+ num = 0;
+ spin_lock(&dlm->spinlock);
+ for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+redo_bucket:
+ n = 0;
+ bucket = dlm_lockres_hash(dlm, i);
+ iter = bucket->first;
+ while (iter) {
+ n++;
+ res = hlist_entry(iter, struct dlm_lock_resource,
+ hash_node);
+ dlm_lockres_get(res);
+ /* migrate, if necessary. this will drop the dlm
+ * spinlock and retake it if it does migration. */
+ dropped = dlm_empty_lockres(dlm, res);
+
+ spin_lock(&res->spinlock);
+ if (dropped)
+ __dlm_lockres_calc_usage(dlm, res);
+ else
+ iter = res->hash_node.next;
+ spin_unlock(&res->spinlock);
+
+ dlm_lockres_put(res);
+
+ if (dropped) {
+ cond_resched_lock(&dlm->spinlock);
+ goto redo_bucket;
+ }
+ }
+ cond_resched_lock(&dlm->spinlock);
+ num += n;
+ }
+
+ if (!num) {
+ if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) {
+ mlog(0, "%s: perhaps there are more lock resources "
+ "need to be migrated after dlm recovery\n", dlm->name);
+ ret = -EAGAIN;
+ } else {
+ mlog(0, "%s: we won't do dlm recovery after migrating "
+ "all lock resources\n", dlm->name);
+ dlm->migrate_done = 1;
+ }
+ }
+
+ spin_unlock(&dlm->spinlock);
+ wake_up(&dlm->dlm_thread_wq);
+
+ /* let the dlm thread take care of purging, keep scanning until
+ * nothing remains in the hash */
+ if (num) {
+ mlog(0, "%s: %d lock resources in hash last pass\n",
+ dlm->name, num);
+ ret = -EAGAIN;
+ }
+ mlog(0, "DONE Migrating locks from domain %s\n", dlm->name);
+ return ret;
+}
+
+static int dlm_no_joining_node(struct dlm_ctxt *dlm)
+{
+ int ret;
+
+ spin_lock(&dlm->spinlock);
+ ret = dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN;
+ spin_unlock(&dlm->spinlock);
+
+ return ret;
+}
+
+static int dlm_begin_exit_domain_handler(struct o2net_msg *msg, u32 len,
+ void *data, void **ret_data)
+{
+ struct dlm_ctxt *dlm = data;
+ unsigned int node;
+ struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf;
+
+ if (!dlm_grab(dlm))
+ return 0;
+
+ node = exit_msg->node_idx;
+ mlog(0, "%s: Node %u sent a begin exit domain message\n", dlm->name, node);
+
+ spin_lock(&dlm->spinlock);
+ set_bit(node, dlm->exit_domain_map);
+ spin_unlock(&dlm->spinlock);
+
+ dlm_put(dlm);
+
+ return 0;
+}
+
+static void dlm_mark_domain_leaving(struct dlm_ctxt *dlm)
+{
+ /* Yikes, a double spinlock! I need domain_lock for the dlm
+ * state and the dlm spinlock for join state... Sorry! */
+again:
+ spin_lock(&dlm_domain_lock);
+ spin_lock(&dlm->spinlock);
+
+ if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
+ mlog(0, "Node %d is joining, we wait on it.\n",
+ dlm->joining_node);
+ spin_unlock(&dlm->spinlock);
+ spin_unlock(&dlm_domain_lock);
+
+ wait_event(dlm->dlm_join_events, dlm_no_joining_node(dlm));
+ goto again;
+ }
+
+ dlm->dlm_state = DLM_CTXT_LEAVING;
+ spin_unlock(&dlm->spinlock);
+ spin_unlock(&dlm_domain_lock);
+}
+
+static void __dlm_print_nodes(struct dlm_ctxt *dlm)
+{
+ int node = -1, num = 0;
+
+ assert_spin_locked(&dlm->spinlock);
+
+ printk("( ");
+ while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
+ node + 1)) < O2NM_MAX_NODES) {
+ printk("%d ", node);
+ ++num;
+ }
+ printk(") %u nodes\n", num);
+}
+
+static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data)
+{
+ struct dlm_ctxt *dlm = data;
+ unsigned int node;
+ struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf;
+
+ mlog(0, "%p %u %p", msg, len, data);
+
+ if (!dlm_grab(dlm))
+ return 0;
+
+ node = exit_msg->node_idx;
+
+ spin_lock(&dlm->spinlock);
+ clear_bit(node, dlm->domain_map);
+ clear_bit(node, dlm->exit_domain_map);
+ printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s ", node, dlm->name);
+ __dlm_print_nodes(dlm);
+
+ /* notify anything attached to the heartbeat events */
+ dlm_hb_event_notify_attached(dlm, node, 0);
+
+ spin_unlock(&dlm->spinlock);
+
+ dlm_put(dlm);
+
+ return 0;
+}
+
+static int dlm_send_one_domain_exit(struct dlm_ctxt *dlm, u32 msg_type,
+ unsigned int node)
+{
+ int status;
+ struct dlm_exit_domain leave_msg;
+
+ mlog(0, "%s: Sending domain exit message %u to node %u\n", dlm->name,
+ msg_type, node);
+
+ memset(&leave_msg, 0, sizeof(leave_msg));
+ leave_msg.node_idx = dlm->node_num;
+
+ status = o2net_send_message(msg_type, dlm->key, &leave_msg,
+ sizeof(leave_msg), node, NULL);
+ if (status < 0)
+ mlog(ML_ERROR, "Error %d sending domain exit message %u "
+ "to node %u on domain %s\n", status, msg_type, node,
+ dlm->name);
+
+ return status;
+}
+
+static void dlm_begin_exit_domain(struct dlm_ctxt *dlm)
+{
+ int node = -1;
+
+ /* Support for begin exit domain was added in 1.2 */
+ if (dlm->dlm_locking_proto.pv_major == 1 &&
+ dlm->dlm_locking_proto.pv_minor < 2)
+ return;
+
+ /*
+ * Unlike DLM_EXIT_DOMAIN_MSG, DLM_BEGIN_EXIT_DOMAIN_MSG is purely
+ * informational. Meaning if a node does not receive the message,
+ * so be it.
+ */
+ spin_lock(&dlm->spinlock);
+ while (1) {
+ node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, node + 1);
+ if (node >= O2NM_MAX_NODES)
+ break;
+ if (node == dlm->node_num)
+ continue;
+
+ spin_unlock(&dlm->spinlock);
+ dlm_send_one_domain_exit(dlm, DLM_BEGIN_EXIT_DOMAIN_MSG, node);
+ spin_lock(&dlm->spinlock);
+ }
+ spin_unlock(&dlm->spinlock);
+}
+
+static void dlm_leave_domain(struct dlm_ctxt *dlm)
+{
+ int node, clear_node, status;
+
+ /* At this point we've migrated away all our locks and won't
+ * accept mastership of new ones. The dlm is responsible for
+ * almost nothing now. We make sure not to confuse any joining
+ * nodes and then commence shutdown procedure. */
+
+ spin_lock(&dlm->spinlock);
+ /* Clear ourselves from the domain map */
+ clear_bit(dlm->node_num, dlm->domain_map);
+ while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
+ 0)) < O2NM_MAX_NODES) {
+ /* Drop the dlm spinlock. This is safe wrt the domain_map.
+ * -nodes cannot be added now as the
+ * query_join handler knows to respond with OK_NO_MAP
+ * -we catch the right network errors if a node is
+ * removed from the map while we're sending him the
+ * exit message. */
+ spin_unlock(&dlm->spinlock);
+
+ clear_node = 1;
+
+ status = dlm_send_one_domain_exit(dlm, DLM_EXIT_DOMAIN_MSG,
+ node);
+ if (status < 0 &&
+ status != -ENOPROTOOPT &&
+ status != -ENOTCONN) {
+ mlog(ML_NOTICE, "Error %d sending domain exit message "
+ "to node %d\n", status, node);
+
+ /* Not sure what to do here but lets sleep for
+ * a bit in case this was a transient
+ * error... */
+ msleep(DLM_DOMAIN_BACKOFF_MS);
+ clear_node = 0;
+ }
+
+ spin_lock(&dlm->spinlock);
+ /* If we're not clearing the node bit then we intend
+ * to loop back around to try again. */
+ if (clear_node)
+ clear_bit(node, dlm->domain_map);
+ }
+ spin_unlock(&dlm->spinlock);
+}
+
+void dlm_unregister_domain(struct dlm_ctxt *dlm)
+{
+ int leave = 0;
+ struct dlm_lock_resource *res;
+
+ spin_lock(&dlm_domain_lock);
+ BUG_ON(dlm->dlm_state != DLM_CTXT_JOINED);
+ BUG_ON(!dlm->num_joins);
+
+ dlm->num_joins--;
+ if (!dlm->num_joins) {
+ /* We mark it "in shutdown" now so new register
+ * requests wait until we've completely left the
+ * domain. Don't use DLM_CTXT_LEAVING yet as we still
+ * want new domain joins to communicate with us at
+ * least until we've completed migration of our
+ * resources. */
+ dlm->dlm_state = DLM_CTXT_IN_SHUTDOWN;
+ leave = 1;
+ }
+ spin_unlock(&dlm_domain_lock);
+
+ if (leave) {
+ mlog(0, "shutting down domain %s\n", dlm->name);
+ dlm_begin_exit_domain(dlm);
+
+ /* We changed dlm state, notify the thread */
+ dlm_kick_thread(dlm, NULL);
+
+ while (dlm_migrate_all_locks(dlm)) {
+ /* Give dlm_thread time to purge the lockres' */
+ msleep(500);
+ mlog(0, "%s: more migration to do\n", dlm->name);
+ }
+
+ /* This list should be empty. If not, print remaining lockres */
+ if (!list_empty(&dlm->tracking_list)) {
+ mlog(ML_ERROR, "Following lockres' are still on the "
+ "tracking list:\n");
+ list_for_each_entry(res, &dlm->tracking_list, tracking)
+ dlm_print_one_lock_resource(res);
+ }
+
+ dlm_mark_domain_leaving(dlm);
+ dlm_leave_domain(dlm);
+ printk(KERN_NOTICE "o2dlm: Leaving domain %s\n", dlm->name);
+ dlm_force_free_mles(dlm);
+ dlm_complete_dlm_shutdown(dlm);
+ }
+ dlm_put(dlm);
+}
+EXPORT_SYMBOL_GPL(dlm_unregister_domain);
+
+static int dlm_query_join_proto_check(char *proto_type, int node,
+ struct dlm_protocol_version *ours,
+ struct dlm_protocol_version *request)
+{
+ int rc;
+ struct dlm_protocol_version proto = *request;
+
+ if (!dlm_protocol_compare(ours, &proto)) {
+ mlog(0,
+ "node %u wanted to join with %s locking protocol "
+ "%u.%u, we respond with %u.%u\n",
+ node, proto_type,
+ request->pv_major,
+ request->pv_minor,
+ proto.pv_major, proto.pv_minor);
+ request->pv_minor = proto.pv_minor;
+ rc = 0;
+ } else {
+ mlog(ML_NOTICE,
+ "Node %u wanted to join with %s locking "
+ "protocol %u.%u, but we have %u.%u, disallowing\n",
+ node, proto_type,
+ request->pv_major,
+ request->pv_minor,
+ ours->pv_major,
+ ours->pv_minor);
+ rc = 1;
+ }
+
+ return rc;
+}
+
+/*
+ * struct dlm_query_join_packet is made up of four one-byte fields. They
+ * are effectively in big-endian order already. However, little-endian
+ * machines swap them before putting the packet on the wire (because
+ * query_join's response is a status, and that status is treated as a u32
+ * on the wire). Thus, big-endian and little-endian machines will treat
+ * this structure differently.
+ *
+ * The solution is to have little-endian machines swap the structure when
+ * converting from the structure to the u32 representation. This will
+ * result in the structure having the correct format on the wire no matter
+ * the host endian format.
+ */
+static void dlm_query_join_packet_to_wire(struct dlm_query_join_packet *packet,
+ u32 *wire)
+{
+ union dlm_query_join_response response;
+
+ response.packet = *packet;
+ *wire = be32_to_cpu(response.intval);
+}
+
+static void dlm_query_join_wire_to_packet(u32 wire,
+ struct dlm_query_join_packet *packet)
+{
+ union dlm_query_join_response response;
+
+ response.intval = cpu_to_be32(wire);
+ *packet = response.packet;
+}
+
+static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data)
+{
+ struct dlm_query_join_request *query;
+ struct dlm_query_join_packet packet = {
+ .code = JOIN_DISALLOW,
+ };
+ struct dlm_ctxt *dlm = NULL;
+ u32 response;
+ u8 nodenum;
+
+ query = (struct dlm_query_join_request *) msg->buf;
+
+ mlog(0, "node %u wants to join domain %s\n", query->node_idx,
+ query->domain);
+
+ /*
+ * If heartbeat doesn't consider the node live, tell it
+ * to back off and try again. This gives heartbeat a chance
+ * to catch up.
+ */
+ if (!o2hb_check_node_heartbeating_no_sem(query->node_idx)) {
+ mlog(0, "node %u is not in our live map yet\n",
+ query->node_idx);
+
+ packet.code = JOIN_DISALLOW;
+ goto respond;
+ }
+
+ packet.code = JOIN_OK_NO_MAP;
+
+ spin_lock(&dlm_domain_lock);
+ dlm = __dlm_lookup_domain_full(query->domain, query->name_len);
+ if (!dlm)
+ goto unlock_respond;
+
+ /*
+ * There is a small window where the joining node may not see the
+ * node(s) that just left but still part of the cluster. DISALLOW
+ * join request if joining node has different node map.
+ */
+	nodenum = 0;
+ while (nodenum < O2NM_MAX_NODES) {
+ if (test_bit(nodenum, dlm->domain_map)) {
+ if (!byte_test_bit(nodenum, query->node_map)) {
+ mlog(0, "disallow join as node %u does not "
+ "have node %u in its nodemap\n",
+ query->node_idx, nodenum);
+ packet.code = JOIN_DISALLOW;
+ goto unlock_respond;
+ }
+ }
+ nodenum++;
+ }
+
+ /* Once the dlm ctxt is marked as leaving then we don't want
+ * to be put in someone's domain map.
+ * Also, explicitly disallow joining at certain troublesome
+ * times (ie. during recovery). */
+ if (dlm->dlm_state != DLM_CTXT_LEAVING) {
+ int bit = query->node_idx;
+ spin_lock(&dlm->spinlock);
+
+ if (dlm->dlm_state == DLM_CTXT_NEW &&
+ dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN) {
+			/* If this is a brand new context and we
+ * haven't started our join process yet, then
+ * the other node won the race. */
+ packet.code = JOIN_OK_NO_MAP;
+ } else if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
+ /* Disallow parallel joins. */
+ packet.code = JOIN_DISALLOW;
+ } else if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) {
+ mlog(0, "node %u trying to join, but recovery "
+ "is ongoing.\n", bit);
+ packet.code = JOIN_DISALLOW;
+ } else if (test_bit(bit, dlm->recovery_map)) {
+ mlog(0, "node %u trying to join, but it "
+ "still needs recovery.\n", bit);
+ packet.code = JOIN_DISALLOW;
+ } else if (test_bit(bit, dlm->domain_map)) {
+ mlog(0, "node %u trying to join, but it "
+ "is still in the domain! needs recovery?\n",
+ bit);
+ packet.code = JOIN_DISALLOW;
+ } else {
+ /* Alright we're fully a part of this domain
+ * so we keep some state as to who's joining
+ * and indicate to him that needs to be fixed
+			 * and indicate to him what needs to be fixed
+
+ /* Make sure we speak compatible locking protocols. */
+ if (dlm_query_join_proto_check("DLM", bit,
+ &dlm->dlm_locking_proto,
+ &query->dlm_proto)) {
+ packet.code = JOIN_PROTOCOL_MISMATCH;
+ } else if (dlm_query_join_proto_check("fs", bit,
+ &dlm->fs_locking_proto,
+ &query->fs_proto)) {
+ packet.code = JOIN_PROTOCOL_MISMATCH;
+ } else {
+ packet.dlm_minor = query->dlm_proto.pv_minor;
+ packet.fs_minor = query->fs_proto.pv_minor;
+ packet.code = JOIN_OK;
+ __dlm_set_joining_node(dlm, query->node_idx);
+ }
+ }
+
+ spin_unlock(&dlm->spinlock);
+ }
+unlock_respond:
+ spin_unlock(&dlm_domain_lock);
+
+respond:
+ mlog(0, "We respond with %u\n", packet.code);
+
+ dlm_query_join_packet_to_wire(&packet, &response);
+ return response;
+}
+
+static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data)
+{
+ struct dlm_assert_joined *assert;
+ struct dlm_ctxt *dlm = NULL;
+
+ assert = (struct dlm_assert_joined *) msg->buf;
+
+ mlog(0, "node %u asserts join on domain %s\n", assert->node_idx,
+ assert->domain);
+
+ spin_lock(&dlm_domain_lock);
+ dlm = __dlm_lookup_domain_full(assert->domain, assert->name_len);
+ /* XXX should we consider no dlm ctxt an error? */
+ if (dlm) {
+ spin_lock(&dlm->spinlock);
+
+ /* Alright, this node has officially joined our
+ * domain. Set him in the map and clean up our
+ * leftover join state. */
+ BUG_ON(dlm->joining_node != assert->node_idx);
+
+ if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) {
+ mlog(0, "dlm recovery is ongoing, disallow join\n");
+ spin_unlock(&dlm->spinlock);
+ spin_unlock(&dlm_domain_lock);
+ return -EAGAIN;
+ }
+
+ set_bit(assert->node_idx, dlm->domain_map);
+ clear_bit(assert->node_idx, dlm->exit_domain_map);
+ __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
+
+ printk(KERN_NOTICE "o2dlm: Node %u joins domain %s ",
+ assert->node_idx, dlm->name);
+ __dlm_print_nodes(dlm);
+
+ /* notify anything attached to the heartbeat events */
+ dlm_hb_event_notify_attached(dlm, assert->node_idx, 1);
+
+ spin_unlock(&dlm->spinlock);
+ }
+ spin_unlock(&dlm_domain_lock);
+
+ return 0;
+}
+
+static int dlm_match_regions(struct dlm_ctxt *dlm,
+ struct dlm_query_region *qr,
+ char *local, int locallen)
+{
+ char *remote = qr->qr_regions;
+ char *l, *r;
+ int localnr, i, j, foundit;
+ int status = 0;
+
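+	/* Both sides must agree on the set of heartbeat regions: every
+	 * region registered locally has to be known to the joining node,
+	 * and every region it sent has to be registered here. */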
+ if (!o2hb_global_heartbeat_active()) {
+ if (qr->qr_numregions) {
+ mlog(ML_ERROR, "Domain %s: Joining node %d has global "
+ "heartbeat enabled but local node %d does not\n",
+ qr->qr_domain, qr->qr_node, dlm->node_num);
+ status = -EINVAL;
+ }
+ goto bail;
+ }
+
+ if (o2hb_global_heartbeat_active() && !qr->qr_numregions) {
+ mlog(ML_ERROR, "Domain %s: Local node %d has global "
+ "heartbeat enabled but joining node %d does not\n",
+ qr->qr_domain, dlm->node_num, qr->qr_node);
+ status = -EINVAL;
+ goto bail;
+ }
+
+ r = remote;
+ for (i = 0; i < qr->qr_numregions; ++i) {
+ mlog(0, "Region %.*s\n", O2HB_MAX_REGION_NAME_LEN, r);
+ r += O2HB_MAX_REGION_NAME_LEN;
+ }
+
+ localnr = min(O2NM_MAX_REGIONS, locallen/O2HB_MAX_REGION_NAME_LEN);
+ localnr = o2hb_get_all_regions(local, (u8)localnr);
+
+ /* compare local regions with remote */
+ l = local;
+ for (i = 0; i < localnr; ++i) {
+ foundit = 0;
+ r = remote;
+		for (j = 0; j < qr->qr_numregions; ++j) {
+ if (!memcmp(l, r, O2HB_MAX_REGION_NAME_LEN)) {
+ foundit = 1;
+ break;
+ }
+ r += O2HB_MAX_REGION_NAME_LEN;
+ }
+ if (!foundit) {
+ status = -EINVAL;
+ mlog(ML_ERROR, "Domain %s: Region '%.*s' registered "
+ "in local node %d but not in joining node %d\n",
+ qr->qr_domain, O2HB_MAX_REGION_NAME_LEN, l,
+ dlm->node_num, qr->qr_node);
+ goto bail;
+ }
+ l += O2HB_MAX_REGION_NAME_LEN;
+ }
+
+ /* compare remote with local regions */
+ r = remote;
+ for (i = 0; i < qr->qr_numregions; ++i) {
+ foundit = 0;
+ l = local;
+ for (j = 0; j < localnr; ++j) {
+ if (!memcmp(r, l, O2HB_MAX_REGION_NAME_LEN)) {
+ foundit = 1;
+ break;
+ }
+ l += O2HB_MAX_REGION_NAME_LEN;
+ }
+ if (!foundit) {
+ status = -EINVAL;
+ mlog(ML_ERROR, "Domain %s: Region '%.*s' registered "
+ "in joining node %d but not in local node %d\n",
+ qr->qr_domain, O2HB_MAX_REGION_NAME_LEN, r,
+ qr->qr_node, dlm->node_num);
+ goto bail;
+ }
+ r += O2HB_MAX_REGION_NAME_LEN;
+ }
+
+bail:
+ return status;
+}
+
+static int dlm_send_regions(struct dlm_ctxt *dlm, unsigned long *node_map)
+{
+ struct dlm_query_region *qr = NULL;
+ int status, ret = 0, i;
+ char *p;
+
+ if (find_first_bit(node_map, O2NM_MAX_NODES) >= O2NM_MAX_NODES)
+ goto bail;
+
+ qr = kzalloc(sizeof(struct dlm_query_region), GFP_KERNEL);
+ if (!qr) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto bail;
+ }
+
+ qr->qr_node = dlm->node_num;
+ qr->qr_namelen = strlen(dlm->name);
+ memcpy(qr->qr_domain, dlm->name, qr->qr_namelen);
+ /* if local hb, the numregions will be zero */
+ if (o2hb_global_heartbeat_active())
+ qr->qr_numregions = o2hb_get_all_regions(qr->qr_regions,
+ O2NM_MAX_REGIONS);
+
+ p = qr->qr_regions;
+ for (i = 0; i < qr->qr_numregions; ++i, p += O2HB_MAX_REGION_NAME_LEN)
+ mlog(0, "Region %.*s\n", O2HB_MAX_REGION_NAME_LEN, p);
+
+ i = -1;
+ while ((i = find_next_bit(node_map, O2NM_MAX_NODES,
+ i + 1)) < O2NM_MAX_NODES) {
+ if (i == dlm->node_num)
+ continue;
+
+ mlog(0, "Sending regions to node %d\n", i);
+
+ ret = o2net_send_message(DLM_QUERY_REGION, DLM_MOD_KEY, qr,
+ sizeof(struct dlm_query_region),
+ i, &status);
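+		/* on transport success adopt the remote handler's status;
+		 * nonzero means the remote node rejected the region query
+		 * (e.g. a region mismatch) */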
+ if (ret >= 0)
+ ret = status;
+ if (ret) {
+ mlog(ML_ERROR, "Region mismatch %d, node %d\n",
+ ret, i);
+ break;
+ }
+ }
+
+bail:
+ kfree(qr);
+ return ret;
+}
+
+static int dlm_query_region_handler(struct o2net_msg *msg, u32 len,
+ void *data, void **ret_data)
+{
+ struct dlm_query_region *qr;
+ struct dlm_ctxt *dlm = NULL;
+ char *local = NULL;
+ int status = 0;
+
+ qr = (struct dlm_query_region *) msg->buf;
+
+ mlog(0, "Node %u queries hb regions on domain %s\n", qr->qr_node,
+ qr->qr_domain);
+
+	/* buffer used in dlm_match_regions() */
+ local = kmalloc(sizeof(qr->qr_regions), GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ status = -EINVAL;
+
+ spin_lock(&dlm_domain_lock);
+ dlm = __dlm_lookup_domain_full(qr->qr_domain, qr->qr_namelen);
+ if (!dlm) {
+ mlog(ML_ERROR, "Node %d queried hb regions on domain %s "
+ "before join domain\n", qr->qr_node, qr->qr_domain);
+ goto out_domain_lock;
+ }
+
+ spin_lock(&dlm->spinlock);
+ if (dlm->joining_node != qr->qr_node) {
+ mlog(ML_ERROR, "Node %d queried hb regions on domain %s "
+ "but joining node is %d\n", qr->qr_node, qr->qr_domain,
+ dlm->joining_node);
+ goto out_dlm_lock;
+ }
+
+ /* Support for global heartbeat was added in 1.1 */
+ if (dlm->dlm_locking_proto.pv_major == 1 &&
+ dlm->dlm_locking_proto.pv_minor == 0) {
+ mlog(ML_ERROR, "Node %d queried hb regions on domain %s "
+ "but active dlm protocol is %d.%d\n", qr->qr_node,
+ qr->qr_domain, dlm->dlm_locking_proto.pv_major,
+ dlm->dlm_locking_proto.pv_minor);
+ goto out_dlm_lock;
+ }
+
+ status = dlm_match_regions(dlm, qr, local, sizeof(qr->qr_regions));
+
+out_dlm_lock:
+ spin_unlock(&dlm->spinlock);
+
+out_domain_lock:
+ spin_unlock(&dlm_domain_lock);
+
+ kfree(local);
+
+ return status;
+}
+
+static int dlm_match_nodes(struct dlm_ctxt *dlm, struct dlm_query_nodeinfo *qn)
+{
+ struct o2nm_node *local;
+ struct dlm_node_info *remote;
+ int i, j;
+ int status = 0;
+
+ for (j = 0; j < qn->qn_numnodes; ++j)
+ mlog(0, "Node %3d, %pI4:%u\n", qn->qn_nodes[j].ni_nodenum,
+ &(qn->qn_nodes[j].ni_ipv4_address),
+ ntohs(qn->qn_nodes[j].ni_ipv4_port));
+
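+	/* Walk every possible node number: a node must be configured on
+	 * both sides, with the same node number, IPv4 address and port,
+	 * or on neither side. */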
+ for (i = 0; i < O2NM_MAX_NODES && !status; ++i) {
+ local = o2nm_get_node_by_num(i);
+ remote = NULL;
+ for (j = 0; j < qn->qn_numnodes; ++j) {
+ if (qn->qn_nodes[j].ni_nodenum == i) {
+ remote = &(qn->qn_nodes[j]);
+ break;
+ }
+ }
+
+ if (!local && !remote)
+ continue;
+
+ if ((local && !remote) || (!local && remote))
+ status = -EINVAL;
+
+ if (!status &&
+ ((remote->ni_nodenum != local->nd_num) ||
+ (remote->ni_ipv4_port != local->nd_ipv4_port) ||
+ (remote->ni_ipv4_address != local->nd_ipv4_address)))
+ status = -EINVAL;
+
+ if (status) {
+ if (remote && !local)
+ mlog(ML_ERROR, "Domain %s: Node %d (%pI4:%u) "
+ "registered in joining node %d but not in "
+ "local node %d\n", qn->qn_domain,
+ remote->ni_nodenum,
+ &(remote->ni_ipv4_address),
+ ntohs(remote->ni_ipv4_port),
+ qn->qn_nodenum, dlm->node_num);
+ if (local && !remote)
+ mlog(ML_ERROR, "Domain %s: Node %d (%pI4:%u) "
+ "registered in local node %d but not in "
+ "joining node %d\n", qn->qn_domain,
+ local->nd_num, &(local->nd_ipv4_address),
+ ntohs(local->nd_ipv4_port),
+ dlm->node_num, qn->qn_nodenum);
+ BUG_ON((!local && !remote));
+ }
+
+ if (local)
+ o2nm_node_put(local);
+ }
+
+ return status;
+}
+
+static int dlm_send_nodeinfo(struct dlm_ctxt *dlm, unsigned long *node_map)
+{
+ struct dlm_query_nodeinfo *qn = NULL;
+ struct o2nm_node *node;
+ int ret = 0, status, count, i;
+
+ if (find_first_bit(node_map, O2NM_MAX_NODES) >= O2NM_MAX_NODES)
+ goto bail;
+
+ qn = kzalloc(sizeof(struct dlm_query_nodeinfo), GFP_KERNEL);
+ if (!qn) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto bail;
+ }
+
+ for (i = 0, count = 0; i < O2NM_MAX_NODES; ++i) {
+ node = o2nm_get_node_by_num(i);
+ if (!node)
+ continue;
+ qn->qn_nodes[count].ni_nodenum = node->nd_num;
+ qn->qn_nodes[count].ni_ipv4_port = node->nd_ipv4_port;
+ qn->qn_nodes[count].ni_ipv4_address = node->nd_ipv4_address;
+ mlog(0, "Node %3d, %pI4:%u\n", node->nd_num,
+ &(node->nd_ipv4_address), ntohs(node->nd_ipv4_port));
+ ++count;
+ o2nm_node_put(node);
+ }
+
+ qn->qn_nodenum = dlm->node_num;
+ qn->qn_numnodes = count;
+ qn->qn_namelen = strlen(dlm->name);
+ memcpy(qn->qn_domain, dlm->name, qn->qn_namelen);
+
+ i = -1;
+ while ((i = find_next_bit(node_map, O2NM_MAX_NODES,
+ i + 1)) < O2NM_MAX_NODES) {
+ if (i == dlm->node_num)
+ continue;
+
+ mlog(0, "Sending nodeinfo to node %d\n", i);
+
+ ret = o2net_send_message(DLM_QUERY_NODEINFO, DLM_MOD_KEY,
+ qn, sizeof(struct dlm_query_nodeinfo),
+ i, &status);
+ if (ret >= 0)
+ ret = status;
+ if (ret) {
+ mlog(ML_ERROR, "node mismatch %d, node %d\n", ret, i);
+ break;
+ }
+ }
+
+bail:
+ kfree(qn);
+ return ret;
+}
+
+static int dlm_query_nodeinfo_handler(struct o2net_msg *msg, u32 len,
+ void *data, void **ret_data)
+{
+ struct dlm_query_nodeinfo *qn;
+ struct dlm_ctxt *dlm = NULL;
+ int locked = 0, status = -EINVAL;
+
+ qn = (struct dlm_query_nodeinfo *) msg->buf;
+
+ mlog(0, "Node %u queries nodes on domain %s\n", qn->qn_nodenum,
+ qn->qn_domain);
+
+ spin_lock(&dlm_domain_lock);
+ dlm = __dlm_lookup_domain_full(qn->qn_domain, qn->qn_namelen);
+ if (!dlm) {
+ mlog(ML_ERROR, "Node %d queried nodes on domain %s before "
+ "join domain\n", qn->qn_nodenum, qn->qn_domain);
+ goto bail;
+ }
+
+ spin_lock(&dlm->spinlock);
+ locked = 1;
+ if (dlm->joining_node != qn->qn_nodenum) {
+ mlog(ML_ERROR, "Node %d queried nodes on domain %s but "
+ "joining node is %d\n", qn->qn_nodenum, qn->qn_domain,
+ dlm->joining_node);
+ goto bail;
+ }
+
+ /* Support for node query was added in 1.1 */
+ if (dlm->dlm_locking_proto.pv_major == 1 &&
+ dlm->dlm_locking_proto.pv_minor == 0) {
+ mlog(ML_ERROR, "Node %d queried nodes on domain %s "
+ "but active dlm protocol is %d.%d\n", qn->qn_nodenum,
+ qn->qn_domain, dlm->dlm_locking_proto.pv_major,
+ dlm->dlm_locking_proto.pv_minor);
+ goto bail;
+ }
+
+ status = dlm_match_nodes(dlm, qn);
+
+bail:
+ if (locked)
+ spin_unlock(&dlm->spinlock);
+ spin_unlock(&dlm_domain_lock);
+
+ return status;
+}
+
+static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data)
+{
+ struct dlm_cancel_join *cancel;
+ struct dlm_ctxt *dlm = NULL;
+
+ cancel = (struct dlm_cancel_join *) msg->buf;
+
+ mlog(0, "node %u cancels join on domain %s\n", cancel->node_idx,
+ cancel->domain);
+
+ spin_lock(&dlm_domain_lock);
+ dlm = __dlm_lookup_domain_full(cancel->domain, cancel->name_len);
+
+ if (dlm) {
+ spin_lock(&dlm->spinlock);
+
+ /* Yikes, this guy wants to cancel his join. No
+ * problem, we simply cleanup our join state. */
+ BUG_ON(dlm->joining_node != cancel->node_idx);
+ __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
+
+ spin_unlock(&dlm->spinlock);
+ }
+ spin_unlock(&dlm_domain_lock);
+
+ return 0;
+}
+
+static int dlm_send_one_join_cancel(struct dlm_ctxt *dlm,
+ unsigned int node)
+{
+ int status;
+ struct dlm_cancel_join cancel_msg;
+
+ memset(&cancel_msg, 0, sizeof(cancel_msg));
+ cancel_msg.node_idx = dlm->node_num;
+ cancel_msg.name_len = strlen(dlm->name);
+ memcpy(cancel_msg.domain, dlm->name, cancel_msg.name_len);
+
+ status = o2net_send_message(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
+ &cancel_msg, sizeof(cancel_msg), node,
+ NULL);
+ if (status < 0) {
+ mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+ "node %u\n", status, DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
+ node);
+ goto bail;
+ }
+
+bail:
+ return status;
+}
+
+/* map_size should be in bytes. */
+static int dlm_send_join_cancels(struct dlm_ctxt *dlm,
+ unsigned long *node_map,
+ unsigned int map_size)
+{
+ int status, tmpstat;
+ int node;
+
+ if (map_size != (BITS_TO_LONGS(O2NM_MAX_NODES) *
+ sizeof(unsigned long))) {
+ mlog(ML_ERROR,
+ "map_size %u != BITS_TO_LONGS(O2NM_MAX_NODES) %u\n",
+ map_size, (unsigned)BITS_TO_LONGS(O2NM_MAX_NODES));
+ return -EINVAL;
+ }
+
+ status = 0;
+ node = -1;
+ while ((node = find_next_bit(node_map, O2NM_MAX_NODES,
+ node + 1)) < O2NM_MAX_NODES) {
+ if (node == dlm->node_num)
+ continue;
+
+ tmpstat = dlm_send_one_join_cancel(dlm, node);
+ if (tmpstat) {
+ mlog(ML_ERROR, "Error return %d cancelling join on "
+ "node %d\n", tmpstat, node);
+ if (!status)
+ status = tmpstat;
+ }
+ }
+
+ if (status)
+ mlog_errno(status);
+ return status;
+}
+
+static int dlm_request_join(struct dlm_ctxt *dlm,
+ int node,
+ enum dlm_query_join_response_code *response)
+{
+ int status;
+ struct dlm_query_join_request join_msg;
+ struct dlm_query_join_packet packet;
+ u32 join_resp;
+
+ mlog(0, "querying node %d\n", node);
+
+ memset(&join_msg, 0, sizeof(join_msg));
+ join_msg.node_idx = dlm->node_num;
+ join_msg.name_len = strlen(dlm->name);
+ memcpy(join_msg.domain, dlm->name, join_msg.name_len);
+ join_msg.dlm_proto = dlm->dlm_locking_proto;
+ join_msg.fs_proto = dlm->fs_locking_proto;
+
+ /* copy live node map to join message */
+ byte_copymap(join_msg.node_map, dlm->live_nodes_map, O2NM_MAX_NODES);
+
+ status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg,
+ sizeof(join_msg), node, &join_resp);
+ if (status < 0 && status != -ENOPROTOOPT) {
+ mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+ "node %u\n", status, DLM_QUERY_JOIN_MSG, DLM_MOD_KEY,
+ node);
+ goto bail;
+ }
+ dlm_query_join_wire_to_packet(join_resp, &packet);
+
+ /* -ENOPROTOOPT from the net code means the other side isn't
+ listening for our message type -- that's fine, it means
+ his dlm isn't up, so we can consider him a 'yes' but not
+ joined into the domain. */
+ if (status == -ENOPROTOOPT) {
+ status = 0;
+ *response = JOIN_OK_NO_MAP;
+ } else {
+ *response = packet.code;
+ switch (packet.code) {
+ case JOIN_DISALLOW:
+ case JOIN_OK_NO_MAP:
+ break;
+ case JOIN_PROTOCOL_MISMATCH:
+ mlog(ML_NOTICE,
+ "This node requested DLM locking protocol %u.%u and "
+ "filesystem locking protocol %u.%u. At least one of "
+ "the protocol versions on node %d is not compatible, "
+ "disconnecting\n",
+ dlm->dlm_locking_proto.pv_major,
+ dlm->dlm_locking_proto.pv_minor,
+ dlm->fs_locking_proto.pv_major,
+ dlm->fs_locking_proto.pv_minor,
+ node);
+ status = -EPROTO;
+ break;
+ case JOIN_OK:
+ /* Use the same locking protocol as the remote node */
+ dlm->dlm_locking_proto.pv_minor = packet.dlm_minor;
+ dlm->fs_locking_proto.pv_minor = packet.fs_minor;
+ mlog(0,
+ "Node %d responds JOIN_OK with DLM locking protocol "
+ "%u.%u and fs locking protocol %u.%u\n",
+ node,
+ dlm->dlm_locking_proto.pv_major,
+ dlm->dlm_locking_proto.pv_minor,
+ dlm->fs_locking_proto.pv_major,
+ dlm->fs_locking_proto.pv_minor);
+ break;
+ default:
+ status = -EINVAL;
+ mlog(ML_ERROR, "invalid response %d from node %u\n",
+ packet.code, node);
+ /* Reset response to JOIN_DISALLOW */
+ *response = JOIN_DISALLOW;
+ break;
+ }
+ }
+
+ mlog(0, "status %d, node %d response is %d\n", status, node,
+ *response);
+
+bail:
+ return status;
+}
+
+static int dlm_send_one_join_assert(struct dlm_ctxt *dlm,
+ unsigned int node)
+{
+ int status;
+ int ret;
+ struct dlm_assert_joined assert_msg;
+
+ mlog(0, "Sending join assert to node %u\n", node);
+
+ memset(&assert_msg, 0, sizeof(assert_msg));
+ assert_msg.node_idx = dlm->node_num;
+ assert_msg.name_len = strlen(dlm->name);
+ memcpy(assert_msg.domain, dlm->name, assert_msg.name_len);
+
+ status = o2net_send_message(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
+ &assert_msg, sizeof(assert_msg), node,
+ &ret);
+ if (status < 0)
+ mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+ "node %u\n", status, DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
+ node);
+ else
+ status = ret;
+
+ return status;
+}
+
+static void dlm_send_join_asserts(struct dlm_ctxt *dlm,
+ unsigned long *node_map)
+{
+ int status, node, live;
+
+ status = 0;
+ node = -1;
+ while ((node = find_next_bit(node_map, O2NM_MAX_NODES,
+ node + 1)) < O2NM_MAX_NODES) {
+ if (node == dlm->node_num)
+ continue;
+
+ do {
+ /* It is very important that this message be
+ * received so we spin until either the node
+ * has died or it gets the message. */
+ status = dlm_send_one_join_assert(dlm, node);
+
+ spin_lock(&dlm->spinlock);
+ live = test_bit(node, dlm->live_nodes_map);
+ spin_unlock(&dlm->spinlock);
+
+ if (status) {
+ mlog(ML_ERROR, "Error return %d asserting "
+ "join on node %d\n", status, node);
+
+ /* give us some time between errors... */
+ if (live)
+ msleep(DLM_DOMAIN_BACKOFF_MS);
+ }
+ } while (status && live);
+ }
+}
+
+struct domain_join_ctxt {
+ unsigned long live_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long yes_resp_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+};
+
+static int dlm_should_restart_join(struct dlm_ctxt *dlm,
+ struct domain_join_ctxt *ctxt,
+ enum dlm_query_join_response_code response)
+{
+ int ret;
+
+ if (response == JOIN_DISALLOW) {
+ mlog(0, "Latest response of disallow -- should restart\n");
+ return 1;
+ }
+
+ spin_lock(&dlm->spinlock);
+ /* For now, we restart the process if the node maps have
+ * changed at all */
+ ret = !bitmap_equal(ctxt->live_map, dlm->live_nodes_map,
+ O2NM_MAX_NODES);
+ spin_unlock(&dlm->spinlock);
+
+ if (ret)
+ mlog(0, "Node maps changed -- should restart\n");
+
+ return ret;
+}
+
+static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
+{
+ int status = 0, tmpstat, node;
+ struct domain_join_ctxt *ctxt;
+ enum dlm_query_join_response_code response = JOIN_DISALLOW;
+
+ mlog(0, "%p", dlm);
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* group sem locking should work for us here -- we're already
+ * registered for heartbeat events so filling this should be
+ * atomic wrt getting those handlers called. */
+ o2hb_fill_node_map(dlm->live_nodes_map, O2NM_MAX_NODES);
+
+ spin_lock(&dlm->spinlock);
+ bitmap_copy(ctxt->live_map, dlm->live_nodes_map, O2NM_MAX_NODES);
+ __dlm_set_joining_node(dlm, dlm->node_num);
+ spin_unlock(&dlm->spinlock);
+
+ node = -1;
+ while ((node = find_next_bit(ctxt->live_map, O2NM_MAX_NODES,
+ node + 1)) < O2NM_MAX_NODES) {
+ if (node == dlm->node_num)
+ continue;
+
+ status = dlm_request_join(dlm, node, &response);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* Ok, either we got a response or the node doesn't have a
+ * dlm up. */
+ if (response == JOIN_OK)
+ set_bit(node, ctxt->yes_resp_map);
+
+ if (dlm_should_restart_join(dlm, ctxt, response)) {
+ status = -EAGAIN;
+ goto bail;
+ }
+ }
+
+ mlog(0, "Yay, done querying nodes!\n");
+
+	/* Yay, everyone agrees we can join the domain. My domain is
+	 * comprised of all nodes who were put in the
+	 * yes_resp_map. Copy that into our domain map and send a join
+	 * assert message to clean up everyone else's state. */
+ spin_lock(&dlm->spinlock);
+ bitmap_copy(dlm->domain_map, ctxt->yes_resp_map, O2NM_MAX_NODES);
+ set_bit(dlm->node_num, dlm->domain_map);
+ spin_unlock(&dlm->spinlock);
+
+ /* Support for global heartbeat and node info was added in 1.1 */
+ if (dlm->dlm_locking_proto.pv_major > 1 ||
+ dlm->dlm_locking_proto.pv_minor > 0) {
+ status = dlm_send_nodeinfo(dlm, ctxt->yes_resp_map);
+ if (status) {
+ mlog_errno(status);
+ goto bail;
+ }
+ status = dlm_send_regions(dlm, ctxt->yes_resp_map);
+ if (status) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
+ dlm_send_join_asserts(dlm, ctxt->yes_resp_map);
+
+ /* Joined state *must* be set before the joining node
+ * information, otherwise the query_join handler may read no
+ * current joiner but a state of NEW and tell joining nodes
+ * we're not in the domain. */
+ spin_lock(&dlm_domain_lock);
+ dlm->dlm_state = DLM_CTXT_JOINED;
+ dlm->num_joins++;
+ spin_unlock(&dlm_domain_lock);
+
+bail:
+ spin_lock(&dlm->spinlock);
+ __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
+ if (!status) {
+ printk(KERN_NOTICE "o2dlm: Joining domain %s ", dlm->name);
+ __dlm_print_nodes(dlm);
+ }
+ spin_unlock(&dlm->spinlock);
+
+ if (ctxt) {
+ /* Do we need to send a cancel message to any nodes? */
+ if (status < 0) {
+ tmpstat = dlm_send_join_cancels(dlm,
+ ctxt->yes_resp_map,
+ sizeof(ctxt->yes_resp_map));
+ if (tmpstat < 0)
+ mlog_errno(tmpstat);
+ }
+ kfree(ctxt);
+ }
+
+ mlog(0, "returning %d\n", status);
+ return status;
+}
+
+static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm)
+{
+ o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_up);
+ o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_down);
+ o2net_unregister_handler_list(&dlm->dlm_domain_handlers);
+}
+
+static int dlm_register_domain_handlers(struct dlm_ctxt *dlm)
+{
+ int status;
+
+ mlog(0, "registering handlers.\n");
+
+ o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB,
+ dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI);
+ o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB,
+ dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI);
+
+ status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_down);
+ if (status)
+ goto bail;
+
+ status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_up);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_MASTER_REQUEST_MSG, dlm->key,
+ sizeof(struct dlm_master_request),
+ dlm_master_request_handler,
+ dlm, NULL, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_ASSERT_MASTER_MSG, dlm->key,
+ sizeof(struct dlm_assert_master),
+ dlm_assert_master_handler,
+ dlm, dlm_assert_master_post_handler,
+ &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_CREATE_LOCK_MSG, dlm->key,
+ sizeof(struct dlm_create_lock),
+ dlm_create_lock_handler,
+ dlm, NULL, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_CONVERT_LOCK_MSG, dlm->key,
+ DLM_CONVERT_LOCK_MAX_LEN,
+ dlm_convert_lock_handler,
+ dlm, NULL, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_UNLOCK_LOCK_MSG, dlm->key,
+ DLM_UNLOCK_LOCK_MAX_LEN,
+ dlm_unlock_lock_handler,
+ dlm, NULL, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_PROXY_AST_MSG, dlm->key,
+ DLM_PROXY_AST_MAX_LEN,
+ dlm_proxy_ast_handler,
+ dlm, NULL, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_EXIT_DOMAIN_MSG, dlm->key,
+ sizeof(struct dlm_exit_domain),
+ dlm_exit_domain_handler,
+ dlm, NULL, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_DEREF_LOCKRES_MSG, dlm->key,
+ sizeof(struct dlm_deref_lockres),
+ dlm_deref_lockres_handler,
+ dlm, NULL, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_MIGRATE_REQUEST_MSG, dlm->key,
+ sizeof(struct dlm_migrate_request),
+ dlm_migrate_request_handler,
+ dlm, NULL, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_MIG_LOCKRES_MSG, dlm->key,
+ DLM_MIG_LOCKRES_MAX_LEN,
+ dlm_mig_lockres_handler,
+ dlm, NULL, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_MASTER_REQUERY_MSG, dlm->key,
+ sizeof(struct dlm_master_requery),
+ dlm_master_requery_handler,
+ dlm, NULL, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_LOCK_REQUEST_MSG, dlm->key,
+ sizeof(struct dlm_lock_request),
+ dlm_request_all_locks_handler,
+ dlm, NULL, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_RECO_DATA_DONE_MSG, dlm->key,
+ sizeof(struct dlm_reco_data_done),
+ dlm_reco_data_done_handler,
+ dlm, NULL, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_BEGIN_RECO_MSG, dlm->key,
+ sizeof(struct dlm_begin_reco),
+ dlm_begin_reco_handler,
+ dlm, NULL, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_FINALIZE_RECO_MSG, dlm->key,
+ sizeof(struct dlm_finalize_reco),
+ dlm_finalize_reco_handler,
+ dlm, NULL, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_BEGIN_EXIT_DOMAIN_MSG, dlm->key,
+ sizeof(struct dlm_exit_domain),
+ dlm_begin_exit_domain_handler,
+ dlm, NULL, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_DEREF_LOCKRES_DONE, dlm->key,
+ sizeof(struct dlm_deref_lockres_done),
+ dlm_deref_lockres_done_handler,
+ dlm, NULL, &dlm->dlm_domain_handlers);
+bail:
+ if (status)
+ dlm_unregister_domain_handlers(dlm);
+
+ return status;
+}
+
+static int dlm_join_domain(struct dlm_ctxt *dlm)
+{
+ int status;
+ unsigned int backoff;
+ unsigned int total_backoff = 0;
+ char wq_name[O2NM_MAX_NAME_LEN];
+
+ BUG_ON(!dlm);
+
+ mlog(0, "Join domain %s\n", dlm->name);
+
+ status = dlm_register_domain_handlers(dlm);
+ if (status) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = dlm_launch_thread(dlm);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = dlm_launch_recovery_thread(dlm);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ dlm_debug_init(dlm);
+
+ snprintf(wq_name, O2NM_MAX_NAME_LEN, "dlm_wq-%s", dlm->name);
+ dlm->dlm_worker = alloc_workqueue(wq_name, WQ_MEM_RECLAIM, 0);
+ if (!dlm->dlm_worker) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ do {
+ status = dlm_try_to_join_domain(dlm);
+
+ /* If we're racing another node to the join, then we
+ * need to back off temporarily and let them
+ * complete. */
+#define DLM_JOIN_TIMEOUT_MSECS 90000
+ if (status == -EAGAIN) {
+ if (signal_pending(current)) {
+ status = -ERESTARTSYS;
+ goto bail;
+ }
+
+ if (total_backoff > DLM_JOIN_TIMEOUT_MSECS) {
+ status = -ERESTARTSYS;
+ mlog(ML_NOTICE, "Timed out joining dlm domain "
+ "%s after %u msecs\n", dlm->name,
+ total_backoff);
+ goto bail;
+ }
+
+ /*
+ * <chip> After you!
+ * <dale> No, after you!
+ * <chip> I insist!
+ * <dale> But you first!
+ * ...
+ */
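+			/* back off for 0-3 times DLM_DOMAIN_BACKOFF_MS,
+			 * using the low bits of jiffies as a cheap
+			 * pseudo-random source */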
+ backoff = (unsigned int)(jiffies & 0x3);
+ backoff *= DLM_DOMAIN_BACKOFF_MS;
+ total_backoff += backoff;
+ mlog(0, "backoff %d\n", backoff);
+ msleep(backoff);
+ }
+ } while (status == -EAGAIN);
+
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = 0;
+bail:
+ wake_up(&dlm_domain_events);
+
+ if (status) {
+ dlm_unregister_domain_handlers(dlm);
+ dlm_complete_thread(dlm);
+ dlm_complete_recovery_thread(dlm);
+ dlm_destroy_dlm_worker(dlm);
+ }
+
+ return status;
+}
+
+static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
+ u32 key)
+{
+ int i;
+ int ret;
+ struct dlm_ctxt *dlm = NULL;
+
+ dlm = kzalloc(sizeof(*dlm), GFP_KERNEL);
+ if (!dlm) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto leave;
+ }
+
+ dlm->name = kstrdup(domain, GFP_KERNEL);
+ if (dlm->name == NULL) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto leave;
+ }
+
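+	/* The lockres hash is built from DLM_HASH_PAGES individually
+	 * allocated pages of hlist heads (dlm_alloc_pagevec()) and is
+	 * indexed through dlm_lockres_hash(); presumably this avoids one
+	 * large contiguous allocation for the whole table. */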
+ dlm->lockres_hash = (struct hlist_head **)dlm_alloc_pagevec(DLM_HASH_PAGES);
+ if (!dlm->lockres_hash) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto leave;
+ }
+
+ for (i = 0; i < DLM_HASH_BUCKETS; i++)
+ INIT_HLIST_HEAD(dlm_lockres_hash(dlm, i));
+
+ dlm->master_hash = (struct hlist_head **)
+ dlm_alloc_pagevec(DLM_HASH_PAGES);
+ if (!dlm->master_hash) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto leave;
+ }
+
+ for (i = 0; i < DLM_HASH_BUCKETS; i++)
+ INIT_HLIST_HEAD(dlm_master_hash(dlm, i));
+
+ dlm->key = key;
+ dlm->node_num = o2nm_this_node();
+
+ dlm_create_debugfs_subroot(dlm);
+
+ spin_lock_init(&dlm->spinlock);
+ spin_lock_init(&dlm->master_lock);
+ spin_lock_init(&dlm->ast_lock);
+ spin_lock_init(&dlm->track_lock);
+ INIT_LIST_HEAD(&dlm->list);
+ INIT_LIST_HEAD(&dlm->dirty_list);
+ INIT_LIST_HEAD(&dlm->reco.resources);
+ INIT_LIST_HEAD(&dlm->reco.node_data);
+ INIT_LIST_HEAD(&dlm->purge_list);
+ INIT_LIST_HEAD(&dlm->dlm_domain_handlers);
+ INIT_LIST_HEAD(&dlm->tracking_list);
+ dlm->reco.state = 0;
+
+ INIT_LIST_HEAD(&dlm->pending_asts);
+ INIT_LIST_HEAD(&dlm->pending_basts);
+
+ mlog(0, "dlm->recovery_map=%p, &(dlm->recovery_map[0])=%p\n",
+ dlm->recovery_map, &(dlm->recovery_map[0]));
+
+ bitmap_zero(dlm->recovery_map, O2NM_MAX_NODES);
+ bitmap_zero(dlm->live_nodes_map, O2NM_MAX_NODES);
+ bitmap_zero(dlm->domain_map, O2NM_MAX_NODES);
+
+ dlm->dlm_thread_task = NULL;
+ dlm->dlm_reco_thread_task = NULL;
+ dlm->dlm_worker = NULL;
+ init_waitqueue_head(&dlm->dlm_thread_wq);
+ init_waitqueue_head(&dlm->dlm_reco_thread_wq);
+ init_waitqueue_head(&dlm->reco.event);
+ init_waitqueue_head(&dlm->ast_wq);
+ init_waitqueue_head(&dlm->migration_wq);
+ INIT_LIST_HEAD(&dlm->mle_hb_events);
+
+ dlm->joining_node = DLM_LOCK_RES_OWNER_UNKNOWN;
+ init_waitqueue_head(&dlm->dlm_join_events);
+
+ dlm->migrate_done = 0;
+
+ dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
+ dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
+
+ atomic_set(&dlm->res_tot_count, 0);
+ atomic_set(&dlm->res_cur_count, 0);
+ for (i = 0; i < DLM_MLE_NUM_TYPES; ++i) {
+ atomic_set(&dlm->mle_tot_count[i], 0);
+ atomic_set(&dlm->mle_cur_count[i], 0);
+ }
+
+ spin_lock_init(&dlm->work_lock);
+ INIT_LIST_HEAD(&dlm->work_list);
+ INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work);
+
+ kref_init(&dlm->dlm_refs);
+ dlm->dlm_state = DLM_CTXT_NEW;
+
+ INIT_LIST_HEAD(&dlm->dlm_eviction_callbacks);
+
+ mlog(0, "context init: refcount %u\n",
+ kref_read(&dlm->dlm_refs));
+
+ ret = 0;
+leave:
+ if (ret < 0 && dlm) {
+ if (dlm->master_hash)
+ dlm_free_pagevec((void **)dlm->master_hash,
+ DLM_HASH_PAGES);
+
+ if (dlm->lockres_hash)
+ dlm_free_pagevec((void **)dlm->lockres_hash,
+ DLM_HASH_PAGES);
+
+ kfree(dlm->name);
+ kfree(dlm);
+ dlm = NULL;
+ }
+ return dlm;
+}
+
+/*
+ * Compare a requested locking protocol version against the current one.
+ *
+ * If the major numbers are different, they are incompatible.
+ * If the current minor is greater than the request, they are incompatible.
+ * If the current minor is less than or equal to the request, they are
+ * compatible, and the requester should run at the current minor version.
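+ *
+ * For example, existing 1.2 against a requested 1.4 is compatible and the
+ * requester is told to run minor 2, while existing 1.4 against a requested
+ * 1.2 is rejected.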
+ */
+static int dlm_protocol_compare(struct dlm_protocol_version *existing,
+ struct dlm_protocol_version *request)
+{
+ if (existing->pv_major != request->pv_major)
+ return 1;
+
+ if (existing->pv_minor > request->pv_minor)
+ return 1;
+
+ if (existing->pv_minor < request->pv_minor)
+ request->pv_minor = existing->pv_minor;
+
+ return 0;
+}
+
+/*
+ * dlm_register_domain: one-time setup per "domain".
+ *
+ * The filesystem passes in the requested locking version via fs_proto.
+ * If registration was successful, fs_proto will contain the negotiated
+ * locking protocol.
+ */
+struct dlm_ctxt * dlm_register_domain(const char *domain,
+ u32 key,
+ struct dlm_protocol_version *fs_proto)
+{
+ int ret;
+ struct dlm_ctxt *dlm = NULL;
+ struct dlm_ctxt *new_ctxt = NULL;
+
+ if (strlen(domain) >= O2NM_MAX_NAME_LEN) {
+ ret = -ENAMETOOLONG;
+ mlog(ML_ERROR, "domain name length too long\n");
+ goto leave;
+ }
+
+ mlog(0, "register called for domain \"%s\"\n", domain);
+
+retry:
+ dlm = NULL;
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ mlog_errno(ret);
+ goto leave;
+ }
+
+ spin_lock(&dlm_domain_lock);
+
+ dlm = __dlm_lookup_domain(domain);
+ if (dlm) {
+ if (dlm->dlm_state != DLM_CTXT_JOINED) {
+ spin_unlock(&dlm_domain_lock);
+
+ mlog(0, "This ctxt is not joined yet!\n");
+ wait_event_interruptible(dlm_domain_events,
+ dlm_wait_on_domain_helper(
+ domain));
+ goto retry;
+ }
+
+ if (dlm_protocol_compare(&dlm->fs_locking_proto, fs_proto)) {
+ spin_unlock(&dlm_domain_lock);
+ mlog(ML_ERROR,
+ "Requested locking protocol version is not "
+ "compatible with already registered domain "
+ "\"%s\"\n", domain);
+ ret = -EPROTO;
+ goto leave;
+ }
+
+ __dlm_get(dlm);
+ dlm->num_joins++;
+
+ spin_unlock(&dlm_domain_lock);
+
+ ret = 0;
+ goto leave;
+ }
+
+ /* doesn't exist */
+ if (!new_ctxt) {
+ spin_unlock(&dlm_domain_lock);
+
+ new_ctxt = dlm_alloc_ctxt(domain, key);
+ if (new_ctxt)
+ goto retry;
+
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto leave;
+ }
+
+ /* a little variable switch-a-roo here... */
+ dlm = new_ctxt;
+ new_ctxt = NULL;
+
+ /* add the new domain */
+ list_add_tail(&dlm->list, &dlm_domains);
+ spin_unlock(&dlm_domain_lock);
+
+ /*
+ * Pass the locking protocol version into the join. If the join
+ * succeeds, it will have the negotiated protocol set.
+ */
+ dlm->dlm_locking_proto = dlm_protocol;
+ dlm->fs_locking_proto = *fs_proto;
+
+ ret = dlm_join_domain(dlm);
+ if (ret) {
+ mlog_errno(ret);
+ dlm_put(dlm);
+ goto leave;
+ }
+
+ /* Tell the caller what locking protocol we negotiated */
+ *fs_proto = dlm->fs_locking_proto;
+
+ ret = 0;
+leave:
+ if (new_ctxt)
+ dlm_free_ctxt_mem(new_ctxt);
+
+ if (ret < 0)
+ dlm = ERR_PTR(ret);
+
+ return dlm;
+}
+EXPORT_SYMBOL_GPL(dlm_register_domain);
+
+static LIST_HEAD(dlm_join_handlers);
+
+static void dlm_unregister_net_handlers(void)
+{
+ o2net_unregister_handler_list(&dlm_join_handlers);
+}
+
+static int dlm_register_net_handlers(void)
+{
+ int status = 0;
+
+ status = o2net_register_handler(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY,
+ sizeof(struct dlm_query_join_request),
+ dlm_query_join_handler,
+ NULL, NULL, &dlm_join_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
+ sizeof(struct dlm_assert_joined),
+ dlm_assert_joined_handler,
+ NULL, NULL, &dlm_join_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
+ sizeof(struct dlm_cancel_join),
+ dlm_cancel_join_handler,
+ NULL, NULL, &dlm_join_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_QUERY_REGION, DLM_MOD_KEY,
+ sizeof(struct dlm_query_region),
+ dlm_query_region_handler,
+ NULL, NULL, &dlm_join_handlers);
+
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_QUERY_NODEINFO, DLM_MOD_KEY,
+ sizeof(struct dlm_query_nodeinfo),
+ dlm_query_nodeinfo_handler,
+ NULL, NULL, &dlm_join_handlers);
+bail:
+ if (status < 0)
+ dlm_unregister_net_handlers();
+
+ return status;
+}
+
+/* Domain eviction callback handling.
+ *
+ * The file system requires notification of node death *before* the
+ * dlm completes its recovery work, otherwise it may be able to
+ * acquire locks on resources requiring recovery. Since the dlm can
+ * evict a node from its domain *before* heartbeat fires, a similar
+ * mechanism is required. */
+
+/* Eviction is not expected to happen often, so a per-domain lock is
+ * not necessary. Eviction callbacks are allowed to sleep for short
+ * periods of time. */
+static DECLARE_RWSEM(dlm_callback_sem);
+
+void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
+ int node_num)
+{
+ struct dlm_eviction_cb *cb;
+
+ down_read(&dlm_callback_sem);
+ list_for_each_entry(cb, &dlm->dlm_eviction_callbacks, ec_item) {
+ cb->ec_func(node_num, cb->ec_data);
+ }
+ up_read(&dlm_callback_sem);
+}
+
+void dlm_setup_eviction_cb(struct dlm_eviction_cb *cb,
+ dlm_eviction_func *f,
+ void *data)
+{
+ INIT_LIST_HEAD(&cb->ec_item);
+ cb->ec_func = f;
+ cb->ec_data = data;
+}
+EXPORT_SYMBOL_GPL(dlm_setup_eviction_cb);
+
+void dlm_register_eviction_cb(struct dlm_ctxt *dlm,
+ struct dlm_eviction_cb *cb)
+{
+ down_write(&dlm_callback_sem);
+ list_add_tail(&cb->ec_item, &dlm->dlm_eviction_callbacks);
+ up_write(&dlm_callback_sem);
+}
+EXPORT_SYMBOL_GPL(dlm_register_eviction_cb);
+
+void dlm_unregister_eviction_cb(struct dlm_eviction_cb *cb)
+{
+ down_write(&dlm_callback_sem);
+ list_del_init(&cb->ec_item);
+ up_write(&dlm_callback_sem);
+}
+EXPORT_SYMBOL_GPL(dlm_unregister_eviction_cb);
+
+static int __init dlm_init(void)
+{
+ int status;
+
+ status = dlm_init_mle_cache();
+ if (status) {
+ mlog(ML_ERROR, "Could not create o2dlm_mle slabcache\n");
+ goto error;
+ }
+
+ status = dlm_init_master_caches();
+ if (status) {
+ mlog(ML_ERROR, "Could not create o2dlm_lockres and "
+ "o2dlm_lockname slabcaches\n");
+ goto error;
+ }
+
+ status = dlm_init_lock_cache();
+ if (status) {
+		mlog(ML_ERROR, "Could not create o2dlm_lock slabcache\n");
+ goto error;
+ }
+
+ status = dlm_register_net_handlers();
+ if (status) {
+ mlog(ML_ERROR, "Unable to register network handlers\n");
+ goto error;
+ }
+
+ dlm_create_debugfs_root();
+
+ return 0;
+error:
+ dlm_unregister_net_handlers();
+ dlm_destroy_lock_cache();
+ dlm_destroy_master_caches();
+ dlm_destroy_mle_cache();
+ return -1;
+}
+
+static void __exit dlm_exit(void)
+{
+ dlm_destroy_debugfs_root();
+ dlm_unregister_net_handlers();
+ dlm_destroy_lock_cache();
+ dlm_destroy_master_caches();
+ dlm_destroy_mle_cache();
+}
+
+MODULE_AUTHOR("Oracle");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("OCFS2 Distributed Lock Management");
+
+module_init(dlm_init);
+module_exit(dlm_exit);
diff --git a/fs/ocfs2/dlm/dlmdomain.h b/fs/ocfs2/dlm/dlmdomain.h
new file mode 100644
index 0000000000..815abe30ad
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmdomain.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * dlmdomain.h
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ */
+
+#ifndef DLMDOMAIN_H
+#define DLMDOMAIN_H
+
+extern spinlock_t dlm_domain_lock;
+extern struct list_head dlm_domains;
+
+static inline int dlm_joined(struct dlm_ctxt *dlm)
+{
+ int ret = 0;
+
+ spin_lock(&dlm_domain_lock);
+ if (dlm->dlm_state == DLM_CTXT_JOINED)
+ ret = 1;
+ spin_unlock(&dlm_domain_lock);
+
+ return ret;
+}
+
+static inline int dlm_shutting_down(struct dlm_ctxt *dlm)
+{
+ int ret = 0;
+
+ spin_lock(&dlm_domain_lock);
+ if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
+ ret = 1;
+ spin_unlock(&dlm_domain_lock);
+
+ return ret;
+}
+
+void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
+ int node_num);
+
+#endif
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
new file mode 100644
index 0000000000..041fd1791a
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -0,0 +1,743 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * dlmlock.c
+ *
+ * underlying calls for lock creation
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ */
+
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/random.h>
+#include <linux/blkdev.h>
+#include <linux/socket.h>
+#include <linux/inet.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+
+
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
+
+#include "dlmapi.h"
+#include "dlmcommon.h"
+
+#include "dlmconvert.h"
+
+#define MLOG_MASK_PREFIX ML_DLM
+#include "../cluster/masklog.h"
+
+static struct kmem_cache *dlm_lock_cache;
+
+static DEFINE_SPINLOCK(dlm_cookie_lock);
+static u64 dlm_next_cookie = 1;
+
+static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags);
+static void dlm_init_lock(struct dlm_lock *newlock, int type,
+ u8 node, u64 cookie);
+static void dlm_lock_release(struct kref *kref);
+static void dlm_lock_detach_lockres(struct dlm_lock *lock);
+
+int dlm_init_lock_cache(void)
+{
+ dlm_lock_cache = kmem_cache_create("o2dlm_lock",
+ sizeof(struct dlm_lock),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (dlm_lock_cache == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+void dlm_destroy_lock_cache(void)
+{
+ kmem_cache_destroy(dlm_lock_cache);
+}
+
+/* Tell us whether we can grant a new lock request.
+ * locking:
+ * caller needs: res->spinlock
+ * taken: none
+ * held on exit: none
+ * returns: 1 if the lock can be granted, 0 otherwise.
+ */
+static int dlm_can_grant_new_lock(struct dlm_lock_resource *res,
+ struct dlm_lock *lock)
+{
+ struct dlm_lock *tmplock;
+
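+	/* A new request is grantable only if its mode is compatible
+	 * (per dlm_lock_compatible()) with every granted lock and with
+	 * both the current and target modes of every pending convert;
+	 * in the usual DLM mode matrix, PR holders coexist while EX
+	 * excludes everything but NL. */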
+ list_for_each_entry(tmplock, &res->granted, list) {
+ if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
+ return 0;
+ }
+
+ list_for_each_entry(tmplock, &res->converting, list) {
+ if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
+ return 0;
+ if (!dlm_lock_compatible(tmplock->ml.convert_type,
+ lock->ml.type))
+ return 0;
+ }
+
+ return 1;
+}
+
+/* performs lock creation at the lockres master site
+ * locking:
+ * caller needs: none
+ * taken: takes and drops res->spinlock
+ * held on exit: none
+ * returns: DLM_NORMAL, DLM_NOTQUEUED
+ */
+static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags)
+{
+ int call_ast = 0, kick_thread = 0;
+ enum dlm_status status = DLM_NORMAL;
+
+ mlog(0, "type=%d\n", lock->ml.type);
+
+ spin_lock(&res->spinlock);
+ /* if called from dlm_create_lock_handler, need to
+ * ensure it will not sleep in dlm_wait_on_lockres */
+ status = __dlm_lockres_state_to_status(res);
+ if (status != DLM_NORMAL &&
+ lock->ml.node != dlm->node_num) {
+ /* erf. state changed after lock was dropped. */
+ spin_unlock(&res->spinlock);
+ dlm_error(status);
+ return status;
+ }
+ __dlm_wait_on_lockres(res);
+ __dlm_lockres_reserve_ast(res);
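+	/* this ast reservation is balanced below: either the ast is
+	 * queued or dlm_lockres_release_ast() drops it */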
+
+ if (dlm_can_grant_new_lock(res, lock)) {
+ mlog(0, "I can grant this lock right away\n");
+ /* got it right away */
+ lock->lksb->status = DLM_NORMAL;
+ status = DLM_NORMAL;
+ dlm_lock_get(lock);
+ list_add_tail(&lock->list, &res->granted);
+
+ /* for the recovery lock, we can't allow the ast
+ * to be queued since the dlmthread is already
+ * frozen. but the recovery lock is always locked
+ * with LKM_NOQUEUE so we do not need the ast in
+ * this special case */
+ if (!dlm_is_recovery_lock(res->lockname.name,
+ res->lockname.len)) {
+ kick_thread = 1;
+ call_ast = 1;
+ } else {
+ mlog(0, "%s: returning DLM_NORMAL to "
+ "node %u for reco lock\n", dlm->name,
+ lock->ml.node);
+ }
+ } else {
+ /* for NOQUEUE request, unless we get the
+ * lock right away, return DLM_NOTQUEUED */
+ if (flags & LKM_NOQUEUE) {
+ status = DLM_NOTQUEUED;
+ if (dlm_is_recovery_lock(res->lockname.name,
+ res->lockname.len)) {
+ mlog(0, "%s: returning NOTQUEUED to "
+ "node %u for reco lock\n", dlm->name,
+ lock->ml.node);
+ }
+ } else {
+ status = DLM_NORMAL;
+ dlm_lock_get(lock);
+ list_add_tail(&lock->list, &res->blocked);
+ kick_thread = 1;
+ }
+ }
+
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+
+ /* either queue the ast or release it */
+ if (call_ast)
+ dlm_queue_ast(dlm, lock);
+ else
+ dlm_lockres_release_ast(dlm, res);
+
+ dlm_lockres_calc_usage(dlm, res);
+ if (kick_thread)
+ dlm_kick_thread(dlm, res);
+
+ return status;
+}
+
+void dlm_revert_pending_lock(struct dlm_lock_resource *res,
+ struct dlm_lock *lock)
+{
+ /* remove from local queue if it failed */
+ list_del_init(&lock->list);
+ lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
+}
+
+
+/*
+ * locking:
+ * caller needs: none
+ * taken: takes and drops res->spinlock
+ * held on exit: none
+ * returns: DLM_DENIED, DLM_RECOVERING, or net status
+ */
+static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags)
+{
+ enum dlm_status status = DLM_DENIED;
+ int lockres_changed = 1;
+
+ mlog(0, "type=%d, lockres %.*s, flags = 0x%x\n",
+ lock->ml.type, res->lockname.len,
+ res->lockname.name, flags);
+
+ /*
+ * Wait if resource is getting recovered, remastered, etc.
+ * If the resource was remastered and new owner is self, then exit.
+ */
+ spin_lock(&res->spinlock);
+ __dlm_wait_on_lockres(res);
+ if (res->owner == dlm->node_num) {
+ spin_unlock(&res->spinlock);
+ return DLM_RECOVERING;
+ }
+ res->state |= DLM_LOCK_RES_IN_PROGRESS;
+
+ /* add lock to local (secondary) queue */
+ dlm_lock_get(lock);
+ list_add_tail(&lock->list, &res->blocked);
+ lock->lock_pending = 1;
+ spin_unlock(&res->spinlock);
+
+ /* spec seems to say that you will get DLM_NORMAL when the lock
+ * has been queued, meaning we need to wait for a reply here. */
+ status = dlm_send_remote_lock_request(dlm, res, lock, flags);
+
+ spin_lock(&res->spinlock);
+ res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+ lock->lock_pending = 0;
+ if (status != DLM_NORMAL) {
+ if (status == DLM_RECOVERING &&
+ dlm_is_recovery_lock(res->lockname.name,
+ res->lockname.len)) {
+ /* recovery lock was mastered by dead node.
+ * we need to have calc_usage shoot down this
+ * lockres and completely remaster it. */
+ mlog(0, "%s: recovery lock was owned by "
+ "dead node %u, remaster it now.\n",
+ dlm->name, res->owner);
+ } else if (status != DLM_NOTQUEUED) {
+ /*
+ * DO NOT call calc_usage, as this would unhash
+ * the remote lockres before we ever get to use
+ * it. treat as if we never made any change to
+ * the lockres.
+ */
+ lockres_changed = 0;
+ dlm_error(status);
+ }
+ dlm_revert_pending_lock(res, lock);
+ dlm_lock_put(lock);
+ } else if (dlm_is_recovery_lock(res->lockname.name,
+ res->lockname.len)) {
+ /* special case for the $RECOVERY lock.
+ * there will never be an AST delivered to put
+ * this lock on the proper secondary queue
+ * (granted), so do it manually. */
+ mlog(0, "%s: $RECOVERY lock for this node (%u) is "
+ "mastered by %u; got lock, manually granting (no ast)\n",
+ dlm->name, dlm->node_num, res->owner);
+ list_move_tail(&lock->list, &res->granted);
+ }
+ spin_unlock(&res->spinlock);
+
+ if (lockres_changed)
+ dlm_lockres_calc_usage(dlm, res);
+
+ wake_up(&res->wq);
+ return status;
+}
+
+
+/* for remote lock creation.
+ * locking:
+ * caller needs: none, but need res->state & DLM_LOCK_RES_IN_PROGRESS
+ * taken: none
+ * held on exit: none
+ * returns: DLM_NOLOCKMGR, or net status
+ */
+static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags)
+{
+ struct dlm_create_lock create;
+ int tmpret, status = 0;
+ enum dlm_status ret;
+
+ memset(&create, 0, sizeof(create));
+ create.node_idx = dlm->node_num;
+ create.requested_type = lock->ml.type;
+ create.cookie = lock->ml.cookie;
+ create.namelen = res->lockname.len;
+ create.flags = cpu_to_be32(flags);
+ memcpy(create.name, res->lockname.name, create.namelen);
+
+ tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create,
+ sizeof(create), res->owner, &status);
+ if (tmpret >= 0) {
+ ret = status;
+ if (ret == DLM_REJECTED) {
+ mlog(ML_ERROR, "%s: res %.*s, Stale lockres no longer "
+ "owned by node %u. That node is coming back up "
+ "currently.\n", dlm->name, create.namelen,
+ create.name, res->owner);
+ dlm_print_one_lock_resource(res);
+ BUG();
+ }
+ } else {
+		mlog(ML_ERROR, "%s: res %.*s, Error %d sending CREATE LOCK to "
+ "node %u\n", dlm->name, create.namelen, create.name,
+ tmpret, res->owner);
+ if (dlm_is_host_down(tmpret))
+ ret = DLM_RECOVERING;
+ else
+ ret = dlm_err_to_dlm_status(tmpret);
+ }
+
+ return ret;
+}
+
+void dlm_lock_get(struct dlm_lock *lock)
+{
+ kref_get(&lock->lock_refs);
+}
+
+void dlm_lock_put(struct dlm_lock *lock)
+{
+ kref_put(&lock->lock_refs, dlm_lock_release);
+}
+
+static void dlm_lock_release(struct kref *kref)
+{
+ struct dlm_lock *lock;
+
+ lock = container_of(kref, struct dlm_lock, lock_refs);
+
+ BUG_ON(!list_empty(&lock->list));
+ BUG_ON(!list_empty(&lock->ast_list));
+ BUG_ON(!list_empty(&lock->bast_list));
+ BUG_ON(lock->ast_pending);
+ BUG_ON(lock->bast_pending);
+
+ dlm_lock_detach_lockres(lock);
+
+ if (lock->lksb_kernel_allocated) {
+ mlog(0, "freeing kernel-allocated lksb\n");
+ kfree(lock->lksb);
+ }
+ kmem_cache_free(dlm_lock_cache, lock);
+}
+
+/* associate a lock with its lockres, getting a ref on the lockres */
+void dlm_lock_attach_lockres(struct dlm_lock *lock,
+ struct dlm_lock_resource *res)
+{
+ dlm_lockres_get(res);
+ lock->lockres = res;
+}
+
+/* drop ref on lockres, if there is still one associated with lock */
+static void dlm_lock_detach_lockres(struct dlm_lock *lock)
+{
+ struct dlm_lock_resource *res;
+
+ res = lock->lockres;
+ if (res) {
+ lock->lockres = NULL;
+ mlog(0, "removing lock's lockres reference\n");
+ dlm_lockres_put(res);
+ }
+}
+
+static void dlm_init_lock(struct dlm_lock *newlock, int type,
+ u8 node, u64 cookie)
+{
+ INIT_LIST_HEAD(&newlock->list);
+ INIT_LIST_HEAD(&newlock->ast_list);
+ INIT_LIST_HEAD(&newlock->bast_list);
+ spin_lock_init(&newlock->spinlock);
+ newlock->ml.type = type;
+ newlock->ml.convert_type = LKM_IVMODE;
+ newlock->ml.highest_blocked = LKM_IVMODE;
+ newlock->ml.node = node;
+ newlock->ml.pad1 = 0;
+ newlock->ml.list = 0;
+ newlock->ml.flags = 0;
+ newlock->ast = NULL;
+ newlock->bast = NULL;
+ newlock->astdata = NULL;
+ newlock->ml.cookie = cpu_to_be64(cookie);
+ newlock->ast_pending = 0;
+ newlock->bast_pending = 0;
+ newlock->convert_pending = 0;
+ newlock->lock_pending = 0;
+ newlock->unlock_pending = 0;
+ newlock->cancel_pending = 0;
+ newlock->lksb_kernel_allocated = 0;
+
+ kref_init(&newlock->lock_refs);
+}
+
+struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
+ struct dlm_lockstatus *lksb)
+{
+ struct dlm_lock *lock;
+ int kernel_allocated = 0;
+
+ lock = kmem_cache_zalloc(dlm_lock_cache, GFP_NOFS);
+ if (!lock)
+ return NULL;
+
+ if (!lksb) {
+ /* zero memory only if kernel-allocated */
+ lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
+ if (!lksb) {
+ kmem_cache_free(dlm_lock_cache, lock);
+ return NULL;
+ }
+ kernel_allocated = 1;
+ }
+
+ dlm_init_lock(lock, type, node, cookie);
+ if (kernel_allocated)
+ lock->lksb_kernel_allocated = 1;
+ lock->lksb = lksb;
+ lksb->lockid = lock;
+ return lock;
+}
+
+/* handler for lock creation net message
+ * locking:
+ * caller needs: none
+ * taken: takes and drops res->spinlock
+ * held on exit: none
+ * returns: DLM_NORMAL, DLM_SYSERR, DLM_IVLOCKID, DLM_NOTQUEUED
+ */
+int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_create_lock *create = (struct dlm_create_lock *)msg->buf;
+ struct dlm_lock_resource *res = NULL;
+ struct dlm_lock *newlock = NULL;
+ struct dlm_lockstatus *lksb = NULL;
+ enum dlm_status status = DLM_NORMAL;
+ char *name;
+ unsigned int namelen;
+
+ BUG_ON(!dlm);
+
+ if (!dlm_grab(dlm))
+ return DLM_REJECTED;
+
+ name = create->name;
+ namelen = create->namelen;
+ status = DLM_REJECTED;
+ if (!dlm_domain_fully_joined(dlm)) {
+ mlog(ML_ERROR, "Domain %s not fully joined, but node %u is "
+ "sending a create_lock message for lock %.*s!\n",
+ dlm->name, create->node_idx, namelen, name);
+ dlm_error(status);
+ goto leave;
+ }
+
+ status = DLM_IVBUFLEN;
+ if (namelen > DLM_LOCKID_NAME_MAX) {
+ dlm_error(status);
+ goto leave;
+ }
+
+ status = DLM_SYSERR;
+ newlock = dlm_new_lock(create->requested_type,
+ create->node_idx,
+ be64_to_cpu(create->cookie), NULL);
+ if (!newlock) {
+ dlm_error(status);
+ goto leave;
+ }
+
+ lksb = newlock->lksb;
+
+ if (be32_to_cpu(create->flags) & LKM_GET_LVB) {
+ lksb->flags |= DLM_LKSB_GET_LVB;
+ mlog(0, "set DLM_LKSB_GET_LVB flag\n");
+ }
+
+ status = DLM_IVLOCKID;
+ res = dlm_lookup_lockres(dlm, name, namelen);
+ if (!res) {
+ dlm_error(status);
+ goto leave;
+ }
+
+ spin_lock(&res->spinlock);
+ status = __dlm_lockres_state_to_status(res);
+ spin_unlock(&res->spinlock);
+
+ if (status != DLM_NORMAL) {
+ mlog(0, "lockres recovering/migrating/in-progress\n");
+ goto leave;
+ }
+
+ dlm_lock_attach_lockres(newlock, res);
+
+ status = dlmlock_master(dlm, res, newlock, be32_to_cpu(create->flags));
+leave:
+ if (status != DLM_NORMAL)
+ if (newlock)
+ dlm_lock_put(newlock);
+
+ if (res)
+ dlm_lockres_put(res);
+
+ dlm_put(dlm);
+
+ return status;
+}
+
+
+/* fetch next node-local (u8 nodenum + u56 cookie) into u64 */
+static inline void dlm_get_next_cookie(u8 node_num, u64 *cookie)
+{
+ u64 tmpnode = node_num;
+
+ /* shift single byte of node num into top 8 bits */
+ tmpnode <<= 56;
+
+ spin_lock(&dlm_cookie_lock);
+ *cookie = (dlm_next_cookie | tmpnode);
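+ /* the cookie counter occupies the low 56 bits; if the increment
+ * carries into the top node byte, wrap it back to 1 */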
+ if (++dlm_next_cookie & 0xff00000000000000ull) {
+ mlog(0, "This node's cookie will now wrap!\n");
+ dlm_next_cookie = 1;
+ }
+ spin_unlock(&dlm_cookie_lock);
+}
+
+enum dlm_status dlmlock(struct dlm_ctxt *dlm, int mode,
+ struct dlm_lockstatus *lksb, int flags,
+ const char *name, int namelen, dlm_astlockfunc_t *ast,
+ void *data, dlm_bastlockfunc_t *bast)
+{
+ enum dlm_status status;
+ struct dlm_lock_resource *res = NULL;
+ struct dlm_lock *lock = NULL;
+ int convert = 0, recovery = 0;
+
+ /* yes this function is a mess.
+ * TODO: clean this up. lots of common code in the
+ * lock and convert paths, especially in the retry blocks */
+ if (!lksb) {
+ dlm_error(DLM_BADARGS);
+ return DLM_BADARGS;
+ }
+
+ status = DLM_BADPARAM;
+ if (mode != LKM_EXMODE && mode != LKM_PRMODE && mode != LKM_NLMODE) {
+ dlm_error(status);
+ goto error;
+ }
+
+ if (flags & ~LKM_VALID_FLAGS) {
+ dlm_error(status);
+ goto error;
+ }
+
+ convert = (flags & LKM_CONVERT);
+ recovery = (flags & LKM_RECOVERY);
+
+ if (recovery &&
+ (!dlm_is_recovery_lock(name, namelen) || convert) ) {
+ dlm_error(status);
+ goto error;
+ }
+ if (convert && (flags & LKM_LOCAL)) {
+ mlog(ML_ERROR, "strange LOCAL convert request!\n");
+ goto error;
+ }
+
+ if (convert) {
+ /* CONVERT request */
+
+ /* if converting, must pass in a valid dlm_lock */
+ lock = lksb->lockid;
+ if (!lock) {
+ mlog(ML_ERROR, "NULL lock pointer in convert "
+ "request\n");
+ goto error;
+ }
+
+ res = lock->lockres;
+ if (!res) {
+ mlog(ML_ERROR, "NULL lockres pointer in convert "
+ "request\n");
+ goto error;
+ }
+ dlm_lockres_get(res);
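+ /* take a lockres ref for the convert path; it is released at the
+ * common exit at the bottom of this function */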
+
+ /* XXX: for ocfs2 purposes, the ast/bast/astdata/lksb are
+ * static after the original lock call. convert requests will
+ * ensure that everything is the same, or return DLM_BADARGS.
+ * this means that DLM_DENIED_NOASTS will never be returned.
+ */
+ if (lock->lksb != lksb || lock->ast != ast ||
+ lock->bast != bast || lock->astdata != data) {
+ status = DLM_BADARGS;
+ mlog(ML_ERROR, "new args: lksb=%p, ast=%p, bast=%p, "
+ "astdata=%p\n", lksb, ast, bast, data);
+ mlog(ML_ERROR, "orig args: lksb=%p, ast=%p, bast=%p, "
+ "astdata=%p\n", lock->lksb, lock->ast,
+ lock->bast, lock->astdata);
+ goto error;
+ }
+retry_convert:
+ dlm_wait_for_recovery(dlm);
+
+ if (res->owner == dlm->node_num)
+ status = dlmconvert_master(dlm, res, lock, flags, mode);
+ else
+ status = dlmconvert_remote(dlm, res, lock, flags, mode);
+ if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
+ status == DLM_FORWARD) {
+ /* for now, see how this works without sleeping
+ * and just retry right away. I suspect the reco
+ * or migration will complete fast enough that
+ * no waiting will be necessary */
+ mlog(0, "retrying convert with migration/recovery/"
+ "in-progress\n");
+ msleep(100);
+ goto retry_convert;
+ }
+ } else {
+ u64 tmpcookie;
+
+ /* LOCK request */
+ status = DLM_BADARGS;
+ if (!name) {
+ dlm_error(status);
+ goto error;
+ }
+
+ status = DLM_IVBUFLEN;
+ if (namelen > DLM_LOCKID_NAME_MAX || namelen < 1) {
+ dlm_error(status);
+ goto error;
+ }
+
+ dlm_get_next_cookie(dlm->node_num, &tmpcookie);
+ lock = dlm_new_lock(mode, dlm->node_num, tmpcookie, lksb);
+ if (!lock) {
+ dlm_error(status);
+ goto error;
+ }
+
+ if (!recovery)
+ dlm_wait_for_recovery(dlm);
+
+ /* find or create the lock resource */
+ res = dlm_get_lock_resource(dlm, name, namelen, flags);
+ if (!res) {
+ status = DLM_IVLOCKID;
+ dlm_error(status);
+ goto error;
+ }
+
+ mlog(0, "type=%d, flags = 0x%x\n", mode, flags);
+ mlog(0, "creating lock: lock=%p res=%p\n", lock, res);
+
+ dlm_lock_attach_lockres(lock, res);
+ lock->ast = ast;
+ lock->bast = bast;
+ lock->astdata = data;
+
+retry_lock:
+ if (flags & LKM_VALBLK) {
+ mlog(0, "LKM_VALBLK passed by caller\n");
+
+ /* LVB requests for non PR, PW or EX locks are
+ * ignored. */
+ if (mode < LKM_PRMODE)
+ flags &= ~LKM_VALBLK;
+ else {
+ flags |= LKM_GET_LVB;
+ lock->lksb->flags |= DLM_LKSB_GET_LVB;
+ }
+ }
+
+ if (res->owner == dlm->node_num)
+ status = dlmlock_master(dlm, res, lock, flags);
+ else
+ status = dlmlock_remote(dlm, res, lock, flags);
+
+ if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
+ status == DLM_FORWARD) {
+ msleep(100);
+ if (recovery) {
+ if (status != DLM_RECOVERING)
+ goto retry_lock;
+ /* wait to see the node go down, then
+ * drop down and allow the lockres to
+ * get cleaned up. need to remaster. */
+ dlm_wait_for_node_death(dlm, res->owner,
+ DLM_NODE_DEATH_WAIT_MAX);
+ } else {
+ dlm_wait_for_recovery(dlm);
+ goto retry_lock;
+ }
+ }
+
+ /* Inflight taken in dlm_get_lock_resource() is dropped here */
+ spin_lock(&res->spinlock);
+ dlm_lockres_drop_inflight_ref(dlm, res);
+ spin_unlock(&res->spinlock);
+
+ dlm_lockres_calc_usage(dlm, res);
+ dlm_kick_thread(dlm, res);
+
+ if (status != DLM_NORMAL) {
+ lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
+ if (status != DLM_NOTQUEUED)
+ dlm_error(status);
+ goto error;
+ }
+ }
+
+error:
+ if (status != DLM_NORMAL) {
+ if (lock && !convert)
+ dlm_lock_put(lock);
+ // this is kind of unnecessary
+ lksb->status = status;
+ }
+
+ /* put lockres ref from the convert path
+ * or from dlm_get_lock_resource */
+ if (res)
+ dlm_lockres_put(res);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(dlmlock);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
new file mode 100644
index 0000000000..d610da8e2f
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -0,0 +1,3564 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * dlmmaster.c
+ *
+ * standalone DLM module
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ */
+
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/random.h>
+#include <linux/blkdev.h>
+#include <linux/socket.h>
+#include <linux/inet.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+
+
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
+
+#include "dlmapi.h"
+#include "dlmcommon.h"
+#include "dlmdomain.h"
+#include "dlmdebug.h"
+
+#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
+#include "../cluster/masklog.h"
+
+static void dlm_mle_node_down(struct dlm_ctxt *dlm,
+ struct dlm_master_list_entry *mle,
+ struct o2nm_node *node,
+ int idx);
+static void dlm_mle_node_up(struct dlm_ctxt *dlm,
+ struct dlm_master_list_entry *mle,
+ struct o2nm_node *node,
+ int idx);
+
+static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
+static int dlm_do_assert_master(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ void *nodemap, u32 flags);
+static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);
+
+static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
+ struct dlm_master_list_entry *mle,
+ const char *name,
+ unsigned int namelen)
+{
+ if (dlm != mle->dlm)
+ return 0;
+
+ if (namelen != mle->mnamelen ||
+ memcmp(name, mle->mname, namelen) != 0)
+ return 0;
+
+ return 1;
+}
+
+static struct kmem_cache *dlm_lockres_cache;
+static struct kmem_cache *dlm_lockname_cache;
+static struct kmem_cache *dlm_mle_cache;
+
+static void dlm_mle_release(struct kref *kref);
+static void dlm_init_mle(struct dlm_master_list_entry *mle,
+ enum dlm_mle_type type,
+ struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ const char *name,
+ unsigned int namelen);
+static void dlm_put_mle(struct dlm_master_list_entry *mle);
+static void __dlm_put_mle(struct dlm_master_list_entry *mle);
+static int dlm_find_mle(struct dlm_ctxt *dlm,
+ struct dlm_master_list_entry **mle,
+ char *name, unsigned int namelen);
+
+static int dlm_do_master_request(struct dlm_lock_resource *res,
+ struct dlm_master_list_entry *mle, int to);
+
+
+static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_master_list_entry *mle,
+ int *blocked);
+static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_master_list_entry *mle,
+ int blocked);
+static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_master_list_entry *mle,
+ struct dlm_master_list_entry **oldmle,
+ const char *name, unsigned int namelen,
+ u8 new_master, u8 master);
+
+static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
+static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
+static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ u8 target);
+static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
+
+
+int dlm_is_host_down(int errno)
+{
+ switch (errno) {
+ case -EBADF:
+ case -ECONNREFUSED:
+ case -ENOTCONN:
+ case -ECONNRESET:
+ case -EPIPE:
+ case -EHOSTDOWN:
+ case -EHOSTUNREACH:
+ case -ETIMEDOUT:
+ case -ECONNABORTED:
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ case -ENETRESET:
+ case -ESHUTDOWN:
+ case -ENOPROTOOPT:
+ case -EINVAL: /* if returned from our tcp code,
+ this means there is no socket */
+ return 1;
+ }
+ return 0;
+}
+
+
+/*
+ * MASTER LIST FUNCTIONS
+ */
+
+
+/*
+ * regarding master list entries and heartbeat callbacks:
+ *
+ * in order to avoid sleeping and allocation that occurs in
+ * heartbeat, master list entries are simply attached to the
+ * dlm's established heartbeat callbacks. the mle is attached
+ * when it is created, and since the dlm->spinlock is held at
+ * that time, any heartbeat event will be properly discovered
+ * by the mle. the mle needs to be detached from the
+ * dlm->mle_hb_events list as soon as heartbeat events are no
+ * longer useful to the mle, and before the mle is freed.
+ *
+ * as a general rule, heartbeat events are no longer needed by
+ * the mle once an "answer" regarding the lock master has been
+ * received.
+ */
+static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
+ struct dlm_master_list_entry *mle)
+{
+ assert_spin_locked(&dlm->spinlock);
+
+ list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
+}
+
+
+static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
+ struct dlm_master_list_entry *mle)
+{
+ if (!list_empty(&mle->hb_events))
+ list_del_init(&mle->hb_events);
+}
+
+
+static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
+ struct dlm_master_list_entry *mle)
+{
+ spin_lock(&dlm->spinlock);
+ __dlm_mle_detach_hb_events(dlm, mle);
+ spin_unlock(&dlm->spinlock);
+}
+
+static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
+{
+ struct dlm_ctxt *dlm;
+ dlm = mle->dlm;
+
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&dlm->master_lock);
+ mle->inuse++;
+ kref_get(&mle->mle_refs);
+}
+
+static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
+{
+ struct dlm_ctxt *dlm;
+ dlm = mle->dlm;
+
+ spin_lock(&dlm->spinlock);
+ spin_lock(&dlm->master_lock);
+ mle->inuse--;
+ __dlm_put_mle(mle);
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+
+}
+
+/* remove from list and free */
+static void __dlm_put_mle(struct dlm_master_list_entry *mle)
+{
+ struct dlm_ctxt *dlm;
+ dlm = mle->dlm;
+
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&dlm->master_lock);
+ if (!kref_read(&mle->mle_refs)) {
+ /* this may or may not crash, but who cares.
+ * it's a BUG. */
+ mlog(ML_ERROR, "bad mle: %p\n", mle);
+ dlm_print_one_mle(mle);
+ BUG();
+ } else
+ kref_put(&mle->mle_refs, dlm_mle_release);
+}
+
+
+/* must not have any spinlocks coming in */
+static void dlm_put_mle(struct dlm_master_list_entry *mle)
+{
+ struct dlm_ctxt *dlm;
+ dlm = mle->dlm;
+
+ spin_lock(&dlm->spinlock);
+ spin_lock(&dlm->master_lock);
+ __dlm_put_mle(mle);
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+}
+
+static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
+{
+ kref_get(&mle->mle_refs);
+}
+
+static void dlm_init_mle(struct dlm_master_list_entry *mle,
+ enum dlm_mle_type type,
+ struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ const char *name,
+ unsigned int namelen)
+{
+ assert_spin_locked(&dlm->spinlock);
+
+ mle->dlm = dlm;
+ mle->type = type;
+ INIT_HLIST_NODE(&mle->master_hash_node);
+ INIT_LIST_HEAD(&mle->hb_events);
+ bitmap_zero(mle->maybe_map, O2NM_MAX_NODES);
+ spin_lock_init(&mle->spinlock);
+ init_waitqueue_head(&mle->wq);
+ atomic_set(&mle->woken, 0);
+ kref_init(&mle->mle_refs);
+ bitmap_zero(mle->response_map, O2NM_MAX_NODES);
+ mle->master = O2NM_MAX_NODES;
+ mle->new_master = O2NM_MAX_NODES;
+ mle->inuse = 0;
+
+ BUG_ON(mle->type != DLM_MLE_BLOCK &&
+ mle->type != DLM_MLE_MASTER &&
+ mle->type != DLM_MLE_MIGRATION);
+
+ if (mle->type == DLM_MLE_MASTER) {
+ BUG_ON(!res);
+ mle->mleres = res;
+ memcpy(mle->mname, res->lockname.name, res->lockname.len);
+ mle->mnamelen = res->lockname.len;
+ mle->mnamehash = res->lockname.hash;
+ } else {
+ BUG_ON(!name);
+ mle->mleres = NULL;
+ memcpy(mle->mname, name, namelen);
+ mle->mnamelen = namelen;
+ mle->mnamehash = dlm_lockid_hash(name, namelen);
+ }
+
+ atomic_inc(&dlm->mle_tot_count[mle->type]);
+ atomic_inc(&dlm->mle_cur_count[mle->type]);
+
+ /* copy off the node_map and register hb callbacks on our copy */
+ bitmap_copy(mle->node_map, dlm->domain_map, O2NM_MAX_NODES);
+ bitmap_copy(mle->vote_map, dlm->domain_map, O2NM_MAX_NODES);
+ clear_bit(dlm->node_num, mle->vote_map);
+ clear_bit(dlm->node_num, mle->node_map);
+
+ /* attach the mle to the domain node up/down events */
+ __dlm_mle_attach_hb_events(dlm, mle);
+}
+
+void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
+{
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&dlm->master_lock);
+
+ if (!hlist_unhashed(&mle->master_hash_node))
+ hlist_del_init(&mle->master_hash_node);
+}
+
+void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
+{
+ struct hlist_head *bucket;
+
+ assert_spin_locked(&dlm->master_lock);
+
+ bucket = dlm_master_hash(dlm, mle->mnamehash);
+ hlist_add_head(&mle->master_hash_node, bucket);
+}
+
+/* returns 1 if found, 0 if not */
+static int dlm_find_mle(struct dlm_ctxt *dlm,
+ struct dlm_master_list_entry **mle,
+ char *name, unsigned int namelen)
+{
+ struct dlm_master_list_entry *tmpmle;
+ struct hlist_head *bucket;
+ unsigned int hash;
+
+ assert_spin_locked(&dlm->master_lock);
+
+ hash = dlm_lockid_hash(name, namelen);
+ bucket = dlm_master_hash(dlm, hash);
+ hlist_for_each_entry(tmpmle, bucket, master_hash_node) {
+ if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
+ continue;
+ dlm_get_mle(tmpmle);
+ *mle = tmpmle;
+ return 1;
+ }
+ return 0;
+}
+
+void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
+{
+ struct dlm_master_list_entry *mle;
+
+ assert_spin_locked(&dlm->spinlock);
+
+ list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
+ if (node_up)
+ dlm_mle_node_up(dlm, mle, NULL, idx);
+ else
+ dlm_mle_node_down(dlm, mle, NULL, idx);
+ }
+}
+
+static void dlm_mle_node_down(struct dlm_ctxt *dlm,
+ struct dlm_master_list_entry *mle,
+ struct o2nm_node *node, int idx)
+{
+ spin_lock(&mle->spinlock);
+
+ if (!test_bit(idx, mle->node_map))
+ mlog(0, "node %u already removed from nodemap!\n", idx);
+ else
+ clear_bit(idx, mle->node_map);
+
+ spin_unlock(&mle->spinlock);
+}
+
+static void dlm_mle_node_up(struct dlm_ctxt *dlm,
+ struct dlm_master_list_entry *mle,
+ struct o2nm_node *node, int idx)
+{
+ spin_lock(&mle->spinlock);
+
+ if (test_bit(idx, mle->node_map))
+ mlog(0, "node %u already in node map!\n", idx);
+ else
+ set_bit(idx, mle->node_map);
+
+ spin_unlock(&mle->spinlock);
+}
+
+
+int dlm_init_mle_cache(void)
+{
+ dlm_mle_cache = kmem_cache_create("o2dlm_mle",
+ sizeof(struct dlm_master_list_entry),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (dlm_mle_cache == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+void dlm_destroy_mle_cache(void)
+{
+ kmem_cache_destroy(dlm_mle_cache);
+}
+
+static void dlm_mle_release(struct kref *kref)
+{
+ struct dlm_master_list_entry *mle;
+ struct dlm_ctxt *dlm;
+
+ mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
+ dlm = mle->dlm;
+
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&dlm->master_lock);
+
+ mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname,
+ mle->type);
+
+ /* remove from list if not already */
+ __dlm_unlink_mle(dlm, mle);
+
+ /* detach the mle from the domain node up/down events */
+ __dlm_mle_detach_hb_events(dlm, mle);
+
+ atomic_dec(&dlm->mle_cur_count[mle->type]);
+
+ /* NOTE: kmem_cache_free under spinlock here.
+ * if this is bad, we can move this to a freelist. */
+ kmem_cache_free(dlm_mle_cache, mle);
+}
+
+
+/*
+ * LOCK RESOURCE FUNCTIONS
+ */
+
+int dlm_init_master_caches(void)
+{
+ dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
+ sizeof(struct dlm_lock_resource),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!dlm_lockres_cache)
+ goto bail;
+
+ dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
+ DLM_LOCKID_NAME_MAX, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!dlm_lockname_cache)
+ goto bail;
+
+ return 0;
+bail:
+ dlm_destroy_master_caches();
+ return -ENOMEM;
+}
+
+void dlm_destroy_master_caches(void)
+{
+ kmem_cache_destroy(dlm_lockname_cache);
+ dlm_lockname_cache = NULL;
+
+ kmem_cache_destroy(dlm_lockres_cache);
+ dlm_lockres_cache = NULL;
+}
+
+static void dlm_lockres_release(struct kref *kref)
+{
+ struct dlm_lock_resource *res;
+ struct dlm_ctxt *dlm;
+
+ res = container_of(kref, struct dlm_lock_resource, refs);
+ dlm = res->dlm;
+
+ /* This should not happen -- every lockres has a name
+ * associated with it at init time. */
+ BUG_ON(!res->lockname.name);
+
+ mlog(0, "destroying lockres %.*s\n", res->lockname.len,
+ res->lockname.name);
+
+ atomic_dec(&dlm->res_cur_count);
+
+ if (!hlist_unhashed(&res->hash_node) ||
+ !list_empty(&res->granted) ||
+ !list_empty(&res->converting) ||
+ !list_empty(&res->blocked) ||
+ !list_empty(&res->dirty) ||
+ !list_empty(&res->recovering) ||
+ !list_empty(&res->purge)) {
+ mlog(ML_ERROR,
+ "Going to BUG for resource %.*s."
+ " We're on a list! [%c%c%c%c%c%c%c]\n",
+ res->lockname.len, res->lockname.name,
+ !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
+ !list_empty(&res->granted) ? 'G' : ' ',
+ !list_empty(&res->converting) ? 'C' : ' ',
+ !list_empty(&res->blocked) ? 'B' : ' ',
+ !list_empty(&res->dirty) ? 'D' : ' ',
+ !list_empty(&res->recovering) ? 'R' : ' ',
+ !list_empty(&res->purge) ? 'P' : ' ');
+
+ dlm_print_one_lock_resource(res);
+ }
+
+ /* By the time we're ready to blow this guy away, we shouldn't
+ * be on any lists. */
+ BUG_ON(!hlist_unhashed(&res->hash_node));
+ BUG_ON(!list_empty(&res->granted));
+ BUG_ON(!list_empty(&res->converting));
+ BUG_ON(!list_empty(&res->blocked));
+ BUG_ON(!list_empty(&res->dirty));
+ BUG_ON(!list_empty(&res->recovering));
+ BUG_ON(!list_empty(&res->purge));
+
+ kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);
+
+ kmem_cache_free(dlm_lockres_cache, res);
+}
+
+void dlm_lockres_put(struct dlm_lock_resource *res)
+{
+ kref_put(&res->refs, dlm_lockres_release);
+}
+
+static void dlm_init_lockres(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ const char *name, unsigned int namelen)
+{
+ char *qname;
+
+ /* If we memset here, we lose our reference to the kmalloc'd
+ * res->lockname.name, so be sure to init every field
+ * correctly! */
+
+ qname = (char *) res->lockname.name;
+ memcpy(qname, name, namelen);
+
+ res->lockname.len = namelen;
+ res->lockname.hash = dlm_lockid_hash(name, namelen);
+
+ init_waitqueue_head(&res->wq);
+ spin_lock_init(&res->spinlock);
+ INIT_HLIST_NODE(&res->hash_node);
+ INIT_LIST_HEAD(&res->granted);
+ INIT_LIST_HEAD(&res->converting);
+ INIT_LIST_HEAD(&res->blocked);
+ INIT_LIST_HEAD(&res->dirty);
+ INIT_LIST_HEAD(&res->recovering);
+ INIT_LIST_HEAD(&res->purge);
+ INIT_LIST_HEAD(&res->tracking);
+ atomic_set(&res->asts_reserved, 0);
+ res->migration_pending = 0;
+ res->inflight_locks = 0;
+ res->inflight_assert_workers = 0;
+
+ res->dlm = dlm;
+
+ kref_init(&res->refs);
+
+ atomic_inc(&dlm->res_tot_count);
+ atomic_inc(&dlm->res_cur_count);
+
+ /* just for consistency */
+ spin_lock(&res->spinlock);
+ dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
+ spin_unlock(&res->spinlock);
+
+ res->state = DLM_LOCK_RES_IN_PROGRESS;
+
+ res->last_used = 0;
+
+ spin_lock(&dlm->track_lock);
+ list_add_tail(&res->tracking, &dlm->tracking_list);
+ spin_unlock(&dlm->track_lock);
+
+ memset(res->lvb, 0, DLM_LVB_LEN);
+ bitmap_zero(res->refmap, O2NM_MAX_NODES);
+}
+
+struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
+ const char *name,
+ unsigned int namelen)
+{
+ struct dlm_lock_resource *res = NULL;
+
+ res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
+ if (!res)
+ goto error;
+
+ res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
+ if (!res->lockname.name)
+ goto error;
+
+ dlm_init_lockres(dlm, res, name, namelen);
+ return res;
+
+error:
+ if (res)
+ kmem_cache_free(dlm_lockres_cache, res);
+ return NULL;
+}
+
+void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res, int bit)
+{
+ assert_spin_locked(&res->spinlock);
+
+ mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,
+ res->lockname.name, bit, __builtin_return_address(0));
+
+ set_bit(bit, res->refmap);
+}
+
+void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res, int bit)
+{
+ assert_spin_locked(&res->spinlock);
+
+ mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,
+ res->lockname.name, bit, __builtin_return_address(0));
+
+ clear_bit(bit, res->refmap);
+}
+
+static void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ res->inflight_locks++;
+
+ mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
+ res->lockname.len, res->lockname.name, res->inflight_locks,
+ __builtin_return_address(0));
+}
+
+void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ assert_spin_locked(&res->spinlock);
+ __dlm_lockres_grab_inflight_ref(dlm, res);
+}
+
+void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ assert_spin_locked(&res->spinlock);
+
+ BUG_ON(res->inflight_locks == 0);
+
+ res->inflight_locks--;
+
+ mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
+ res->lockname.len, res->lockname.name, res->inflight_locks,
+ __builtin_return_address(0));
+
+ wake_up(&res->wq);
+}
+
+void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ assert_spin_locked(&res->spinlock);
+ res->inflight_assert_workers++;
+ mlog(0, "%s:%.*s: inflight assert worker++: now %u\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ res->inflight_assert_workers);
+}
+
+static void __dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ assert_spin_locked(&res->spinlock);
+ BUG_ON(res->inflight_assert_workers == 0);
+ res->inflight_assert_workers--;
+ mlog(0, "%s:%.*s: inflight assert worker--: now %u\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ res->inflight_assert_workers);
+}
+
+static void dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ spin_lock(&res->spinlock);
+ __dlm_lockres_drop_inflight_worker(dlm, res);
+ spin_unlock(&res->spinlock);
+}
+
+/*
+ * lookup a lock resource by name.
+ * may already exist in the hashtable.
+ * lockid is null terminated
+ *
+ * if not, allocate enough for the lockres and for
+ * the temporary structure used in doing the mastering.
+ *
+ * also, do a lookup in the dlm->master_list to see
+ * if another node has begun mastering the same lock.
+ * if so, there should be a block entry in there
+ * for this name, and we should *not* attempt to master
+ * the lock here. need to wait around for that node
+ * to assert_master (or die).
+ *
+ */
+struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
+ const char *lockid,
+ int namelen,
+ int flags)
+{
+ struct dlm_lock_resource *tmpres=NULL, *res=NULL;
+ struct dlm_master_list_entry *mle = NULL;
+ struct dlm_master_list_entry *alloc_mle = NULL;
+ int blocked = 0;
+ int ret, nodenum;
+ struct dlm_node_iter iter;
+ unsigned int hash;
+ int tries = 0;
+ int bit, wait_on_recovery = 0;
+
+ BUG_ON(!lockid);
+
+ hash = dlm_lockid_hash(lockid, namelen);
+
+ mlog(0, "get lockres %s (len %d)\n", lockid, namelen);
+
+lookup:
+ spin_lock(&dlm->spinlock);
+ tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
+ if (tmpres) {
+ spin_unlock(&dlm->spinlock);
+ spin_lock(&tmpres->spinlock);
+
+ /*
+ * Right after dlm spinlock was released, dlm_thread could have
+ * purged the lockres. Check if lockres got unhashed. If so
+ * start over.
+ */
+ if (hlist_unhashed(&tmpres->hash_node)) {
+ spin_unlock(&tmpres->spinlock);
+ dlm_lockres_put(tmpres);
+ tmpres = NULL;
+ goto lookup;
+ }
+
+ /* Wait on the thread that is mastering the resource */
+ if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
+ __dlm_wait_on_lockres(tmpres);
+ BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
+ spin_unlock(&tmpres->spinlock);
+ dlm_lockres_put(tmpres);
+ tmpres = NULL;
+ goto lookup;
+ }
+
+ /* Wait on the resource purge to complete before continuing */
+ if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) {
+ BUG_ON(tmpres->owner == dlm->node_num);
+ __dlm_wait_on_lockres_flags(tmpres,
+ DLM_LOCK_RES_DROPPING_REF);
+ spin_unlock(&tmpres->spinlock);
+ dlm_lockres_put(tmpres);
+ tmpres = NULL;
+ goto lookup;
+ }
+
+ /* Grab inflight ref to pin the resource */
+ dlm_lockres_grab_inflight_ref(dlm, tmpres);
+
+ spin_unlock(&tmpres->spinlock);
+ if (res) {
+ spin_lock(&dlm->track_lock);
+ if (!list_empty(&res->tracking))
+ list_del_init(&res->tracking);
+ else
+ mlog(ML_ERROR, "Resource %.*s not "
+ "on the Tracking list\n",
+ res->lockname.len,
+ res->lockname.name);
+ spin_unlock(&dlm->track_lock);
+ dlm_lockres_put(res);
+ }
+ res = tmpres;
+ goto leave;
+ }
+
+ if (!res) {
+ spin_unlock(&dlm->spinlock);
+ mlog(0, "allocating a new resource\n");
+ /* nothing found and we need to allocate one. */
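+ /* the dlm spinlock was dropped above because these allocations
+ * may sleep; retry the lookup afterwards in case another thread
+ * inserted this lockres in the meantime */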
+ alloc_mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
+ if (!alloc_mle)
+ goto leave;
+ res = dlm_new_lockres(dlm, lockid, namelen);
+ if (!res)
+ goto leave;
+ goto lookup;
+ }
+
+ mlog(0, "no lockres found, allocated our own: %p\n", res);
+
+ if (flags & LKM_LOCAL) {
+ /* caller knows it's safe to assume it's not mastered elsewhere
+ * DONE! return right away */
+ spin_lock(&res->spinlock);
+ dlm_change_lockres_owner(dlm, res, dlm->node_num);
+ __dlm_insert_lockres(dlm, res);
+ dlm_lockres_grab_inflight_ref(dlm, res);
+ spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->spinlock);
+ /* lockres still marked IN_PROGRESS */
+ goto wake_waiters;
+ }
+
+ /* check master list to see if another node has started mastering it */
+ spin_lock(&dlm->master_lock);
+
+ /* if we found a block, wait for lock to be mastered by another node */
+ blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
+ if (blocked) {
+ int mig;
+ if (mle->type == DLM_MLE_MASTER) {
+ mlog(ML_ERROR, "master entry for nonexistent lock!\n");
+ BUG();
+ }
+ mig = (mle->type == DLM_MLE_MIGRATION);
+ /* if there is a migration in progress, let the migration
+ * finish before continuing. we can wait for the absence
+ * of the MIGRATION mle: either the migrate finished or
+ * one of the nodes died and the mle was cleaned up.
+ * if there is a BLOCK here, but it already has a master
+ * set, we are too late. the master does not have a ref
+ * for us in the refmap. detach the mle and drop it.
+ * either way, go back to the top and start over. */
+ if (mig || mle->master != O2NM_MAX_NODES) {
+ BUG_ON(mig && mle->master == dlm->node_num);
+ /* we arrived too late. the master does not
+ * have a ref for us. retry. */
+ mlog(0, "%s:%.*s: late on %s\n",
+ dlm->name, namelen, lockid,
+ mig ? "MIGRATION" : "BLOCK");
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+
+ /* master is known, detach */
+ if (!mig)
+ dlm_mle_detach_hb_events(dlm, mle);
+ dlm_put_mle(mle);
+ mle = NULL;
+ /* this is lame, but we can't wait on either
+ * the mle or lockres waitqueue here */
+ if (mig)
+ msleep(100);
+ goto lookup;
+ }
+ } else {
+ /* go ahead and try to master lock on this node */
+ mle = alloc_mle;
+ /* make sure this does not get freed below */
+ alloc_mle = NULL;
+ dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
+ set_bit(dlm->node_num, mle->maybe_map);
+ __dlm_insert_mle(dlm, mle);
+
+ /* still holding the dlm spinlock, check the recovery map
+ * to see if there are any nodes that still need to be
+ * considered. these will not appear in the mle nodemap
+ * but they might own this lockres. wait on them. */
+ bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES);
+ if (bit < O2NM_MAX_NODES) {
+ mlog(0, "%s: res %.*s, At least one node (%d) "
+ "to recover before lock mastery can begin\n",
+ dlm->name, namelen, (char *)lockid, bit);
+ wait_on_recovery = 1;
+ }
+ }
+
+ /* at this point there is either a DLM_MLE_BLOCK or a
+ * DLM_MLE_MASTER on the master list, so it's safe to add the
+ * lockres to the hashtable. anyone who finds the lock will
+ * still have to wait on the IN_PROGRESS. */
+
+ /* finally add the lockres to its hash bucket */
+ __dlm_insert_lockres(dlm, res);
+
+ /* since this lockres is new it does not require the spinlock */
+ __dlm_lockres_grab_inflight_ref(dlm, res);
+
+ /* get an extra ref on the mle in case this is a BLOCK
+ * if so, the creator of the BLOCK may try to put the last
+ * ref at this time in the assert master handler, so we
+ * need an extra one to keep from a bad ptr deref. */
+ dlm_get_mle_inuse(mle);
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+
+redo_request:
+ while (wait_on_recovery) {
+ /* any cluster changes that occurred after dropping the
+ * dlm spinlock would be detectable by a change on the mle,
+ * so we only need to clear out the recovery map once. */
+ if (dlm_is_recovery_lock(lockid, namelen)) {
+ mlog(0, "%s: Recovery map is not empty, but must "
+ "master $RECOVERY lock now\n", dlm->name);
+ if (!dlm_pre_master_reco_lockres(dlm, res))
+ wait_on_recovery = 0;
+ else {
+ mlog(0, "%s: waiting 500ms for heartbeat state "
+ "change\n", dlm->name);
+ msleep(500);
+ }
+ continue;
+ }
+
+ dlm_kick_recovery_thread(dlm);
+ msleep(1000);
+ dlm_wait_for_recovery(dlm);
+
+ spin_lock(&dlm->spinlock);
+ bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES);
+ if (bit < O2NM_MAX_NODES) {
+ mlog(0, "%s: res %.*s, At least one node (%d) "
+ "to recover before lock mastery can begin\n",
+ dlm->name, namelen, (char *)lockid, bit);
+ wait_on_recovery = 1;
+ } else
+ wait_on_recovery = 0;
+ spin_unlock(&dlm->spinlock);
+
+ if (wait_on_recovery)
+ dlm_wait_for_node_recovery(dlm, bit, 10000);
+ }
+
+ /* must wait for lock to be mastered elsewhere */
+ if (blocked)
+ goto wait;
+
+ ret = -EINVAL;
+ dlm_node_iter_init(mle->vote_map, &iter);
+ while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
+ ret = dlm_do_master_request(res, mle, nodenum);
+ if (ret < 0)
+ mlog_errno(ret);
+ if (mle->master != O2NM_MAX_NODES) {
+ /* found a master ! */
+ if (mle->master <= nodenum)
+ break;
+ /* if our master request has not reached the master
+ * yet, keep going until it does. this is how the
+ * master will know that asserts are needed back to
+ * the lower nodes. */
+ mlog(0, "%s: res %.*s, Requests only up to %u but "
+ "master is %u, keep going\n", dlm->name, namelen,
+ lockid, nodenum, mle->master);
+ }
+ }
+
+wait:
+ /* keep going until the response map includes all nodes */
+ ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
+ if (ret < 0) {
+ wait_on_recovery = 1;
+ mlog(0, "%s: res %.*s, Node map changed, redo the master "
+ "request now, blocked=%d\n", dlm->name, res->lockname.len,
+ res->lockname.name, blocked);
+ if (++tries > 20) {
+ mlog(ML_ERROR, "%s: res %.*s, Spinning on "
+ "dlm_wait_for_lock_mastery, blocked = %d\n",
+ dlm->name, res->lockname.len,
+ res->lockname.name, blocked);
+ dlm_print_one_lock_resource(res);
+ dlm_print_one_mle(mle);
+ tries = 0;
+ }
+ goto redo_request;
+ }
+
+ mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
+ res->lockname.name, res->owner);
+ /* make sure we never continue without this */
+ BUG_ON(res->owner == O2NM_MAX_NODES);
+
+ /* master is known, detach if not already detached */
+ dlm_mle_detach_hb_events(dlm, mle);
+ dlm_put_mle(mle);
+ /* put the extra ref */
+ dlm_put_mle_inuse(mle);
+
+wake_waiters:
+ spin_lock(&res->spinlock);
+ res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+
+leave:
+ /* need to free the unused mle */
+ if (alloc_mle)
+ kmem_cache_free(dlm_mle_cache, alloc_mle);
+
+ return res;
+}
+
+
+#define DLM_MASTERY_TIMEOUT_MS 5000
+
+static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_master_list_entry *mle,
+ int *blocked)
+{
+ u8 m;
+ int ret, bit;
+ int map_changed, voting_done;
+ int assert, sleep;
+
+recheck:
+ ret = 0;
+ assert = 0;
+
+ /* check if another node has already become the owner */
+ spin_lock(&res->spinlock);
+ if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
+ mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
+ res->lockname.len, res->lockname.name, res->owner);
+ spin_unlock(&res->spinlock);
+ /* this will cause the master to re-assert across
+ * the whole cluster, freeing up mles */
+ if (res->owner != dlm->node_num) {
+ ret = dlm_do_master_request(res, mle, res->owner);
+ if (ret < 0) {
+ /* give recovery a chance to run */
+ mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
+ msleep(500);
+ goto recheck;
+ }
+ }
+ ret = 0;
+ goto leave;
+ }
+ spin_unlock(&res->spinlock);
+
+ spin_lock(&mle->spinlock);
+ m = mle->master;
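+ /* map_changed: cluster membership shifted since mastery began;
+ * voting_done: every node we polled has responded */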
+ map_changed = !bitmap_equal(mle->vote_map, mle->node_map,
+ O2NM_MAX_NODES);
+ voting_done = bitmap_equal(mle->vote_map, mle->response_map,
+ O2NM_MAX_NODES);
+
+ /* restart mastery if the node map changed while we were voting */
+ if (map_changed) {
+ int b;
+ mlog(0, "%s: %.*s: node map changed, restarting\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
+ b = (mle->type == DLM_MLE_BLOCK);
+ if ((*blocked && !b) || (!*blocked && b)) {
+ mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ *blocked, b);
+ *blocked = b;
+ }
+ spin_unlock(&mle->spinlock);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto leave;
+ }
+ mlog(0, "%s:%.*s: restart lock mastery succeeded, "
+ "rechecking now\n", dlm->name, res->lockname.len,
+ res->lockname.name);
+ goto recheck;
+ } else {
+ if (!voting_done) {
+ mlog(0, "map not changed and voting not done "
+ "for %s:%.*s\n", dlm->name, res->lockname.len,
+ res->lockname.name);
+ }
+ }
+
+ if (m != O2NM_MAX_NODES) {
+ /* another node has done an assert!
+ * all done! */
+ sleep = 0;
+ } else {
+ sleep = 1;
+ /* have all nodes responded? */
+ if (voting_done && !*blocked) {
+ bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES);
+ if (dlm->node_num <= bit) {
+ /* my node number is lowest.
+ * now tell other nodes that I am
+ * mastering this. */
+ mle->master = dlm->node_num;
+ /* ref was grabbed in get_lock_resource
+ * will be dropped in dlmlock_master */
+ assert = 1;
+ sleep = 0;
+ }
+ /* if voting is done, but we have not received
+ * an assert master yet, we must sleep */
+ }
+ }
+
+ spin_unlock(&mle->spinlock);
+
+ /* sleep if we haven't finished voting yet */
+ if (sleep) {
+ unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);
+ atomic_set(&mle->woken, 0);
+ (void)wait_event_timeout(mle->wq,
+ (atomic_read(&mle->woken) == 1),
+ timeo);
+ if (res->owner == O2NM_MAX_NODES) {
+ mlog(0, "%s:%.*s: waiting again\n", dlm->name,
+ res->lockname.len, res->lockname.name);
+ goto recheck;
+ }
+ mlog(0, "done waiting, master is %u\n", res->owner);
+ ret = 0;
+ goto leave;
+ }
+
+ ret = 0; /* done */
+ if (assert) {
+ m = dlm->node_num;
+ mlog(0, "about to master %.*s here, this=%u\n",
+ res->lockname.len, res->lockname.name, m);
+ ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
+ if (ret) {
+ /* This is a failure in the network path,
+ * not in the response to the assert_master
+ * (any nonzero response is a BUG on this node).
+ * Most likely a socket just got disconnected
+ * due to node death. */
+ mlog_errno(ret);
+ }
+ /* no longer need to restart lock mastery.
+ * all living nodes have been contacted. */
+ ret = 0;
+ }
+
+ /* set the lockres owner */
+ spin_lock(&res->spinlock);
+ /* mastery reference obtained either during
+ * assert_master_handler or in get_lock_resource */
+ dlm_change_lockres_owner(dlm, res, m);
+ spin_unlock(&res->spinlock);
+
+leave:
+ return ret;
+}
+
+struct dlm_bitmap_diff_iter
+{
+ int curnode;
+ unsigned long *orig_bm;
+ unsigned long *cur_bm;
+ unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
+};
+
+enum dlm_node_state_change
+{
+ NODE_DOWN = -1,
+ NODE_NO_CHANGE = 0,
+ NODE_UP
+};
+
+static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
+ unsigned long *orig_bm,
+ unsigned long *cur_bm)
+{
+ unsigned long p1, p2;
+ int i;
+
+ iter->curnode = -1;
+ iter->orig_bm = orig_bm;
+ iter->cur_bm = cur_bm;
+
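+ /* diff_bm is the XOR of the two maps: a set bit marks a node
+ * whose up/down state changed between them */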
+ for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
+ p1 = *(iter->orig_bm + i);
+ p2 = *(iter->cur_bm + i);
+ iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
+ }
+}
+
+static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
+ enum dlm_node_state_change *state)
+{
+ int bit;
+
+ if (iter->curnode >= O2NM_MAX_NODES)
+ return -ENOENT;
+
+ bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
+ iter->curnode+1);
+ if (bit >= O2NM_MAX_NODES) {
+ iter->curnode = O2NM_MAX_NODES;
+ return -ENOENT;
+ }
+
+ /* if it was there in the original then this node died */
+ if (test_bit(bit, iter->orig_bm))
+ *state = NODE_DOWN;
+ else
+ *state = NODE_UP;
+
+ iter->curnode = bit;
+ return bit;
+}
+
+
+static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_master_list_entry *mle,
+ int blocked)
+{
+ struct dlm_bitmap_diff_iter bdi;
+ enum dlm_node_state_change sc;
+ int node;
+ int ret = 0;
+
+ mlog(0, "something happened such that the "
+ "master process may need to be restarted!\n");
+
+ assert_spin_locked(&mle->spinlock);
+
+ dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
+ node = dlm_bitmap_diff_iter_next(&bdi, &sc);
+ while (node >= 0) {
+ if (sc == NODE_UP) {
+ /* a node came up. clear any old vote from
+ * the response map and set it in the vote map
+ * then restart the mastery. */
+ mlog(ML_NOTICE, "node %d up while restarting\n", node);
+
+ /* redo the master request, but only for the new node */
+ mlog(0, "sending request to new node\n");
+ clear_bit(node, mle->response_map);
+ set_bit(node, mle->vote_map);
+ } else {
+ mlog(ML_ERROR, "node down! %d\n", node);
+ if (blocked) {
+ int lowest = find_first_bit(mle->maybe_map,
+ O2NM_MAX_NODES);
+
+ /* act like it was never there */
+ clear_bit(node, mle->maybe_map);
+
+ if (node == lowest) {
+ mlog(0, "expected master %u died"
+ " while this node was blocked "
+ "waiting on it!\n", node);
+ lowest = find_next_bit(mle->maybe_map,
+ O2NM_MAX_NODES,
+ lowest+1);
+ if (lowest < O2NM_MAX_NODES) {
+ mlog(0, "%s:%.*s:still "
+ "blocked. waiting on %u "
+ "now\n", dlm->name,
+ res->lockname.len,
+ res->lockname.name,
+ lowest);
+ } else {
+ /* mle is an MLE_BLOCK, but
+ * there is now nothing left to
+ * block on. we need to return
+ * all the way back out and try
+ * again with an MLE_MASTER.
+ * dlm_do_local_recovery_cleanup
+ * has already run, so the mle
+ * refcount is ok */
+ mlog(0, "%s:%.*s: no "
+ "longer blocking. try to "
+ "master this here\n",
+ dlm->name,
+ res->lockname.len,
+ res->lockname.name);
+ mle->type = DLM_MLE_MASTER;
+ mle->mleres = res;
+ }
+ }
+ }
+
+ /* now blank out everything, as if we had never
+ * contacted anyone */
+ bitmap_zero(mle->maybe_map, O2NM_MAX_NODES);
+ bitmap_zero(mle->response_map, O2NM_MAX_NODES);
+ /* reset the vote_map to the current node_map */
+ bitmap_copy(mle->vote_map, mle->node_map,
+ O2NM_MAX_NODES);
+ /* put myself into the maybe map */
+ if (mle->type != DLM_MLE_BLOCK)
+ set_bit(dlm->node_num, mle->maybe_map);
+ }
+ ret = -EAGAIN;
+ node = dlm_bitmap_diff_iter_next(&bdi, &sc);
+ }
+ return ret;
+}
+
+
+/*
+ * DLM_MASTER_REQUEST_MSG
+ *
+ * returns: 0 on success,
+ * -errno on a network error
+ *
+ * on error, the caller should assume the target node is "dead"
+ *
+ */
+
+static int dlm_do_master_request(struct dlm_lock_resource *res,
+ struct dlm_master_list_entry *mle, int to)
+{
+ struct dlm_ctxt *dlm = mle->dlm;
+ struct dlm_master_request request;
+ int ret, response=0, resend;
+
+ memset(&request, 0, sizeof(request));
+ request.node_idx = dlm->node_num;
+
+ BUG_ON(mle->type == DLM_MLE_MIGRATION);
+
+ request.namelen = (u8)mle->mnamelen;
+ memcpy(request.name, mle->mname, request.namelen);
+
+again:
+ ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
+ sizeof(request), to, &response);
+ if (ret < 0) {
+ if (ret == -ESRCH) {
+ /* should never happen */
+ mlog(ML_ERROR, "TCP stack not ready!\n");
+ BUG();
+ } else if (ret == -EINVAL) {
+ mlog(ML_ERROR, "bad args passed to o2net!\n");
+ BUG();
+ } else if (ret == -ENOMEM) {
+ mlog(ML_ERROR, "out of memory while trying to send "
+ "network message! retrying\n");
+ /* this is totally crude */
+ msleep(50);
+ goto again;
+ } else if (!dlm_is_host_down(ret)) {
+ /* not a network error. bad. */
+ mlog_errno(ret);
+ mlog(ML_ERROR, "unhandled error!");
+ BUG();
+ }
+ /* all other errors should be network errors,
+ * and likely indicate node death */
+ mlog(ML_ERROR, "link to %d went down!\n", to);
+ goto out;
+ }
+
+ ret = 0;
+ resend = 0;
+ spin_lock(&mle->spinlock);
+ switch (response) {
+ case DLM_MASTER_RESP_YES:
+ set_bit(to, mle->response_map);
+ mlog(0, "node %u is the master, response=YES\n", to);
+ mlog(0, "%s:%.*s: master node %u now knows I have a "
+ "reference\n", dlm->name, res->lockname.len,
+ res->lockname.name, to);
+ mle->master = to;
+ break;
+ case DLM_MASTER_RESP_NO:
+ mlog(0, "node %u not master, response=NO\n", to);
+ set_bit(to, mle->response_map);
+ break;
+ case DLM_MASTER_RESP_MAYBE:
+ mlog(0, "node %u not master, response=MAYBE\n", to);
+ set_bit(to, mle->response_map);
+ set_bit(to, mle->maybe_map);
+ break;
+ case DLM_MASTER_RESP_ERROR:
+ mlog(0, "node %u hit an error, resending\n", to);
+ resend = 1;
+ response = 0;
+ break;
+ default:
+ mlog(ML_ERROR, "bad response! %u\n", response);
+ BUG();
+ }
+ spin_unlock(&mle->spinlock);
+ if (resend) {
+ /* this is also totally crude */
+ msleep(50);
+ goto again;
+ }
+
+out:
+ return ret;
+}
+
+/*
+ * locks that can be taken here:
+ * dlm->spinlock
+ * res->spinlock
+ * mle->spinlock
+ * dlm->master_list
+ *
+ * if possible, TRIM THIS DOWN!!!
+ */
+int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data)
+{
+ u8 response = DLM_MASTER_RESP_MAYBE;
+ struct dlm_ctxt *dlm = data;
+ struct dlm_lock_resource *res = NULL;
+ struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
+ struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
+ char *name;
+ unsigned int namelen, hash;
+ int found, ret;
+ int set_maybe;
+ int dispatch_assert = 0;
+ int dispatched = 0;
+
+ if (!dlm_grab(dlm))
+ return DLM_MASTER_RESP_NO;
+
+ if (!dlm_domain_fully_joined(dlm)) {
+ response = DLM_MASTER_RESP_NO;
+ goto send_response;
+ }
+
+ name = request->name;
+ namelen = request->namelen;
+ hash = dlm_lockid_hash(name, namelen);
+
+ if (namelen > DLM_LOCKID_NAME_MAX) {
+ response = DLM_IVBUFLEN;
+ goto send_response;
+ }
+
+way_up_top:
+ spin_lock(&dlm->spinlock);
+ res = __dlm_lookup_lockres(dlm, name, namelen, hash);
+ if (res) {
+ spin_unlock(&dlm->spinlock);
+
+ /* take care of the easy cases up front */
+ spin_lock(&res->spinlock);
+
+ /*
+ * Right after dlm spinlock was released, dlm_thread could have
+ * purged the lockres. Check if lockres got unhashed. If so
+ * start over.
+ */
+ if (hlist_unhashed(&res->hash_node)) {
+ spin_unlock(&res->spinlock);
+ dlm_lockres_put(res);
+ goto way_up_top;
+ }
+
+ if (res->state & (DLM_LOCK_RES_RECOVERING|
+ DLM_LOCK_RES_MIGRATING)) {
+ spin_unlock(&res->spinlock);
+ mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
+ "being recovered/migrated\n");
+ response = DLM_MASTER_RESP_ERROR;
+ if (mle)
+ kmem_cache_free(dlm_mle_cache, mle);
+ goto send_response;
+ }
+
+ if (res->owner == dlm->node_num) {
+ dlm_lockres_set_refmap_bit(dlm, res, request->node_idx);
+ spin_unlock(&res->spinlock);
+ response = DLM_MASTER_RESP_YES;
+ if (mle)
+ kmem_cache_free(dlm_mle_cache, mle);
+
+ /* this node is the owner.
+ * there is some extra work that needs to
+ * happen now. the requesting node has
+ * caused all nodes up to this one to
+ * create mles. this node now needs to
+ * go back and clean those up. */
+ dispatch_assert = 1;
+ goto send_response;
+ } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
+ spin_unlock(&res->spinlock);
+ // mlog(0, "node %u is the master\n", res->owner);
+ response = DLM_MASTER_RESP_NO;
+ if (mle)
+ kmem_cache_free(dlm_mle_cache, mle);
+ goto send_response;
+ }
+
+ /* ok, there is no owner. either this node is
+ * being blocked, or it is actively trying to
+ * master this lock. */
+ if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
+ mlog(ML_ERROR, "lock with no owner should be "
+ "in-progress!\n");
+ BUG();
+ }
+
+ // mlog(0, "lockres is in progress...\n");
+ spin_lock(&dlm->master_lock);
+ found = dlm_find_mle(dlm, &tmpmle, name, namelen);
+ if (!found) {
+ mlog(ML_ERROR, "no mle found for this lock!\n");
+ BUG();
+ }
+ set_maybe = 1;
+ spin_lock(&tmpmle->spinlock);
+ if (tmpmle->type == DLM_MLE_BLOCK) {
+ // mlog(0, "this node is waiting for "
+ // "lockres to be mastered\n");
+ response = DLM_MASTER_RESP_NO;
+ } else if (tmpmle->type == DLM_MLE_MIGRATION) {
+ mlog(0, "node %u is master, but trying to migrate to "
+ "node %u.\n", tmpmle->master, tmpmle->new_master);
+ if (tmpmle->master == dlm->node_num) {
+ mlog(ML_ERROR, "no owner on lockres, but this "
+ "node is trying to migrate it to %u?!\n",
+ tmpmle->new_master);
+ BUG();
+ } else {
+ /* the real master can respond on its own */
+ response = DLM_MASTER_RESP_NO;
+ }
+ } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
+ set_maybe = 0;
+ if (tmpmle->master == dlm->node_num) {
+ response = DLM_MASTER_RESP_YES;
+ /* this node will be the owner.
+ * go back and clean the mles on any
+ * other nodes */
+ dispatch_assert = 1;
+ dlm_lockres_set_refmap_bit(dlm, res,
+ request->node_idx);
+ } else
+ response = DLM_MASTER_RESP_NO;
+ } else {
+ // mlog(0, "this node is attempting to "
+ // "master lockres\n");
+ response = DLM_MASTER_RESP_MAYBE;
+ }
+ if (set_maybe)
+ set_bit(request->node_idx, tmpmle->maybe_map);
+ spin_unlock(&tmpmle->spinlock);
+
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&res->spinlock);
+
+ /* keep the mle attached to heartbeat events */
+ dlm_put_mle(tmpmle);
+ if (mle)
+ kmem_cache_free(dlm_mle_cache, mle);
+ goto send_response;
+ }
+
+ /*
+ * lockres doesn't exist on this node
+ * if there is an MLE_BLOCK, return NO
+ * if there is an MLE_MASTER, return MAYBE
+ * otherwise, add an MLE_BLOCK, return NO
+ */
+ spin_lock(&dlm->master_lock);
+ found = dlm_find_mle(dlm, &tmpmle, name, namelen);
+ if (!found) {
+ /* this lockid has never been seen on this node yet */
+ // mlog(0, "no mle found\n");
+ if (!mle) {
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+
+ mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
+ if (!mle) {
+ response = DLM_MASTER_RESP_ERROR;
+ mlog_errno(-ENOMEM);
+ goto send_response;
+ }
+ goto way_up_top;
+ }
+
+ // mlog(0, "this is second time thru, already allocated, "
+ // "add the block.\n");
+ dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
+ set_bit(request->node_idx, mle->maybe_map);
+ __dlm_insert_mle(dlm, mle);
+ response = DLM_MASTER_RESP_NO;
+ } else {
+ spin_lock(&tmpmle->spinlock);
+ if (tmpmle->master == dlm->node_num) {
+ mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
+ BUG();
+ }
+ if (tmpmle->type == DLM_MLE_BLOCK)
+ response = DLM_MASTER_RESP_NO;
+ else if (tmpmle->type == DLM_MLE_MIGRATION) {
+ mlog(0, "migration mle was found (%u->%u)\n",
+ tmpmle->master, tmpmle->new_master);
+ /* real master can respond on its own */
+ response = DLM_MASTER_RESP_NO;
+ } else
+ response = DLM_MASTER_RESP_MAYBE;
+ set_bit(request->node_idx, tmpmle->maybe_map);
+ spin_unlock(&tmpmle->spinlock);
+ }
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+
+ if (found) {
+ /* keep the mle attached to heartbeat events */
+ dlm_put_mle(tmpmle);
+ }
+send_response:
+ /*
+ * __dlm_lookup_lockres() grabbed a reference to this lockres.
+ * The reference is released by dlm_assert_master_worker() under
+ * the call to dlm_dispatch_assert_master(). If
+ * dlm_assert_master_worker() isn't called, we drop it here.
+ */
+ if (dispatch_assert) {
+ mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
+ dlm->node_num, res->lockname.len, res->lockname.name);
+ spin_lock(&res->spinlock);
+ ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
+ DLM_ASSERT_MASTER_MLE_CLEANUP);
+ if (ret < 0) {
+ mlog(ML_ERROR, "failed to dispatch assert master work\n");
+ response = DLM_MASTER_RESP_ERROR;
+ spin_unlock(&res->spinlock);
+ dlm_lockres_put(res);
+ } else {
+ dispatched = 1;
+ __dlm_lockres_grab_inflight_worker(dlm, res);
+ spin_unlock(&res->spinlock);
+ }
+ } else {
+ if (res)
+ dlm_lockres_put(res);
+ }
+
+ if (!dispatched)
+ dlm_put(dlm);
+ return response;
+}
+
+/*
+ * DLM_ASSERT_MASTER_MSG
+ */
+
+
+/*
+ * NOTE: this can be used for debugging
+ * can periodically run all locks owned by this node
+ * and re-assert across the cluster...
+ */
+static int dlm_do_assert_master(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ void *nodemap, u32 flags)
+{
+ struct dlm_assert_master assert;
+ int to, tmpret;
+ struct dlm_node_iter iter;
+ int ret = 0;
+ int reassert;
+ const char *lockname = res->lockname.name;
+ unsigned int namelen = res->lockname.len;
+
+ BUG_ON(namelen > O2NM_MAX_NAME_LEN);
+
+ spin_lock(&res->spinlock);
+ res->state |= DLM_LOCK_RES_SETREF_INPROG;
+ spin_unlock(&res->spinlock);
+
+again:
+ reassert = 0;
+
+ /* note that if this nodemap is empty, it returns 0 */
+ dlm_node_iter_init(nodemap, &iter);
+ while ((to = dlm_node_iter_next(&iter)) >= 0) {
+ int r = 0;
+ struct dlm_master_list_entry *mle = NULL;
+
+ mlog(0, "sending assert master to %d (%.*s)\n", to,
+ namelen, lockname);
+ memset(&assert, 0, sizeof(assert));
+ assert.node_idx = dlm->node_num;
+ assert.namelen = namelen;
+ memcpy(assert.name, lockname, namelen);
+ assert.flags = cpu_to_be32(flags);
+
+ tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
+ &assert, sizeof(assert), to, &r);
+ if (tmpret < 0) {
+ mlog(ML_ERROR, "Error %d when sending message %u (key "
+ "0x%x) to node %u\n", tmpret,
+ DLM_ASSERT_MASTER_MSG, dlm->key, to);
+ if (!dlm_is_host_down(tmpret)) {
+ mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
+ BUG();
+ }
+ /* a node died. finish out the rest of the nodes. */
+ mlog(0, "link to %d went down!\n", to);
+ /* any nonzero status return will do */
+ ret = tmpret;
+ r = 0;
+ } else if (r < 0) {
+ /* ok, something is horribly messed up. kill thyself. */
+ mlog(ML_ERROR,"during assert master of %.*s to %u, "
+ "got %d.\n", namelen, lockname, to, r);
+ spin_lock(&dlm->spinlock);
+ spin_lock(&dlm->master_lock);
+ if (dlm_find_mle(dlm, &mle, (char *)lockname,
+ namelen)) {
+ dlm_print_one_mle(mle);
+ __dlm_put_mle(mle);
+ }
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+ BUG();
+ }
+
+ if (r & DLM_ASSERT_RESPONSE_REASSERT &&
+ !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
+ mlog(ML_ERROR, "%.*s: very strange, "
+ "master MLE but no lockres on %u\n",
+ namelen, lockname, to);
+ }
+
+ if (r & DLM_ASSERT_RESPONSE_REASSERT) {
+ mlog(0, "%.*s: node %u create mles on other "
+ "nodes and requests a re-assert\n",
+ namelen, lockname, to);
+ reassert = 1;
+ }
+ if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
+ mlog(0, "%.*s: node %u has a reference to this "
+ "lockres, set the bit in the refmap\n",
+ namelen, lockname, to);
+ spin_lock(&res->spinlock);
+ dlm_lockres_set_refmap_bit(dlm, res, to);
+ spin_unlock(&res->spinlock);
+ }
+ }
+
+ if (reassert)
+ goto again;
+
+ spin_lock(&res->spinlock);
+ res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+
+ return ret;
+}
+
+/*
+ * locks that can be taken here:
+ * dlm->spinlock
+ * res->spinlock
+ * mle->spinlock
+ * dlm->master_list
+ *
+ * if possible, TRIM THIS DOWN!!!
+ */
+int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_master_list_entry *mle = NULL;
+ struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
+ struct dlm_lock_resource *res = NULL;
+ char *name;
+ unsigned int namelen, hash;
+ u32 flags;
+ int master_request = 0, have_lockres_ref = 0;
+ int ret = 0;
+
+ if (!dlm_grab(dlm))
+ return 0;
+
+ name = assert->name;
+ namelen = assert->namelen;
+ hash = dlm_lockid_hash(name, namelen);
+ flags = be32_to_cpu(assert->flags);
+
+ if (namelen > DLM_LOCKID_NAME_MAX) {
+ mlog(ML_ERROR, "Invalid name length!");
+ goto done;
+ }
+
+ spin_lock(&dlm->spinlock);
+
+ if (flags)
+ mlog(0, "assert_master with flags: %u\n", flags);
+
+ /* find the MLE */
+ spin_lock(&dlm->master_lock);
+ if (!dlm_find_mle(dlm, &mle, name, namelen)) {
+ /* not an error, could be master just re-asserting */
+ mlog(0, "just got an assert_master from %u, but no "
+ "MLE for it! (%.*s)\n", assert->node_idx,
+ namelen, name);
+ } else {
+ int bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES);
+ if (bit >= O2NM_MAX_NODES) {
+ /* not necessarily an error, though less likely.
+ * could be master just re-asserting. */
+ mlog(0, "no bits set in the maybe_map, but %u "
+ "is asserting! (%.*s)\n", assert->node_idx,
+ namelen, name);
+ } else if (bit != assert->node_idx) {
+ if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
+ mlog(0, "master %u was found, %u should "
+ "back off\n", assert->node_idx, bit);
+ } else {
+ /* with the fix for bug 569, a higher node
+ * number winning the mastery will respond
+ * YES to mastery requests, but this node
+ * had no way of knowing. let it pass. */
+ mlog(0, "%u is the lowest node, "
+ "%u is asserting. (%.*s) %u must "
+ "have begun after %u won.\n", bit,
+ assert->node_idx, namelen, name, bit,
+ assert->node_idx);
+ }
+ }
+ if (mle->type == DLM_MLE_MIGRATION) {
+ if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
+ mlog(0, "%s:%.*s: got cleanup assert"
+ " from %u for migration\n",
+ dlm->name, namelen, name,
+ assert->node_idx);
+ } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
+ mlog(0, "%s:%.*s: got unrelated assert"
+ " from %u for migration, ignoring\n",
+ dlm->name, namelen, name,
+ assert->node_idx);
+ __dlm_put_mle(mle);
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+ goto done;
+ }
+ }
+ }
+ spin_unlock(&dlm->master_lock);
+
+ /* ok everything checks out with the MLE
+ * now check to see if there is a lockres */
+ res = __dlm_lookup_lockres(dlm, name, namelen, hash);
+ if (res) {
+ spin_lock(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_RECOVERING) {
+ mlog(ML_ERROR, "%u asserting but %.*s is "
+ "RECOVERING!\n", assert->node_idx, namelen, name);
+ goto kill;
+ }
+ if (!mle) {
+ if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
+ res->owner != assert->node_idx) {
+ mlog(ML_ERROR, "DIE! Mastery assert from %u, "
+ "but current owner is %u! (%.*s)\n",
+ assert->node_idx, res->owner, namelen,
+ name);
+ __dlm_print_one_lock_resource(res);
+ BUG();
+ }
+ } else if (mle->type != DLM_MLE_MIGRATION) {
+ if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
+ /* owner is just re-asserting */
+ if (res->owner == assert->node_idx) {
+ mlog(0, "owner %u re-asserting on "
+ "lock %.*s\n", assert->node_idx,
+ namelen, name);
+ goto ok;
+ }
+ mlog(ML_ERROR, "got assert_master from "
+ "node %u, but %u is the owner! "
+ "(%.*s)\n", assert->node_idx,
+ res->owner, namelen, name);
+ goto kill;
+ }
+ if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
+ mlog(ML_ERROR, "got assert from %u, but lock "
+ "with no owner should be "
+ "in-progress! (%.*s)\n",
+ assert->node_idx,
+ namelen, name);
+ goto kill;
+ }
+ } else /* mle->type == DLM_MLE_MIGRATION */ {
+ /* should only be getting an assert from new master */
+ if (assert->node_idx != mle->new_master) {
+ mlog(ML_ERROR, "got assert from %u, but "
+ "new master is %u, and old master "
+ "was %u (%.*s)\n",
+ assert->node_idx, mle->new_master,
+ mle->master, namelen, name);
+ goto kill;
+ }
+
+ }
+ok:
+ spin_unlock(&res->spinlock);
+ }
+
+ // mlog(0, "woo! got an assert_master from node %u!\n",
+ // assert->node_idx);
+ if (mle) {
+ int extra_ref = 0;
+ int nn = -1;
+ int rr, err = 0;
+
+ spin_lock(&mle->spinlock);
+ if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
+ extra_ref = 1;
+ else {
+ /* MASTER mle: if any bits set in the response map
+ * then the calling node needs to re-assert to clear
+ * up nodes that this node contacted */
+ while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
+ nn+1)) < O2NM_MAX_NODES) {
+ if (nn != dlm->node_num && nn != assert->node_idx) {
+ master_request = 1;
+ break;
+ }
+ }
+ }
+ mle->master = assert->node_idx;
+ atomic_set(&mle->woken, 1);
+ wake_up(&mle->wq);
+ spin_unlock(&mle->spinlock);
+
+ if (res) {
+ int wake = 0;
+ spin_lock(&res->spinlock);
+ if (mle->type == DLM_MLE_MIGRATION) {
+ mlog(0, "finishing off migration of lockres %.*s, "
+ "from %u to %u\n",
+ res->lockname.len, res->lockname.name,
+ dlm->node_num, mle->new_master);
+ res->state &= ~DLM_LOCK_RES_MIGRATING;
+ wake = 1;
+ dlm_change_lockres_owner(dlm, res, mle->new_master);
+ BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
+ } else {
+ dlm_change_lockres_owner(dlm, res, mle->master);
+ }
+ spin_unlock(&res->spinlock);
+ have_lockres_ref = 1;
+ if (wake)
+ wake_up(&res->wq);
+ }
+
+ /* master is known, detach if not already detached.
+ * ensures that only one assert_master call will happen
+ * on this mle. */
+ spin_lock(&dlm->master_lock);
+
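+		/*
+		 * Sanity-check the mle refcount before the puts below.  The
+		 * mle must hold at least one base reference, plus one more if
+		 * it is marked inuse, plus one more if this handler owns the
+		 * extra BLOCK/MIGRATION reference; anything lower indicates a
+		 * refcount imbalance, so dump the mle for debugging (but
+		 * still proceed).
+		 */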
+ rr = kref_read(&mle->mle_refs);
+ if (mle->inuse > 0) {
+ if (extra_ref && rr < 3)
+ err = 1;
+ else if (!extra_ref && rr < 2)
+ err = 1;
+ } else {
+ if (extra_ref && rr < 2)
+ err = 1;
+ else if (!extra_ref && rr < 1)
+ err = 1;
+ }
+ if (err) {
+ mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
+ "that will mess up this node, refs=%d, extra=%d, "
+ "inuse=%d\n", dlm->name, namelen, name,
+ assert->node_idx, rr, extra_ref, mle->inuse);
+ dlm_print_one_mle(mle);
+ }
+ __dlm_unlink_mle(dlm, mle);
+ __dlm_mle_detach_hb_events(dlm, mle);
+ __dlm_put_mle(mle);
+ if (extra_ref) {
+ /* the assert master message now balances the extra
+ * ref given by the master / migration request message.
+ * if this is the last put, it will be removed
+ * from the list. */
+ __dlm_put_mle(mle);
+ }
+ spin_unlock(&dlm->master_lock);
+ } else if (res) {
+ if (res->owner != assert->node_idx) {
+ mlog(0, "assert_master from %u, but current "
+ "owner is %u (%.*s), no mle\n", assert->node_idx,
+ res->owner, namelen, name);
+ }
+ }
+ spin_unlock(&dlm->spinlock);
+
+done:
+ ret = 0;
+ if (res) {
+ spin_lock(&res->spinlock);
+ res->state |= DLM_LOCK_RES_SETREF_INPROG;
+ spin_unlock(&res->spinlock);
+ *ret_data = (void *)res;
+ }
+ dlm_put(dlm);
+ if (master_request) {
+ mlog(0, "need to tell master to reassert\n");
+		/* keep the response positive; a negative status would shoot
+		 * down the asserting node. */
+ ret |= DLM_ASSERT_RESPONSE_REASSERT;
+ if (!have_lockres_ref) {
+ mlog(ML_ERROR, "strange, got assert from %u, MASTER "
+ "mle present here for %s:%.*s, but no lockres!\n",
+ assert->node_idx, dlm->name, namelen, name);
+ }
+ }
+ if (have_lockres_ref) {
+ /* let the master know we have a reference to the lockres */
+ ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
+ mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
+ dlm->name, namelen, name, assert->node_idx);
+ }
+ return ret;
+
+kill:
+ /* kill the caller! */
+ mlog(ML_ERROR, "Bad message received from another node. Dumping state "
+ "and killing the other node now! This node is OK and can continue.\n");
+ __dlm_print_one_lock_resource(res);
+ spin_unlock(&res->spinlock);
+ spin_lock(&dlm->master_lock);
+ if (mle)
+ __dlm_put_mle(mle);
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+ *ret_data = (void *)res;
+ dlm_put(dlm);
+ return -EINVAL;
+}
+
+void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
+{
+ struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;
+
+ if (ret_data) {
+ spin_lock(&res->spinlock);
+ res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+ dlm_lockres_put(res);
+ }
+ return;
+}
+
+int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ int ignore_higher, u8 request_from, u32 flags)
+{
+ struct dlm_work_item *item;
+ item = kzalloc(sizeof(*item), GFP_ATOMIC);
+ if (!item)
+ return -ENOMEM;
+
+
+ /* queue up work for dlm_assert_master_worker */
+ dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
+ item->u.am.lockres = res; /* already have a ref */
+ /* can optionally ignore node numbers higher than this node */
+ item->u.am.ignore_higher = ignore_higher;
+ item->u.am.request_from = request_from;
+ item->u.am.flags = flags;
+
+ if (ignore_higher)
+ mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
+ res->lockname.name);
+
+ spin_lock(&dlm->work_lock);
+ list_add_tail(&item->list, &dlm->work_list);
+ spin_unlock(&dlm->work_lock);
+
+ queue_work(dlm->dlm_worker, &dlm->dispatched_work);
+ return 0;
+}
+
+static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
+{
+ struct dlm_ctxt *dlm = data;
+ int ret = 0;
+ struct dlm_lock_resource *res;
+ unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ int ignore_higher;
+ int bit;
+ u8 request_from;
+ u32 flags;
+
+ dlm = item->dlm;
+ res = item->u.am.lockres;
+ ignore_higher = item->u.am.ignore_higher;
+ request_from = item->u.am.request_from;
+ flags = item->u.am.flags;
+
+ spin_lock(&dlm->spinlock);
+ bitmap_copy(nodemap, dlm->domain_map, O2NM_MAX_NODES);
+ spin_unlock(&dlm->spinlock);
+
+ clear_bit(dlm->node_num, nodemap);
+ if (ignore_higher) {
+		/* if this is just to clear up mles for nodes below
+		 * this node, do not send the message to the original
+		 * caller or to any node number higher than this one */
+ clear_bit(request_from, nodemap);
+ bit = dlm->node_num;
+ while (1) {
+ bit = find_next_bit(nodemap, O2NM_MAX_NODES,
+ bit+1);
+ if (bit >= O2NM_MAX_NODES)
+ break;
+ clear_bit(bit, nodemap);
+ }
+ }
+
+ /*
+ * If we're migrating this lock to someone else, we are no
+	 * longer allowed to assert our own mastery.  OTOH, we need to
+ * prevent migration from starting while we're still asserting
+ * our dominance. The reserved ast delays migration.
+ */
+ spin_lock(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_MIGRATING) {
+ mlog(0, "Someone asked us to assert mastery, but we're "
+ "in the middle of migration. Skipping assert, "
+ "the new master will handle that.\n");
+ spin_unlock(&res->spinlock);
+ goto put;
+ } else
+ __dlm_lockres_reserve_ast(res);
+ spin_unlock(&res->spinlock);
+
+ /* this call now finishes out the nodemap
+ * even if one or more nodes die */
+ mlog(0, "worker about to master %.*s here, this=%u\n",
+ res->lockname.len, res->lockname.name, dlm->node_num);
+ ret = dlm_do_assert_master(dlm, res, nodemap, flags);
+ if (ret < 0) {
+ /* no need to restart, we are done */
+ if (!dlm_is_host_down(ret))
+ mlog_errno(ret);
+ }
+
+ /* Ok, we've asserted ourselves. Let's let migration start. */
+ dlm_lockres_release_ast(dlm, res);
+
+put:
+ dlm_lockres_drop_inflight_worker(dlm, res);
+
+ dlm_lockres_put(res);
+
+ mlog(0, "finished with dlm_assert_master_worker\n");
+}
+
+/* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
+ * We cannot wait for node recovery to complete to begin mastering this
+ * lockres because this lockres is used to kick off recovery! ;-)
+ * So, do a pre-check on all living nodes to see if any of those nodes
+ * think that $RECOVERY is currently mastered by a dead node. If so,
+ * we wait a short time to allow that node to get notified by its own
+ * heartbeat stack, then check again. All $RECOVERY lock resources
+ * mastered by dead nodes are purged when the heartbeat callback is
+ * fired, so we know for sure that it is safe to continue once
+ * the query returns either a live node or no node at all.  */
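+/* NOTE: a -EAGAIN return from this pre-check is not fatal; the caller is
+ * presumably expected to back off briefly and retry until every surviving
+ * node agrees that the dead recovery-lock master is gone. */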
+static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ struct dlm_node_iter iter;
+ int nodenum;
+ int ret = 0;
+ u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;
+
+ spin_lock(&dlm->spinlock);
+ dlm_node_iter_init(dlm->domain_map, &iter);
+ spin_unlock(&dlm->spinlock);
+
+ while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
+ /* do not send to self */
+ if (nodenum == dlm->node_num)
+ continue;
+ ret = dlm_do_master_requery(dlm, res, nodenum, &master);
+ if (ret < 0) {
+ mlog_errno(ret);
+ if (!dlm_is_host_down(ret))
+ BUG();
+ /* host is down, so answer for that node would be
+ * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
+ ret = 0;
+ }
+
+ if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
+ /* check to see if this master is in the recovery map */
+ spin_lock(&dlm->spinlock);
+ if (test_bit(master, dlm->recovery_map)) {
+ mlog(ML_NOTICE, "%s: node %u has not seen "
+ "node %u go down yet, and thinks the "
+ "dead node is mastering the recovery "
+ "lock. must wait.\n", dlm->name,
+ nodenum, master);
+ ret = -EAGAIN;
+ }
+ spin_unlock(&dlm->spinlock);
+ mlog(0, "%s: reco lock master is %u\n", dlm->name,
+ master);
+ break;
+ }
+ }
+ return ret;
+}
+
+/*
+ * DLM_DEREF_LOCKRES_MSG
+ */
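+/*
+ * Dereference protocol, as implemented below: a node that no longer needs a
+ * lockres it does not master sends DLM_DEREF_LOCKRES_MSG to the master, which
+ * clears the sender's bit in the lockres refmap.  If the master is still
+ * processing an assert_master reply (SETREF_INPROG), the deref is handed off
+ * to a worker and the handler returns DLM_DEREF_RESPONSE_INPROG; the worker
+ * later sends DLM_DEREF_LOCKRES_DONE back to the requester so it can finish
+ * purging the lockres.
+ */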
+
+int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
+{
+ struct dlm_deref_lockres deref;
+ int ret = 0, r;
+ const char *lockname;
+ unsigned int namelen;
+
+ lockname = res->lockname.name;
+ namelen = res->lockname.len;
+ BUG_ON(namelen > O2NM_MAX_NAME_LEN);
+
+ memset(&deref, 0, sizeof(deref));
+ deref.node_idx = dlm->node_num;
+ deref.namelen = namelen;
+ memcpy(deref.name, lockname, namelen);
+
+ ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
+ &deref, sizeof(deref), res->owner, &r);
+ if (ret < 0)
+		mlog(ML_ERROR, "%s: res %.*s, error %d sending DEREF to node %u\n",
+		     dlm->name, namelen, lockname, ret, res->owner);
+ else if (r < 0) {
+ /* BAD. other node says I did not have a ref. */
+ mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
+ dlm->name, namelen, lockname, res->owner, r);
+ dlm_print_one_lock_resource(res);
+ if (r == -ENOMEM)
+ BUG();
+ } else
+ ret = r;
+
+ return ret;
+}
+
+int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
+ struct dlm_lock_resource *res = NULL;
+ char *name;
+ unsigned int namelen;
+ int ret = -EINVAL;
+ u8 node;
+ unsigned int hash;
+ struct dlm_work_item *item;
+ int cleared = 0;
+ int dispatch = 0;
+
+ if (!dlm_grab(dlm))
+ return 0;
+
+ name = deref->name;
+ namelen = deref->namelen;
+ node = deref->node_idx;
+
+ if (namelen > DLM_LOCKID_NAME_MAX) {
+ mlog(ML_ERROR, "Invalid name length!");
+ goto done;
+ }
+ if (deref->node_idx >= O2NM_MAX_NODES) {
+ mlog(ML_ERROR, "Invalid node number: %u\n", node);
+ goto done;
+ }
+
+ hash = dlm_lockid_hash(name, namelen);
+
+ spin_lock(&dlm->spinlock);
+ res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
+ if (!res) {
+ spin_unlock(&dlm->spinlock);
+ mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
+ dlm->name, namelen, name);
+ goto done;
+ }
+ spin_unlock(&dlm->spinlock);
+
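+	/*
+	 * If an assert_master reply is still in flight for this lockres
+	 * (SETREF_INPROG), defer the deref to a worker that will wait for the
+	 * flag to clear; otherwise drop the sender's refmap bit right here.
+	 */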
+ spin_lock(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_SETREF_INPROG)
+ dispatch = 1;
+ else {
+ BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
+ if (test_bit(node, res->refmap)) {
+ dlm_lockres_clear_refmap_bit(dlm, res, node);
+ cleared = 1;
+ }
+ }
+ spin_unlock(&res->spinlock);
+
+ if (!dispatch) {
+ if (cleared)
+ dlm_lockres_calc_usage(dlm, res);
+ else {
+ mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
+ "but it is already dropped!\n", dlm->name,
+ res->lockname.len, res->lockname.name, node);
+ dlm_print_one_lock_resource(res);
+ }
+ ret = DLM_DEREF_RESPONSE_DONE;
+ goto done;
+ }
+
+ item = kzalloc(sizeof(*item), GFP_NOFS);
+ if (!item) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto done;
+ }
+
+ dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
+ item->u.dl.deref_res = res;
+ item->u.dl.deref_node = node;
+
+ spin_lock(&dlm->work_lock);
+ list_add_tail(&item->list, &dlm->work_list);
+ spin_unlock(&dlm->work_lock);
+
+ queue_work(dlm->dlm_worker, &dlm->dispatched_work);
+ return DLM_DEREF_RESPONSE_INPROG;
+
+done:
+ if (res)
+ dlm_lockres_put(res);
+ dlm_put(dlm);
+
+ return ret;
+}
+
+int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_deref_lockres_done *deref
+ = (struct dlm_deref_lockres_done *)msg->buf;
+ struct dlm_lock_resource *res = NULL;
+ char *name;
+ unsigned int namelen;
+ int ret = -EINVAL;
+ u8 node;
+ unsigned int hash;
+
+ if (!dlm_grab(dlm))
+ return 0;
+
+ name = deref->name;
+ namelen = deref->namelen;
+ node = deref->node_idx;
+
+ if (namelen > DLM_LOCKID_NAME_MAX) {
+ mlog(ML_ERROR, "Invalid name length!");
+ goto done;
+ }
+ if (deref->node_idx >= O2NM_MAX_NODES) {
+ mlog(ML_ERROR, "Invalid node number: %u\n", node);
+ goto done;
+ }
+
+ hash = dlm_lockid_hash(name, namelen);
+
+ spin_lock(&dlm->spinlock);
+ res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
+ if (!res) {
+ spin_unlock(&dlm->spinlock);
+ mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
+ dlm->name, namelen, name);
+ goto done;
+ }
+
+ spin_lock(&res->spinlock);
+ if (!(res->state & DLM_LOCK_RES_DROPPING_REF)) {
+ spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->spinlock);
+ mlog(ML_NOTICE, "%s:%.*s: node %u sends deref done "
+ "but it is already derefed!\n", dlm->name,
+ res->lockname.len, res->lockname.name, node);
+ ret = 0;
+ goto done;
+ }
+
+ __dlm_do_purge_lockres(dlm, res);
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+
+ spin_unlock(&dlm->spinlock);
+
+ ret = 0;
+done:
+ if (res)
+ dlm_lockres_put(res);
+ dlm_put(dlm);
+ return ret;
+}
+
+static void dlm_drop_lockres_ref_done(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res, u8 node)
+{
+ struct dlm_deref_lockres_done deref;
+ int ret = 0, r;
+ const char *lockname;
+ unsigned int namelen;
+
+ lockname = res->lockname.name;
+ namelen = res->lockname.len;
+ BUG_ON(namelen > O2NM_MAX_NAME_LEN);
+
+ memset(&deref, 0, sizeof(deref));
+ deref.node_idx = dlm->node_num;
+ deref.namelen = namelen;
+ memcpy(deref.name, lockname, namelen);
+
+ ret = o2net_send_message(DLM_DEREF_LOCKRES_DONE, dlm->key,
+ &deref, sizeof(deref), node, &r);
+ if (ret < 0) {
+		mlog(ML_ERROR, "%s: res %.*s, error %d sending DEREF DONE"
+		     " to node %u\n", dlm->name, namelen,
+		     lockname, ret, node);
+ } else if (r < 0) {
+		/* log it, but otherwise ignore the error */
+		mlog(ML_ERROR, "%s: res %.*s, DEREF DONE to node %u got %d\n",
+		     dlm->name, namelen, lockname, node, r);
+ dlm_print_one_lock_resource(res);
+ }
+}
+
+static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
+{
+ struct dlm_ctxt *dlm;
+ struct dlm_lock_resource *res;
+ u8 node;
+ u8 cleared = 0;
+
+ dlm = item->dlm;
+ res = item->u.dl.deref_res;
+ node = item->u.dl.deref_node;
+
+ spin_lock(&res->spinlock);
+ BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
+ __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
+ if (test_bit(node, res->refmap)) {
+ dlm_lockres_clear_refmap_bit(dlm, res, node);
+ cleared = 1;
+ }
+ spin_unlock(&res->spinlock);
+
+ dlm_drop_lockres_ref_done(dlm, res, node);
+
+ if (cleared) {
+ mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
+ dlm->name, res->lockname.len, res->lockname.name, node);
+ dlm_lockres_calc_usage(dlm, res);
+ } else {
+ mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
+ "but it is already dropped!\n", dlm->name,
+ res->lockname.len, res->lockname.name, node);
+ dlm_print_one_lock_resource(res);
+ }
+
+ dlm_lockres_put(res);
+}
+
+/*
+ * A migratable resource is one that:
+ *  1. is locally mastered, and
+ *  2. has zero local locks, and
+ *  3. has one or more non-local locks, or one or more references.
+ * Returns 1 if yes, 0 if not.
+ */
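+/*
+ * Rationale: migration only makes sense when this node masters the resource
+ * purely on behalf of other nodes.  A local lock means the resource is still
+ * in use here, and a resource with no remote locks and no remote references
+ * has nothing worth handing off.
+ */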
+static int dlm_is_lockres_migratable(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ enum dlm_lockres_list idx;
+ int nonlocal = 0, node_ref;
+ struct list_head *queue;
+ struct dlm_lock *lock;
+ u64 cookie;
+
+ assert_spin_locked(&res->spinlock);
+
+ /* delay migration when the lockres is in MIGRATING state */
+ if (res->state & DLM_LOCK_RES_MIGRATING)
+ return 0;
+
+	/* delay migration when the lockres is in RECOVERING or
+	 * RECOVERY_WAITING state */
+ if (res->state & (DLM_LOCK_RES_RECOVERING|
+ DLM_LOCK_RES_RECOVERY_WAITING))
+ return 0;
+
+ if (res->owner != dlm->node_num)
+ return 0;
+
+ for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
+ queue = dlm_list_idx_to_ptr(res, idx);
+ list_for_each_entry(lock, queue, list) {
+ if (lock->ml.node != dlm->node_num) {
+ nonlocal++;
+ continue;
+ }
+ cookie = be64_to_cpu(lock->ml.cookie);
+ mlog(0, "%s: Not migratable res %.*s, lock %u:%llu on "
+ "%s list\n", dlm->name, res->lockname.len,
+ res->lockname.name,
+ dlm_get_lock_cookie_node(cookie),
+ dlm_get_lock_cookie_seq(cookie),
+ dlm_list_in_text(idx));
+ return 0;
+ }
+ }
+
+ if (!nonlocal) {
+ node_ref = find_first_bit(res->refmap, O2NM_MAX_NODES);
+ if (node_ref >= O2NM_MAX_NODES)
+ return 0;
+ }
+
+ mlog(0, "%s: res %.*s, Migratable\n", dlm->name, res->lockname.len,
+ res->lockname.name);
+
+ return 1;
+}
+
+/*
+ * DLM_MIGRATE_LOCKRES
+ */
+
+
+static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res, u8 target)
+{
+ struct dlm_master_list_entry *mle = NULL;
+ struct dlm_master_list_entry *oldmle = NULL;
+ struct dlm_migratable_lockres *mres = NULL;
+ int ret = 0;
+ const char *name;
+ unsigned int namelen;
+ int mle_added = 0;
+ int wake = 0;
+
+ if (!dlm_grab(dlm))
+ return -EINVAL;
+
+ name = res->lockname.name;
+ namelen = res->lockname.len;
+
+ mlog(0, "%s: Migrating %.*s to node %u\n", dlm->name, namelen, name,
+ target);
+
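+	/*
+	 * Rough sequence: preallocate the migratable-lockres page and an mle,
+	 * install a MIGRATION mle (displacing any MASTER mle), mark the
+	 * lockres MIGRATING and flush asts, push all lock state to the target
+	 * with DLM_MRES_MIGRATION, then wait for the target's assert_master
+	 * before handing over ownership.
+	 */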
+ /* preallocate up front. if this fails, abort */
+ ret = -ENOMEM;
+ mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
+ if (!mres) {
+ mlog_errno(ret);
+ goto leave;
+ }
+
+ mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
+ if (!mle) {
+ mlog_errno(ret);
+ goto leave;
+ }
+ ret = 0;
+
+ /*
+ * clear any existing master requests and
+ * add the migration mle to the list
+ */
+ spin_lock(&dlm->spinlock);
+ spin_lock(&dlm->master_lock);
+ ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
+ namelen, target, dlm->node_num);
+ /* get an extra reference on the mle.
+ * otherwise the assert_master from the new
+ * master will destroy this.
+ */
+ if (ret != -EEXIST)
+ dlm_get_mle_inuse(mle);
+
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+
+ if (ret == -EEXIST) {
+ mlog(0, "another process is already migrating it\n");
+ goto fail;
+ }
+ mle_added = 1;
+
+ /*
+ * set the MIGRATING flag and flush asts
+ * if we fail after this we need to re-dirty the lockres
+ */
+ if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
+ mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
+ "the target went down.\n", res->lockname.len,
+ res->lockname.name, target);
+ spin_lock(&res->spinlock);
+ res->state &= ~DLM_LOCK_RES_MIGRATING;
+ wake = 1;
+ spin_unlock(&res->spinlock);
+ ret = -EINVAL;
+ }
+
+fail:
+ if (ret != -EEXIST && oldmle) {
+ /* master is known, detach if not already detached */
+ dlm_mle_detach_hb_events(dlm, oldmle);
+ dlm_put_mle(oldmle);
+ }
+
+ if (ret < 0) {
+ if (mle_added) {
+ dlm_mle_detach_hb_events(dlm, mle);
+ dlm_put_mle(mle);
+ dlm_put_mle_inuse(mle);
+ } else if (mle) {
+ kmem_cache_free(dlm_mle_cache, mle);
+ mle = NULL;
+ }
+ goto leave;
+ }
+
+ /*
+ * at this point, we have a migration target, an mle
+ * in the master list, and the MIGRATING flag set on
+ * the lockres
+ */
+
+ /* now that remote nodes are spinning on the MIGRATING flag,
+ * ensure that all assert_master work is flushed. */
+ flush_workqueue(dlm->dlm_worker);
+
+ /* notify new node and send all lock state */
+ /* call send_one_lockres with migration flag.
+ * this serves as notice to the target node that a
+ * migration is starting. */
+ ret = dlm_send_one_lockres(dlm, res, mres, target,
+ DLM_MRES_MIGRATION);
+
+ if (ret < 0) {
+ mlog(0, "migration to node %u failed with %d\n",
+ target, ret);
+ /* migration failed, detach and clean up mle */
+ dlm_mle_detach_hb_events(dlm, mle);
+ dlm_put_mle(mle);
+ dlm_put_mle_inuse(mle);
+ spin_lock(&res->spinlock);
+ res->state &= ~DLM_LOCK_RES_MIGRATING;
+ wake = 1;
+ spin_unlock(&res->spinlock);
+ if (dlm_is_host_down(ret))
+ dlm_wait_for_node_death(dlm, target,
+ DLM_NODE_DEATH_WAIT_MAX);
+ goto leave;
+ }
+
+ /* at this point, the target sends a message to all nodes,
+ * (using dlm_do_migrate_request). this node is skipped since
+ * we had to put an mle in the list to begin the process. this
+ * node now waits for target to do an assert master. this node
+ * will be the last one notified, ensuring that the migration
+ * is complete everywhere. if the target dies while this is
+ * going on, some nodes could potentially see the target as the
+ * master, so it is important that my recovery finds the migration
+ * mle and sets the master to UNKNOWN. */
+
+
+ /* wait for new node to assert master */
+ while (1) {
+ ret = wait_event_interruptible_timeout(mle->wq,
+ (atomic_read(&mle->woken) == 1),
+ msecs_to_jiffies(5000));
+
+ if (ret >= 0) {
+ if (atomic_read(&mle->woken) == 1 ||
+ res->owner == target)
+ break;
+
+ mlog(0, "%s:%.*s: timed out during migration\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ /* avoid hang during shutdown when migrating lockres
+ * to a node which also goes down */
+ if (dlm_is_node_dead(dlm, target)) {
+ mlog(0, "%s:%.*s: expected migration "
+ "target %u is no longer up, restarting\n",
+ dlm->name, res->lockname.len,
+ res->lockname.name, target);
+ ret = -EINVAL;
+ /* migration failed, detach and clean up mle */
+ dlm_mle_detach_hb_events(dlm, mle);
+ dlm_put_mle(mle);
+ dlm_put_mle_inuse(mle);
+ spin_lock(&res->spinlock);
+ res->state &= ~DLM_LOCK_RES_MIGRATING;
+ wake = 1;
+ spin_unlock(&res->spinlock);
+ goto leave;
+ }
+ } else
+ mlog(0, "%s:%.*s: caught signal during migration\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ }
+
+ /* all done, set the owner, clear the flag */
+ spin_lock(&res->spinlock);
+ dlm_set_lockres_owner(dlm, res, target);
+ res->state &= ~DLM_LOCK_RES_MIGRATING;
+ dlm_remove_nonlocal_locks(dlm, res);
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+
+ /* master is known, detach if not already detached */
+ dlm_mle_detach_hb_events(dlm, mle);
+ dlm_put_mle_inuse(mle);
+ ret = 0;
+
+ dlm_lockres_calc_usage(dlm, res);
+
+leave:
+ /* re-dirty the lockres if we failed */
+ if (ret < 0)
+ dlm_kick_thread(dlm, res);
+
+ /* wake up waiters if the MIGRATING flag got set
+ * but migration failed */
+ if (wake)
+ wake_up(&res->wq);
+
+ if (mres)
+ free_page((unsigned long)mres);
+
+ dlm_put(dlm);
+
+ mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen,
+ name, target, ret);
+ return ret;
+}
+
+/*
+ * Should be called only after beginning the domain leave process.
+ * There should not be any remaining locks on nonlocal lock resources,
+ * and there should be no local locks left on locally mastered resources.
+ *
+ * Called with the dlm spinlock held, may drop it to do migration, but
+ * will re-acquire before exit.
+ *
+ * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped
+ */
+int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
+ __must_hold(&dlm->spinlock)
+{
+ int ret;
+ int lock_dropped = 0;
+ u8 target = O2NM_MAX_NODES;
+
+ assert_spin_locked(&dlm->spinlock);
+
+ spin_lock(&res->spinlock);
+ if (dlm_is_lockres_migratable(dlm, res))
+ target = dlm_pick_migration_target(dlm, res);
+ spin_unlock(&res->spinlock);
+
+ if (target == O2NM_MAX_NODES)
+ goto leave;
+
+ /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
+ spin_unlock(&dlm->spinlock);
+ lock_dropped = 1;
+ ret = dlm_migrate_lockres(dlm, res, target);
+ if (ret)
+ mlog(0, "%s: res %.*s, Migrate to node %u failed with %d\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ target, ret);
+ spin_lock(&dlm->spinlock);
+leave:
+ return lock_dropped;
+}
+
+int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
+{
+ int ret;
+ spin_lock(&dlm->ast_lock);
+ spin_lock(&lock->spinlock);
+ ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
+ spin_unlock(&lock->spinlock);
+ spin_unlock(&dlm->ast_lock);
+ return ret;
+}
+
+static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ u8 mig_target)
+{
+ int can_proceed;
+ spin_lock(&res->spinlock);
+ can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
+ spin_unlock(&res->spinlock);
+
+ /* target has died, so make the caller break out of the
+ * wait_event, but caller must recheck the domain_map */
+ spin_lock(&dlm->spinlock);
+ if (!test_bit(mig_target, dlm->domain_map))
+ can_proceed = 1;
+ spin_unlock(&dlm->spinlock);
+ return can_proceed;
+}
+
+static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ int ret;
+ spin_lock(&res->spinlock);
+ ret = !!(res->state & DLM_LOCK_RES_DIRTY);
+ spin_unlock(&res->spinlock);
+ return ret;
+}
+
+
+static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ u8 target)
+{
+ int ret = 0;
+
+ mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
+ res->lockname.len, res->lockname.name, dlm->node_num,
+ target);
+ /* need to set MIGRATING flag on lockres. this is done by
+ * ensuring that all asts have been flushed for this lockres. */
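+	/*
+	 * The MIGRATING flag itself is set later, in dlm_lockres_release_ast(),
+	 * once the last reserved ast is dropped; migration_pending tells that
+	 * path to do so and to wake us via dlm->migration_wq.
+	 */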
+ spin_lock(&res->spinlock);
+ BUG_ON(res->migration_pending);
+ res->migration_pending = 1;
+ /* strategy is to reserve an extra ast then release
+ * it below, letting the release do all of the work */
+ __dlm_lockres_reserve_ast(res);
+ spin_unlock(&res->spinlock);
+
+ /* now flush all the pending asts */
+ dlm_kick_thread(dlm, res);
+ /* before waiting on DIRTY, block processes which may
+ * try to dirty the lockres before MIGRATING is set */
+ spin_lock(&res->spinlock);
+ BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
+ res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
+ spin_unlock(&res->spinlock);
+ /* now wait on any pending asts and the DIRTY state */
+ wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
+ dlm_lockres_release_ast(dlm, res);
+
+ mlog(0, "about to wait on migration_wq, dirty=%s\n",
+ res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
+ /* if the extra ref we just put was the final one, this
+ * will pass thru immediately. otherwise, we need to wait
+ * for the last ast to finish. */
+again:
+ ret = wait_event_interruptible_timeout(dlm->migration_wq,
+ dlm_migration_can_proceed(dlm, res, target),
+ msecs_to_jiffies(1000));
+ if (ret < 0) {
+ mlog(0, "woken again: migrating? %s, dead? %s\n",
+ res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
+ test_bit(target, dlm->domain_map) ? "no":"yes");
+ } else {
+ mlog(0, "all is well: migrating? %s, dead? %s\n",
+ res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
+ test_bit(target, dlm->domain_map) ? "no":"yes");
+ }
+ if (!dlm_migration_can_proceed(dlm, res, target)) {
+ mlog(0, "trying again...\n");
+ goto again;
+ }
+
+ ret = 0;
+ /* did the target go down or die? */
+ spin_lock(&dlm->spinlock);
+ if (!test_bit(target, dlm->domain_map)) {
+ mlog(ML_ERROR, "aha. migration target %u just went down\n",
+ target);
+ ret = -EHOSTDOWN;
+ }
+ spin_unlock(&dlm->spinlock);
+
+	/*
+	 * If the target went down, clear DLM_LOCK_RES_BLOCK_DIRTY so that
+	 * migration can be tried again; otherwise the MIGRATING state is
+	 * guaranteed to be set, so just drop the no-longer-needed flag that
+	 * was blocking threads trying to dirty the lockres.
+	 */
+ spin_lock(&res->spinlock);
+ BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
+ res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
+ if (!ret)
+ BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
+ else
+ res->migration_pending = 0;
+ spin_unlock(&res->spinlock);
+
+ /*
+ * at this point:
+ *
+ * o the DLM_LOCK_RES_MIGRATING flag is set if target not down
+ * o there are no pending asts on this lockres
+ * o all processes trying to reserve an ast on this
+ * lockres must wait for the MIGRATING flag to clear
+ */
+ return ret;
+}
+
+/* last step in the migration process.
+ * the original master calls this to free the local copies of the
+ * dlm_lock structures that belong to other nodes. */
+static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ struct list_head *queue = &res->granted;
+ int i, bit;
+ struct dlm_lock *lock, *next;
+
+ assert_spin_locked(&res->spinlock);
+
+ BUG_ON(res->owner == dlm->node_num);
+
+ for (i=0; i<3; i++) {
+ list_for_each_entry_safe(lock, next, queue, list) {
+ if (lock->ml.node != dlm->node_num) {
+ mlog(0, "putting lock for node %u\n",
+ lock->ml.node);
+ /* be extra careful */
+ BUG_ON(!list_empty(&lock->ast_list));
+ BUG_ON(!list_empty(&lock->bast_list));
+ BUG_ON(lock->ast_pending);
+ BUG_ON(lock->bast_pending);
+ dlm_lockres_clear_refmap_bit(dlm, res,
+ lock->ml.node);
+ list_del_init(&lock->list);
+ dlm_lock_put(lock);
+ /* In a normal unlock, we would have added a
+ * DLM_UNLOCK_FREE_LOCK action. Force it. */
+ dlm_lock_put(lock);
+ }
+ }
+ queue++;
+ }
+ bit = 0;
+ while (1) {
+ bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
+ if (bit >= O2NM_MAX_NODES)
+ break;
+ /* do not clear the local node reference, if there is a
+ * process holding this, let it drop the ref itself */
+ if (bit != dlm->node_num) {
+ mlog(0, "%s:%.*s: node %u had a ref to this "
+ "migrating lockres, clearing\n", dlm->name,
+ res->lockname.len, res->lockname.name, bit);
+ dlm_lockres_clear_refmap_bit(dlm, res, bit);
+ }
+ bit++;
+ }
+}
+
+/*
+ * Pick a node to migrate the lock resource to. This function selects a
+ * potential target based first on the locks and then on refmap. It skips
+ * nodes that are in the process of exiting the domain.
+ */
+static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ enum dlm_lockres_list idx;
+ struct list_head *queue;
+ struct dlm_lock *lock;
+ int noderef;
+ u8 nodenum = O2NM_MAX_NODES;
+
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&res->spinlock);
+
+ /* Go through all the locks */
+ for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
+ queue = dlm_list_idx_to_ptr(res, idx);
+ list_for_each_entry(lock, queue, list) {
+ if (lock->ml.node == dlm->node_num)
+ continue;
+ if (test_bit(lock->ml.node, dlm->exit_domain_map))
+ continue;
+ nodenum = lock->ml.node;
+ goto bail;
+ }
+ }
+
+ /* Go thru the refmap */
+	/* Go through the refmap */
+ while (1) {
+ noderef = find_next_bit(res->refmap, O2NM_MAX_NODES,
+ noderef + 1);
+ if (noderef >= O2NM_MAX_NODES)
+ break;
+ if (noderef == dlm->node_num)
+ continue;
+ if (test_bit(noderef, dlm->exit_domain_map))
+ continue;
+ nodenum = noderef;
+ goto bail;
+ }
+
+bail:
+ return nodenum;
+}
+
+/* this is called by the new master once all lockres
+ * data has been received */
+static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ u8 master, u8 new_master,
+ struct dlm_node_iter *iter)
+{
+ struct dlm_migrate_request migrate;
+ int ret, skip, status = 0;
+ int nodenum;
+
+ memset(&migrate, 0, sizeof(migrate));
+ migrate.namelen = res->lockname.len;
+ memcpy(migrate.name, res->lockname.name, migrate.namelen);
+ migrate.new_master = new_master;
+ migrate.master = master;
+
+ ret = 0;
+
+ /* send message to all nodes, except the master and myself */
+ while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
+ if (nodenum == master ||
+ nodenum == new_master)
+ continue;
+
+ /* We could race exit domain. If exited, skip. */
+ spin_lock(&dlm->spinlock);
+ skip = (!test_bit(nodenum, dlm->domain_map));
+ spin_unlock(&dlm->spinlock);
+ if (skip) {
+ clear_bit(nodenum, iter->node_map);
+ continue;
+ }
+
+ ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
+ &migrate, sizeof(migrate), nodenum,
+ &status);
+ if (ret < 0) {
+			mlog(ML_ERROR, "%s: res %.*s, Error %d sending "
+			     "MIGRATE_REQUEST to node %u\n", dlm->name,
+			     migrate.namelen, migrate.name, ret, nodenum);
+ if (!dlm_is_host_down(ret)) {
+ mlog(ML_ERROR, "unhandled error=%d!\n", ret);
+ BUG();
+ }
+ clear_bit(nodenum, iter->node_map);
+ ret = 0;
+ } else if (status < 0) {
+ mlog(0, "migrate request (node %u) returned %d!\n",
+ nodenum, status);
+ ret = status;
+ } else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
+ /* during the migration request we short-circuited
+ * the mastery of the lockres. make sure we have
+ * a mastery ref for nodenum */
+ mlog(0, "%s:%.*s: need ref for node %u\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ nodenum);
+ spin_lock(&res->spinlock);
+ dlm_lockres_set_refmap_bit(dlm, res, nodenum);
+ spin_unlock(&res->spinlock);
+ }
+ }
+
+ if (ret < 0)
+ mlog_errno(ret);
+
+ mlog(0, "returning ret=%d\n", ret);
+ return ret;
+}
+
+
+/* if there is an existing mle for this lockres, we now know who the master is.
+ * (the one who sent us *this* message) we can clear it up right away.
+ * since the process that put the mle on the list still has a reference to it,
+ * we can unhash it now, set the master and wake the process. as a result,
+ * we will have no mle in the list to start with. now we can add an mle for
+ * the migration and this should be the only one found for those scanning the
+ * list. */
+int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_lock_resource *res = NULL;
+ struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
+ struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
+ const char *name;
+ unsigned int namelen, hash;
+ int ret = 0;
+
+ if (!dlm_grab(dlm))
+ return 0;
+
+ name = migrate->name;
+ namelen = migrate->namelen;
+ hash = dlm_lockid_hash(name, namelen);
+
+ /* preallocate.. if this fails, abort */
+ mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
+
+ if (!mle) {
+ ret = -ENOMEM;
+ goto leave;
+ }
+
+ /* check for pre-existing lock */
+ spin_lock(&dlm->spinlock);
+ res = __dlm_lookup_lockres(dlm, name, namelen, hash);
+ if (res) {
+ spin_lock(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_RECOVERING) {
+ /* if all is working ok, this can only mean that we got
+ * a migrate request from a node that we now see as
+ * dead. what can we do here? drop it to the floor? */
+ spin_unlock(&res->spinlock);
+ mlog(ML_ERROR, "Got a migrate request, but the "
+ "lockres is marked as recovering!");
+ kmem_cache_free(dlm_mle_cache, mle);
+ ret = -EINVAL; /* need a better solution */
+ goto unlock;
+ }
+ res->state |= DLM_LOCK_RES_MIGRATING;
+ spin_unlock(&res->spinlock);
+ }
+
+ spin_lock(&dlm->master_lock);
+ /* ignore status. only nonzero status would BUG. */
+ ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
+ name, namelen,
+ migrate->new_master,
+ migrate->master);
+
+ if (ret < 0)
+ kmem_cache_free(dlm_mle_cache, mle);
+
+ spin_unlock(&dlm->master_lock);
+unlock:
+ spin_unlock(&dlm->spinlock);
+
+ if (oldmle) {
+ /* master is known, detach if not already detached */
+ dlm_mle_detach_hb_events(dlm, oldmle);
+ dlm_put_mle(oldmle);
+ }
+
+ if (res)
+ dlm_lockres_put(res);
+leave:
+ dlm_put(dlm);
+ return ret;
+}
+
+/* must be holding dlm->spinlock and dlm->master_lock.
+ * when adding a migration mle, we can clear any other mles
+ * in the master list because we know with certainty that
+ * the master is "master". so we remove any old mle from
+ * the list after setting its master field, and then add
+ * the new migration mle. this way we hold to the rule
+ * of having only one mle for a given lock name at all times. */
+static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_master_list_entry *mle,
+ struct dlm_master_list_entry **oldmle,
+ const char *name, unsigned int namelen,
+ u8 new_master, u8 master)
+{
+ int found;
+ int ret = 0;
+
+ *oldmle = NULL;
+
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&dlm->master_lock);
+
+ /* caller is responsible for any ref taken here on oldmle */
+ found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
+ if (found) {
+ struct dlm_master_list_entry *tmp = *oldmle;
+ spin_lock(&tmp->spinlock);
+ if (tmp->type == DLM_MLE_MIGRATION) {
+ if (master == dlm->node_num) {
+ /* ah another process raced me to it */
+ mlog(0, "tried to migrate %.*s, but some "
+ "process beat me to it\n",
+ namelen, name);
+ spin_unlock(&tmp->spinlock);
+ return -EEXIST;
+ } else {
+ /* bad. 2 NODES are trying to migrate! */
+ mlog(ML_ERROR, "migration error mle: "
+ "master=%u new_master=%u // request: "
+ "master=%u new_master=%u // "
+ "lockres=%.*s\n",
+ tmp->master, tmp->new_master,
+ master, new_master,
+ namelen, name);
+ BUG();
+ }
+ } else {
+ /* this is essentially what assert_master does */
+ tmp->master = master;
+ atomic_set(&tmp->woken, 1);
+ wake_up(&tmp->wq);
+ /* remove it so that only one mle will be found */
+ __dlm_unlink_mle(dlm, tmp);
+ __dlm_mle_detach_hb_events(dlm, tmp);
+ if (tmp->type == DLM_MLE_MASTER) {
+ ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
+ mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
+ "telling master to get ref "
+ "for cleared out mle during "
+ "migration\n", dlm->name,
+ namelen, name, master,
+ new_master);
+ }
+ }
+ spin_unlock(&tmp->spinlock);
+ }
+
+ /* now add a migration mle to the tail of the list */
+ dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
+ mle->new_master = new_master;
+ /* the new master will be sending an assert master for this.
+ * at that point we will get the refmap reference */
+ mle->master = master;
+ /* do this for consistency with other mle types */
+ set_bit(new_master, mle->maybe_map);
+ __dlm_insert_mle(dlm, mle);
+
+ return ret;
+}
+
+/*
+ * Sets the owner of the lockres, associated to the mle, to UNKNOWN
+ */
+static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm,
+ struct dlm_master_list_entry *mle)
+{
+ struct dlm_lock_resource *res;
+
+ /* Find the lockres associated to the mle and set its owner to UNK */
+ res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen,
+ mle->mnamehash);
+ if (res) {
+ spin_unlock(&dlm->master_lock);
+
+ /* move lockres onto recovery list */
+ spin_lock(&res->spinlock);
+ dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
+ dlm_move_lockres_to_recovery_list(dlm, res);
+ spin_unlock(&res->spinlock);
+ dlm_lockres_put(res);
+
+ /* about to get rid of mle, detach from heartbeat */
+ __dlm_mle_detach_hb_events(dlm, mle);
+
+ /* dump the mle */
+ spin_lock(&dlm->master_lock);
+ __dlm_put_mle(mle);
+ spin_unlock(&dlm->master_lock);
+ }
+
+ return res;
+}
+
+static void dlm_clean_migration_mle(struct dlm_ctxt *dlm,
+ struct dlm_master_list_entry *mle)
+{
+ __dlm_mle_detach_hb_events(dlm, mle);
+
+ spin_lock(&mle->spinlock);
+ __dlm_unlink_mle(dlm, mle);
+ atomic_set(&mle->woken, 1);
+ spin_unlock(&mle->spinlock);
+
+ wake_up(&mle->wq);
+}
+
+static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
+ struct dlm_master_list_entry *mle, u8 dead_node)
+{
+ int bit;
+
+ BUG_ON(mle->type != DLM_MLE_BLOCK);
+
+ spin_lock(&mle->spinlock);
+ bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES);
+ if (bit != dead_node) {
+ mlog(0, "mle found, but dead node %u would not have been "
+ "master\n", dead_node);
+ spin_unlock(&mle->spinlock);
+ } else {
+ /* Must drop the refcount by one since the assert_master will
+ * never arrive. This may result in the mle being unlinked and
+ * freed, but there may still be a process waiting in the
+ * dlmlock path which is fine. */
+ mlog(0, "node %u was expected master\n", dead_node);
+ atomic_set(&mle->woken, 1);
+ spin_unlock(&mle->spinlock);
+ wake_up(&mle->wq);
+
+ /* Do not need events any longer, so detach from heartbeat */
+ __dlm_mle_detach_hb_events(dlm, mle);
+ __dlm_put_mle(mle);
+ }
+}
+
+void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
+{
+ struct dlm_master_list_entry *mle;
+ struct dlm_lock_resource *res;
+ struct hlist_head *bucket;
+ struct hlist_node *tmp;
+ unsigned int i;
+
+ mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node);
+top:
+ assert_spin_locked(&dlm->spinlock);
+
+ /* clean the master list */
+ spin_lock(&dlm->master_lock);
+ for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+ bucket = dlm_master_hash(dlm, i);
+ hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
+ BUG_ON(mle->type != DLM_MLE_BLOCK &&
+ mle->type != DLM_MLE_MASTER &&
+ mle->type != DLM_MLE_MIGRATION);
+
+ /* MASTER mles are initiated locally. The waiting
+ * process will notice the node map change shortly.
+ * Let that happen as normal. */
+ if (mle->type == DLM_MLE_MASTER)
+ continue;
+
+ /* BLOCK mles are initiated by other nodes. Need to
+ * clean up if the dead node would have been the
+ * master. */
+ if (mle->type == DLM_MLE_BLOCK) {
+ dlm_clean_block_mle(dlm, mle, dead_node);
+ continue;
+ }
+
+ /* Everything else is a MIGRATION mle */
+
+ /* The rule for MIGRATION mles is that the master
+ * becomes UNKNOWN if *either* the original or the new
+ * master dies. All UNKNOWN lockres' are sent to
+ * whichever node becomes the recovery master. The new
+ * master is responsible for determining if there is
+ * still a master for this lockres, or if he needs to
+ * take over mastery. Either way, this node should
+ * expect another message to resolve this. */
+
+ if (mle->master != dead_node &&
+ mle->new_master != dead_node)
+ continue;
+
+ if (mle->new_master == dead_node && mle->inuse) {
+				mlog(ML_NOTICE, "%s: target %u died during "
+						"migration from %u, but the MLE is "
+						"still in use, ignoring it!\n",
+						dlm->name, dead_node,
+						mle->master);
+ continue;
+ }
+
+ /* If we have reached this point, this mle needs to be
+ * removed from the list and freed. */
+ dlm_clean_migration_mle(dlm, mle);
+
+ mlog(0, "%s: node %u died during migration from "
+ "%u to %u!\n", dlm->name, dead_node, mle->master,
+ mle->new_master);
+
+ /* If we find a lockres associated with the mle, we've
+ * hit this rare case that messes up our lock ordering.
+ * If so, we need to drop the master lock so that we can
+ * take the lockres lock, meaning that we will have to
+ * restart from the head of list. */
+ res = dlm_reset_mleres_owner(dlm, mle);
+ if (res)
+ /* restart */
+ goto top;
+
+ /* This may be the last reference */
+ __dlm_put_mle(mle);
+ }
+ }
+ spin_unlock(&dlm->master_lock);
+}
+
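+/*
+ * Finish a migration from old_master to this node: give the old master a
+ * refmap entry for its brief remaining reference, tell every other live node
+ * about the ownership change, assert mastery to them and then to the old
+ * master, and finally take ownership and re-dirty the lockres locally.
+ */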
+int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+ u8 old_master)
+{
+ struct dlm_node_iter iter;
+ int ret = 0;
+
+ spin_lock(&dlm->spinlock);
+ dlm_node_iter_init(dlm->domain_map, &iter);
+ clear_bit(old_master, iter.node_map);
+ clear_bit(dlm->node_num, iter.node_map);
+ spin_unlock(&dlm->spinlock);
+
+ /* ownership of the lockres is changing. account for the
+ * mastery reference here since old_master will briefly have
+ * a reference after the migration completes */
+ spin_lock(&res->spinlock);
+ dlm_lockres_set_refmap_bit(dlm, res, old_master);
+ spin_unlock(&res->spinlock);
+
+ mlog(0, "now time to do a migrate request to other nodes\n");
+ ret = dlm_do_migrate_request(dlm, res, old_master,
+ dlm->node_num, &iter);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto leave;
+ }
+
+ mlog(0, "doing assert master of %.*s to all except the original node\n",
+ res->lockname.len, res->lockname.name);
+ /* this call now finishes out the nodemap
+ * even if one or more nodes die */
+ ret = dlm_do_assert_master(dlm, res, iter.node_map,
+ DLM_ASSERT_MASTER_FINISH_MIGRATION);
+ if (ret < 0) {
+ /* no longer need to retry. all living nodes contacted. */
+ mlog_errno(ret);
+ ret = 0;
+ }
+
+ bitmap_zero(iter.node_map, O2NM_MAX_NODES);
+ set_bit(old_master, iter.node_map);
+ mlog(0, "doing assert master of %.*s back to %u\n",
+ res->lockname.len, res->lockname.name, old_master);
+ ret = dlm_do_assert_master(dlm, res, iter.node_map,
+ DLM_ASSERT_MASTER_FINISH_MIGRATION);
+ if (ret < 0) {
+ mlog(0, "assert master to original master failed "
+ "with %d.\n", ret);
+ /* the only nonzero status here would be because of
+ * a dead original node. we're done. */
+ ret = 0;
+ }
+
+ /* all done, set the owner, clear the flag */
+ spin_lock(&res->spinlock);
+ dlm_set_lockres_owner(dlm, res, dlm->node_num);
+ res->state &= ~DLM_LOCK_RES_MIGRATING;
+ spin_unlock(&res->spinlock);
+ /* re-dirty it on the new master */
+ dlm_kick_thread(dlm, res);
+ wake_up(&res->wq);
+leave:
+ return ret;
+}
+
+/*
+ * LOCKRES AST REFCOUNT
+ * this is integral to migration
+ */
+
+/* for future intent to call an ast, reserve one ahead of time.
+ * this should be called only after waiting on the lockres
+ * with dlm_wait_on_lockres, and while still holding the
+ * spinlock after the call. */
+void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
+{
+ assert_spin_locked(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_MIGRATING) {
+ __dlm_print_one_lock_resource(res);
+ }
+ BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
+
+ atomic_inc(&res->asts_reserved);
+}
+
+/*
+ * used to drop the reserved ast, either because it went unused,
+ * or because the ast/bast was actually called.
+ *
+ * also, if there is a pending migration on this lockres,
+ * and this was the last pending ast on the lockres,
+ * atomically set the MIGRATING flag before we drop the lock.
+ * this is how we ensure that migration can proceed with no
+ * asts in progress. note that it is ok if the state of the
+ * queues is such that a lock should be granted in the future
+ * or that a bast should be fired, because the new master will
+ * shuffle the lists on this lockres as soon as it is migrated.
+ */
+void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
+ return;
+
+ if (!res->migration_pending) {
+ spin_unlock(&res->spinlock);
+ return;
+ }
+
+ BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
+ res->migration_pending = 0;
+ res->state |= DLM_LOCK_RES_MIGRATING;
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+ wake_up(&dlm->migration_wq);
+}
+
+void dlm_force_free_mles(struct dlm_ctxt *dlm)
+{
+ int i;
+ struct hlist_head *bucket;
+ struct dlm_master_list_entry *mle;
+ struct hlist_node *tmp;
+
+ /*
+ * We notified all other nodes that we are exiting the domain and
+ * set the dlm state to DLM_CTXT_LEAVING. If any mles are still
+ * around we force free them and wake any processes that are waiting
+ * on the mles
+ */
+ spin_lock(&dlm->spinlock);
+ spin_lock(&dlm->master_lock);
+
+ BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
+ BUG_ON((find_first_bit(dlm->domain_map, O2NM_MAX_NODES) < O2NM_MAX_NODES));
+
+ for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+ bucket = dlm_master_hash(dlm, i);
+ hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
+ if (mle->type != DLM_MLE_BLOCK) {
+ mlog(ML_ERROR, "bad mle: %p\n", mle);
+ dlm_print_one_mle(mle);
+ }
+ atomic_set(&mle->woken, 1);
+ wake_up(&mle->wq);
+
+ __dlm_unlink_mle(dlm, mle);
+ __dlm_mle_detach_hb_events(dlm, mle);
+ __dlm_put_mle(mle);
+ }
+ }
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+}
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
new file mode 100644
index 0000000000..50da8af988
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -0,0 +1,2955 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * dlmrecovery.c
+ *
+ * recovery stuff
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ */
+
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/random.h>
+#include <linux/blkdev.h>
+#include <linux/socket.h>
+#include <linux/inet.h>
+#include <linux/timer.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+
+
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
+
+#include "dlmapi.h"
+#include "dlmcommon.h"
+#include "dlmdomain.h"
+
+#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
+#include "../cluster/masklog.h"
+
+static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);
+
+static int dlm_recovery_thread(void *data);
+static int dlm_do_recovery(struct dlm_ctxt *dlm);
+
+static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
+static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
+static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
+static int dlm_request_all_locks(struct dlm_ctxt *dlm,
+ u8 request_from, u8 dead_node);
+static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm);
+
+static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
+static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
+ const char *lockname, int namelen,
+ int total_locks, u64 cookie,
+ u8 flags, u8 master);
+static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
+ struct dlm_migratable_lockres *mres,
+ u8 send_to,
+ struct dlm_lock_resource *res,
+ int total_locks);
+static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_migratable_lockres *mres);
+static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
+static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
+ u8 dead_node, u8 send_to);
+static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
+static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
+ struct list_head *list, u8 dead_node);
+static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
+ u8 dead_node, u8 new_master);
+static void dlm_reco_ast(void *astdata);
+static void dlm_reco_bast(void *astdata, int blocked_type);
+static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
+static void dlm_request_all_locks_worker(struct dlm_work_item *item,
+ void *data);
+static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
+static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ u8 *real_master);
+
+static u64 dlm_get_next_mig_cookie(void);
+
+static DEFINE_SPINLOCK(dlm_reco_state_lock);
+static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
+static u64 dlm_mig_cookie = 1;
+
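+/* Migration cookies tag a stream of migratable-lockres messages.  Values
+ * increase monotonically and wrap back to 1, so 0 is never handed out
+ * (presumably reserved to mean "not a migration"). */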
+static u64 dlm_get_next_mig_cookie(void)
+{
+ u64 c;
+ spin_lock(&dlm_mig_cookie_lock);
+ c = dlm_mig_cookie;
+ if (dlm_mig_cookie == (~0ULL))
+ dlm_mig_cookie = 1;
+ else
+ dlm_mig_cookie++;
+ spin_unlock(&dlm_mig_cookie_lock);
+ return c;
+}
+
+static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
+ u8 dead_node)
+{
+ assert_spin_locked(&dlm->spinlock);
+ if (dlm->reco.dead_node != dead_node)
+ mlog(0, "%s: changing dead_node from %u to %u\n",
+ dlm->name, dlm->reco.dead_node, dead_node);
+ dlm->reco.dead_node = dead_node;
+}
+
+static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
+ u8 master)
+{
+ assert_spin_locked(&dlm->spinlock);
+ mlog(0, "%s: changing new_master from %u to %u\n",
+ dlm->name, dlm->reco.new_master, master);
+ dlm->reco.new_master = master;
+}
+
+static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
+{
+ assert_spin_locked(&dlm->spinlock);
+ clear_bit(dlm->reco.dead_node, dlm->recovery_map);
+ dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
+ dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
+}
+
+/* Worker function used during recovery. */
+void dlm_dispatch_work(struct work_struct *work)
+{
+ struct dlm_ctxt *dlm =
+ container_of(work, struct dlm_ctxt, dispatched_work);
+ LIST_HEAD(tmp_list);
+ struct dlm_work_item *item, *next;
+ dlm_workfunc_t *workfunc;
+ int tot=0;
+
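+	/*
+	 * Splice the whole work list off under work_lock, then run the items
+	 * from the private list with no locks held; work functions may sleep
+	 * and do network I/O.  Each item carries a dlm reference taken when
+	 * it was queued, which is dropped after its work function runs.
+	 */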
+ spin_lock(&dlm->work_lock);
+ list_splice_init(&dlm->work_list, &tmp_list);
+ spin_unlock(&dlm->work_lock);
+
+ list_for_each_entry(item, &tmp_list, list) {
+ tot++;
+ }
+ mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);
+
+ list_for_each_entry_safe(item, next, &tmp_list, list) {
+ workfunc = item->func;
+ list_del_init(&item->list);
+
+ /* already have ref on dlm to avoid having
+ * it disappear. just double-check. */
+ BUG_ON(item->dlm != dlm);
+
+ /* this is allowed to sleep and
+ * call network stuff */
+ workfunc(item, item->data);
+
+ dlm_put(dlm);
+ kfree(item);
+ }
+}
+
+/*
+ * RECOVERY THREAD
+ */
+
+void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
+{
+ /* wake the recovery thread
+ * this will wake the reco thread in one of three places
+ * 1) sleeping with no recovery happening
+ * 2) sleeping with recovery mastered elsewhere
+ * 3) recovery mastered here, waiting on reco data */
+
+ wake_up(&dlm->dlm_reco_thread_wq);
+}
+
+/* Launch the recovery thread */
+int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
+{
+ mlog(0, "starting dlm recovery thread...\n");
+
+ dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
+ "dlm_reco-%s", dlm->name);
+ if (IS_ERR(dlm->dlm_reco_thread_task)) {
+ mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
+ dlm->dlm_reco_thread_task = NULL;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
+{
+ if (dlm->dlm_reco_thread_task) {
+ mlog(0, "waiting for dlm recovery thread to exit\n");
+ kthread_stop(dlm->dlm_reco_thread_task);
+ dlm->dlm_reco_thread_task = NULL;
+ }
+}
+
+
+
+/*
+ * this is lame, but here's how recovery works...
+ * 1) all recovery threads cluster wide will work on recovering
+ * ONE node at a time
+ * 2) negotiate who will take over all the locks for the dead node.
+ *    that's right... ALL the locks.
+ * 3) once a new master is chosen, everyone scans all locks
+ * and moves aside those mastered by the dead guy
+ * 4) each of these locks should be locked until recovery is done
+ * 5) the new master collects up all of the secondary lock queue info
+ * one lock at a time, forcing each node to communicate back
+ * before continuing
+ * 6) each secondary lock queue responds with the full known lock info
+ * 7) once the new master has run all its locks, it sends an ALLDONE!
+ * message to everyone
+ * 8) upon receiving this message, the secondary queue node unlocks
+ * and responds to the ALLDONE
+ * 9) once the new master gets responses from everyone, it unlocks
+ * everything and recovery for this dead node is done
+ *10) go back to 2) while there are still dead nodes
+ *
+ */
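+/*
+ * Illustrative example (hypothetical node numbers): node 7 dies; every
+ * surviving recovery thread notices 7 in the recovery map; one node, say
+ * node 2, wins the $RECOVERY lock and becomes the recovery master; node 2
+ * requests all of node 7's lock state from each live node in turn, remasters
+ * those lock resources, then sends the finalize/ALLDONE messages so the
+ * others drop their recovery state for node 7.
+ */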
+
+static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
+{
+ struct dlm_reco_node_data *ndata;
+ struct dlm_lock_resource *res;
+
+ mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
+ dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
+ dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
+ dlm->reco.dead_node, dlm->reco.new_master);
+
+ list_for_each_entry(ndata, &dlm->reco.node_data, list) {
+ char *st = "unknown";
+ switch (ndata->state) {
+ case DLM_RECO_NODE_DATA_INIT:
+ st = "init";
+ break;
+ case DLM_RECO_NODE_DATA_REQUESTING:
+ st = "requesting";
+ break;
+ case DLM_RECO_NODE_DATA_DEAD:
+ st = "dead";
+ break;
+ case DLM_RECO_NODE_DATA_RECEIVING:
+ st = "receiving";
+ break;
+ case DLM_RECO_NODE_DATA_REQUESTED:
+ st = "requested";
+ break;
+ case DLM_RECO_NODE_DATA_DONE:
+ st = "done";
+ break;
+ case DLM_RECO_NODE_DATA_FINALIZE_SENT:
+ st = "finalize-sent";
+ break;
+ default:
+ st = "bad";
+ break;
+ }
+ mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
+ dlm->name, ndata->node_num, st);
+ }
+ list_for_each_entry(res, &dlm->reco.resources, recovering) {
+ mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ }
+}
+
+#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)
+
+static int dlm_recovery_thread(void *data)
+{
+ int status;
+ struct dlm_ctxt *dlm = data;
+ unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);
+
+	mlog(0, "dlm recovery thread running for %s...\n", dlm->name);
+
+ while (!kthread_should_stop()) {
+ if (dlm_domain_fully_joined(dlm)) {
+ status = dlm_do_recovery(dlm);
+ if (status == -EAGAIN) {
+ /* do not sleep, recheck immediately. */
+ continue;
+ }
+ if (status < 0)
+ mlog_errno(status);
+ }
+
+ wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
+ kthread_should_stop(),
+ timeout);
+ }
+
+ mlog(0, "quitting DLM recovery thread\n");
+ return 0;
+}
+
+/* returns true when the recovery master has contacted us */
+static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
+{
+ int ready;
+ spin_lock(&dlm->spinlock);
+ ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
+ spin_unlock(&dlm->spinlock);
+ return ready;
+}
+
+/* returns true if node is no longer in the domain
+ * could be dead or just not joined */
+int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
+{
+ int dead;
+ spin_lock(&dlm->spinlock);
+ dead = !test_bit(node, dlm->domain_map);
+ spin_unlock(&dlm->spinlock);
+ return dead;
+}
+
+/* returns true if node is no longer in the recovery map,
+ * i.e. it has either been recovered or was never being recovered */
+static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
+{
+ int recovered;
+ spin_lock(&dlm->spinlock);
+ recovered = !test_bit(node, dlm->recovery_map);
+ spin_unlock(&dlm->spinlock);
+ return recovered;
+}
+
+
+void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
+{
+ if (dlm_is_node_dead(dlm, node))
+ return;
+
+ printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
+ "domain %s\n", node, dlm->name);
+
+ if (timeout)
+ wait_event_timeout(dlm->dlm_reco_thread_wq,
+ dlm_is_node_dead(dlm, node),
+ msecs_to_jiffies(timeout));
+ else
+ wait_event(dlm->dlm_reco_thread_wq,
+ dlm_is_node_dead(dlm, node));
+}
+
+void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
+{
+ if (dlm_is_node_recovered(dlm, node))
+ return;
+
+ printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
+ "domain %s\n", node, dlm->name);
+
+ if (timeout)
+ wait_event_timeout(dlm->dlm_reco_thread_wq,
+ dlm_is_node_recovered(dlm, node),
+ msecs_to_jiffies(timeout));
+ else
+ wait_event(dlm->dlm_reco_thread_wq,
+ dlm_is_node_recovered(dlm, node));
+}
+
+/* callers of the top-level api calls (dlmlock/dlmunlock) should
+ * block on the dlm->reco.event when recovery is in progress.
+ * the dlm recovery thread will set this state when it begins
+ * recovering a dead node (as the new master or not) and clear
+ * the state and wake as soon as all affected lock resources have
+ * been marked with the RECOVERY flag */
+static int dlm_in_recovery(struct dlm_ctxt *dlm)
+{
+ int in_recovery;
+ spin_lock(&dlm->spinlock);
+ in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
+ spin_unlock(&dlm->spinlock);
+ return in_recovery;
+}
+
+
+void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
+{
+ if (dlm_in_recovery(dlm)) {
+ mlog(0, "%s: reco thread %d in recovery: "
+ "state=%d, master=%u, dead=%u\n",
+ dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
+ dlm->reco.state, dlm->reco.new_master,
+ dlm->reco.dead_node);
+ }
+ wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
+}
+
+static void dlm_begin_recovery(struct dlm_ctxt *dlm)
+{
+ assert_spin_locked(&dlm->spinlock);
+ BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
+ printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
+ dlm->name, dlm->reco.dead_node);
+ dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
+}
+
+static void dlm_end_recovery(struct dlm_ctxt *dlm)
+{
+ spin_lock(&dlm->spinlock);
+ BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
+ dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
+ spin_unlock(&dlm->spinlock);
+ printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
+ wake_up(&dlm->reco.event);
+}
+
+static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
+{
+ printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
+ "dead node %u in domain %s\n", dlm->reco.new_master,
+ (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
+ dlm->reco.dead_node, dlm->name);
+}
+
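+/* main per-pass recovery driver: pick a dead node from the recovery
+ * map, mark recovery active, and either master the recovery locally
+ * or defer to another node.  Returns -EAGAIN after a successful
+ * remaster so the caller rechecks for more dead nodes. */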
+static int dlm_do_recovery(struct dlm_ctxt *dlm)
+{
+ int status = 0;
+ int ret;
+
+ spin_lock(&dlm->spinlock);
+
+ if (dlm->migrate_done) {
+ mlog(0, "%s: no need do recovery after migrating all "
+ "lock resources\n", dlm->name);
+ spin_unlock(&dlm->spinlock);
+ return 0;
+ }
+
+ /* check to see if the new master has died */
+ if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
+ test_bit(dlm->reco.new_master, dlm->recovery_map)) {
+ mlog(0, "new master %u died while recovering %u!\n",
+ dlm->reco.new_master, dlm->reco.dead_node);
+ /* unset the new_master, leave dead_node */
+ dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
+ }
+
+ /* select a target to recover */
+ if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
+ int bit;
+
+ bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES);
+ if (bit >= O2NM_MAX_NODES || bit < 0)
+ dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
+ else
+ dlm_set_reco_dead_node(dlm, bit);
+ } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
+ /* BUG? */
+ mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
+ dlm->reco.dead_node);
+ dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
+ }
+
+ if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
+ // mlog(0, "nothing to recover! sleeping now!\n");
+ spin_unlock(&dlm->spinlock);
+ /* return to main thread loop and sleep. */
+ return 0;
+ }
+ mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
+ dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
+ dlm->reco.dead_node);
+
+ /* take write barrier */
+ /* (stops the list reshuffling thread, proxy ast handling) */
+ dlm_begin_recovery(dlm);
+
+ spin_unlock(&dlm->spinlock);
+
+ if (dlm->reco.new_master == dlm->node_num)
+ goto master_here;
+
+ if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
+ /* choose a new master, returns 0 if this node
+ * is the master, -EEXIST if it's another node.
+ * this does not return until a new master is chosen
+ * or recovery completes entirely. */
+ ret = dlm_pick_recovery_master(dlm);
+ if (!ret) {
+ /* already notified everyone. go. */
+ goto master_here;
+ }
+ mlog(0, "another node will master this recovery session.\n");
+ }
+
+ dlm_print_recovery_master(dlm);
+
+ /* it is safe to start everything back up here
+ * because all of the dead node's lock resources
+ * have been marked as in-recovery */
+ dlm_end_recovery(dlm);
+
+ /* sleep out in main dlm_recovery_thread loop. */
+ return 0;
+
+master_here:
+ dlm_print_recovery_master(dlm);
+
+ status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
+ if (status < 0) {
+ /* we should never hit this anymore */
+ mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
+ "retrying.\n", dlm->name, status, dlm->reco.dead_node);
+ /* yield a bit to allow any final network messages
+ * to get handled on remaining nodes */
+ msleep(100);
+ } else {
+ /* success! see if any other nodes need recovery */
+ mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
+ dlm->name, dlm->reco.dead_node, dlm->node_num);
+ spin_lock(&dlm->spinlock);
+ __dlm_reset_recovery(dlm);
+ dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
+ spin_unlock(&dlm->spinlock);
+ }
+ dlm_end_recovery(dlm);
+
+ /* continue and look for another dead node */
+ return -EAGAIN;
+}
+
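+/* run as the recovery master: build the per-node recovery area,
+ * request the dead node's lock state from every live node, wait for
+ * all of them to report DONE, then send the finalize message and
+ * take ownership of the recovered lock resources. */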
+static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
+{
+ int status = 0;
+ struct dlm_reco_node_data *ndata;
+ int all_nodes_done;
+ int destroy = 0;
+ int pass = 0;
+
+ do {
+ /* we have become recovery master. there is no escaping
+ * this, so just keep trying until we get it. */
+ status = dlm_init_recovery_area(dlm, dead_node);
+ if (status < 0) {
+ mlog(ML_ERROR, "%s: failed to alloc recovery area, "
+ "retrying\n", dlm->name);
+ msleep(1000);
+ }
+ } while (status != 0);
+
+ /* safe to access the node data list without a lock, since this
+ * process is the only one to change the list */
+ list_for_each_entry(ndata, &dlm->reco.node_data, list) {
+ BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
+ ndata->state = DLM_RECO_NODE_DATA_REQUESTING;
+
+ mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
+ ndata->node_num);
+
+ if (ndata->node_num == dlm->node_num) {
+ ndata->state = DLM_RECO_NODE_DATA_DONE;
+ continue;
+ }
+
+ do {
+ status = dlm_request_all_locks(dlm, ndata->node_num,
+ dead_node);
+ if (status < 0) {
+ mlog_errno(status);
+ if (dlm_is_host_down(status)) {
+ /* node died, ignore it for recovery */
+ status = 0;
+ ndata->state = DLM_RECO_NODE_DATA_DEAD;
+ /* wait for the domain map to catch up
+ * with the network state. */
+ wait_event_timeout(dlm->dlm_reco_thread_wq,
+ dlm_is_node_dead(dlm,
+ ndata->node_num),
+ msecs_to_jiffies(1000));
+ mlog(0, "waited 1 sec for %u, "
+ "dead? %s\n", ndata->node_num,
+ dlm_is_node_dead(dlm, ndata->node_num) ?
+ "yes" : "no");
+ } else {
+ /* -ENOMEM on the other node */
+ mlog(0, "%s: node %u returned "
+ "%d during recovery, retrying "
+ "after a short wait\n",
+ dlm->name, ndata->node_num,
+ status);
+ msleep(100);
+ }
+ }
+ } while (status != 0);
+
+ spin_lock(&dlm_reco_state_lock);
+ switch (ndata->state) {
+ case DLM_RECO_NODE_DATA_INIT:
+ case DLM_RECO_NODE_DATA_FINALIZE_SENT:
+ case DLM_RECO_NODE_DATA_REQUESTED:
+ BUG();
+ break;
+ case DLM_RECO_NODE_DATA_DEAD:
+ mlog(0, "node %u died after requesting "
+ "recovery info for node %u\n",
+ ndata->node_num, dead_node);
+ /* fine. don't need this node's info.
+ * continue without it. */
+ break;
+ case DLM_RECO_NODE_DATA_REQUESTING:
+ ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
+ mlog(0, "now receiving recovery data from "
+ "node %u for dead node %u\n",
+ ndata->node_num, dead_node);
+ break;
+ case DLM_RECO_NODE_DATA_RECEIVING:
+ mlog(0, "already receiving recovery data from "
+ "node %u for dead node %u\n",
+ ndata->node_num, dead_node);
+ break;
+ case DLM_RECO_NODE_DATA_DONE:
+ mlog(0, "already DONE receiving recovery data "
+ "from node %u for dead node %u\n",
+ ndata->node_num, dead_node);
+ break;
+ }
+ spin_unlock(&dlm_reco_state_lock);
+ }
+
+ mlog(0, "%s: Done requesting all lock info\n", dlm->name);
+
+ /* nodes should be sending reco data now
+ * just need to wait */
+
+ while (1) {
+ /* check all the nodes now to see if we are
+ * done, or if anyone died */
+ all_nodes_done = 1;
+ spin_lock(&dlm_reco_state_lock);
+ list_for_each_entry(ndata, &dlm->reco.node_data, list) {
+ mlog(0, "checking recovery state of node %u\n",
+ ndata->node_num);
+ switch (ndata->state) {
+ case DLM_RECO_NODE_DATA_INIT:
+ case DLM_RECO_NODE_DATA_REQUESTING:
+ mlog(ML_ERROR, "bad ndata state for "
+ "node %u: state=%d\n",
+ ndata->node_num, ndata->state);
+ BUG();
+ break;
+ case DLM_RECO_NODE_DATA_DEAD:
+ mlog(0, "node %u died after "
+ "requesting recovery info for "
+ "node %u\n", ndata->node_num,
+ dead_node);
+ break;
+ case DLM_RECO_NODE_DATA_RECEIVING:
+ case DLM_RECO_NODE_DATA_REQUESTED:
+ mlog(0, "%s: node %u still in state %s\n",
+ dlm->name, ndata->node_num,
+ ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
+ "receiving" : "requested");
+ all_nodes_done = 0;
+ break;
+ case DLM_RECO_NODE_DATA_DONE:
+ mlog(0, "%s: node %u state is done\n",
+ dlm->name, ndata->node_num);
+ break;
+ case DLM_RECO_NODE_DATA_FINALIZE_SENT:
+ mlog(0, "%s: node %u state is finalize\n",
+ dlm->name, ndata->node_num);
+ break;
+ }
+ }
+ spin_unlock(&dlm_reco_state_lock);
+
+ mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
+ all_nodes_done?"yes":"no");
+ if (all_nodes_done) {
+ int ret;
+
+ /* Set this flag on the recovery master to prevent a new
+ * recovery for another dead node from starting before this
+ * one is done.  Otherwise recovery could hang. */
+ spin_lock(&dlm->spinlock);
+ dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
+ spin_unlock(&dlm->spinlock);
+
+ /* all nodes are now in DLM_RECO_NODE_DATA_DONE state
+ * just send a finalize message to everyone and
+ * clean up */
+ mlog(0, "all nodes are done! send finalize\n");
+ ret = dlm_send_finalize_reco_message(dlm);
+ if (ret < 0)
+ mlog_errno(ret);
+
+ spin_lock(&dlm->spinlock);
+ dlm_finish_local_lockres_recovery(dlm, dead_node,
+ dlm->node_num);
+ spin_unlock(&dlm->spinlock);
+ mlog(0, "should be done with recovery!\n");
+
+ mlog(0, "finishing recovery of %s at %lu, "
+ "dead=%u, this=%u, new=%u\n", dlm->name,
+ jiffies, dlm->reco.dead_node,
+ dlm->node_num, dlm->reco.new_master);
+ destroy = 1;
+ status = 0;
+ /* rescan everything marked dirty along the way */
+ dlm_kick_thread(dlm, NULL);
+ break;
+ }
+ /* wait to be signalled, with periodic timeout
+ * to check for node death */
+ wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
+ kthread_should_stop(),
+ msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));
+
+ }
+
+ if (destroy)
+ dlm_destroy_recovery_area(dlm);
+
+ return status;
+}
+
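+/* allocate a dlm_reco_node_data entry for every node currently in
+ * the domain (except the dead node) so the recovery master can
+ * track each node's progress. */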
+static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
+{
+ int num=0;
+ struct dlm_reco_node_data *ndata;
+
+ spin_lock(&dlm->spinlock);
+ bitmap_copy(dlm->reco.node_map, dlm->domain_map, O2NM_MAX_NODES);
+ /* nodes can only be removed (by dying) after dropping
+ * this lock, and death will be trapped later, so this should do */
+ spin_unlock(&dlm->spinlock);
+
+ while (1) {
+ num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
+ if (num >= O2NM_MAX_NODES) {
+ break;
+ }
+ BUG_ON(num == dead_node);
+
+ ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
+ if (!ndata) {
+ dlm_destroy_recovery_area(dlm);
+ return -ENOMEM;
+ }
+ ndata->node_num = num;
+ ndata->state = DLM_RECO_NODE_DATA_INIT;
+ spin_lock(&dlm_reco_state_lock);
+ list_add_tail(&ndata->list, &dlm->reco.node_data);
+ spin_unlock(&dlm_reco_state_lock);
+ num++;
+ }
+
+ return 0;
+}
+
+static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm)
+{
+ struct dlm_reco_node_data *ndata, *next;
+ LIST_HEAD(tmplist);
+
+ spin_lock(&dlm_reco_state_lock);
+ list_splice_init(&dlm->reco.node_data, &tmplist);
+ spin_unlock(&dlm_reco_state_lock);
+
+ list_for_each_entry_safe(ndata, next, &tmplist, list) {
+ list_del_init(&ndata->list);
+ kfree(ndata);
+ }
+}
+
+static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
+ u8 dead_node)
+{
+ struct dlm_lock_request lr;
+ int ret;
+ int status;
+
+ mlog(0, "\n");
+
+
+ mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
+ "to %u\n", dead_node, request_from);
+
+ memset(&lr, 0, sizeof(lr));
+ lr.node_idx = dlm->node_num;
+ lr.dead_node = dead_node;
+
+ // send message
+ ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
+ &lr, sizeof(lr), request_from, &status);
+
+ /* negative status is handled by caller */
+ if (ret < 0)
+ mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
+ "to recover dead node %u\n", dlm->name, ret,
+ request_from, dead_node);
+ else
+ ret = status;
+ // return from here, then
+ // sleep until all received or error
+ return ret;
+
+}
+
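+/* handler for the recovery master's LOCK_REQUEST message: queue a
+ * work item that will stream this node's lock state for the dead
+ * node back to the requesting master. */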
+int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
+ char *buf = NULL;
+ struct dlm_work_item *item = NULL;
+
+ if (!dlm_grab(dlm))
+ return -EINVAL;
+
+ if (lr->dead_node != dlm->reco.dead_node) {
+ mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
+ "dead_node is %u\n", dlm->name, lr->node_idx,
+ lr->dead_node, dlm->reco.dead_node);
+ dlm_print_reco_node_status(dlm);
+ /* this is a hack */
+ dlm_put(dlm);
+ return -ENOMEM;
+ }
+ BUG_ON(lr->dead_node != dlm->reco.dead_node);
+
+ item = kzalloc(sizeof(*item), GFP_NOFS);
+ if (!item) {
+ dlm_put(dlm);
+ return -ENOMEM;
+ }
+
+ /* this will get freed by dlm_request_all_locks_worker */
+ buf = (char *) __get_free_page(GFP_NOFS);
+ if (!buf) {
+ kfree(item);
+ dlm_put(dlm);
+ return -ENOMEM;
+ }
+
+ /* queue up work for dlm_request_all_locks_worker */
+ dlm_grab(dlm); /* get an extra ref for the work item */
+ dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
+ item->u.ral.reco_master = lr->node_idx;
+ item->u.ral.dead_node = lr->dead_node;
+ spin_lock(&dlm->work_lock);
+ list_add_tail(&item->list, &dlm->work_list);
+ spin_unlock(&dlm->work_lock);
+ queue_work(dlm->dlm_worker, &dlm->dispatched_work);
+
+ dlm_put(dlm);
+ return 0;
+}
+
+static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
+{
+ struct dlm_migratable_lockres *mres;
+ struct dlm_lock_resource *res;
+ struct dlm_ctxt *dlm;
+ LIST_HEAD(resources);
+ int ret;
+ u8 dead_node, reco_master;
+ int skip_all_done = 0;
+
+ dlm = item->dlm;
+ dead_node = item->u.ral.dead_node;
+ reco_master = item->u.ral.reco_master;
+ mres = (struct dlm_migratable_lockres *)data;
+
+ mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
+ dlm->name, dead_node, reco_master);
+
+ if (dead_node != dlm->reco.dead_node ||
+ reco_master != dlm->reco.new_master) {
+ /* worker could have been created before the recovery master
+ * died. if so, do not continue, but do not error. */
+ if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
+ mlog(ML_NOTICE, "%s: will not send recovery state, "
+ "recovery master %u died, thread=(dead=%u,mas=%u)"
+ " current=(dead=%u,mas=%u)\n", dlm->name,
+ reco_master, dead_node, reco_master,
+ dlm->reco.dead_node, dlm->reco.new_master);
+ } else {
+ mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
+ "master=%u), request(dead=%u, master=%u)\n",
+ dlm->name, dlm->reco.dead_node,
+ dlm->reco.new_master, dead_node, reco_master);
+ }
+ goto leave;
+ }
+
+ /* lock resources should have already been moved to the
+ * dlm->reco.resources list. now move items from that list
+ * to a temp list if the dead owner matches. note that the
+ * whole cluster recovers only one node at a time, so we
+ * can safely move UNKNOWN lock resources for each recovery
+ * session. */
+ dlm_move_reco_locks_to_list(dlm, &resources, dead_node);
+
+ /* now we can begin blasting lockreses without the dlm lock */
+
+ /* any errors returned will be due to the new_master dying,
+ * the dlm_reco_thread should detect this */
+ list_for_each_entry(res, &resources, recovering) {
+ ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
+ DLM_MRES_RECOVERY);
+ if (ret < 0) {
+ mlog(ML_ERROR, "%s: node %u went down while sending "
+ "recovery state for dead node %u, ret=%d\n", dlm->name,
+ reco_master, dead_node, ret);
+ skip_all_done = 1;
+ break;
+ }
+ }
+
+ /* move the resources back to the list */
+ spin_lock(&dlm->spinlock);
+ list_splice_init(&resources, &dlm->reco.resources);
+ spin_unlock(&dlm->spinlock);
+
+ if (!skip_all_done) {
+ ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
+ if (ret < 0) {
+ mlog(ML_ERROR, "%s: node %u went down while sending "
+ "recovery all-done for dead node %u, ret=%d\n",
+ dlm->name, reco_master, dead_node, ret);
+ }
+ }
+leave:
+ free_page((unsigned long)data);
+}
+
+
+static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
+{
+ int ret, tmpret;
+ struct dlm_reco_data_done done_msg;
+
+ memset(&done_msg, 0, sizeof(done_msg));
+ done_msg.node_idx = dlm->node_num;
+ done_msg.dead_node = dead_node;
+ mlog(0, "sending DATA DONE message to %u, "
+ "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
+ done_msg.dead_node);
+
+ ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
+ sizeof(done_msg), send_to, &tmpret);
+ if (ret < 0) {
+ mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
+ "to recover dead node %u\n", dlm->name, ret, send_to,
+ dead_node);
+ if (!dlm_is_host_down(ret)) {
+ BUG();
+ }
+ } else
+ ret = tmpret;
+ return ret;
+}
+
+
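+/* handler for the DATA DONE message: mark the sending node as DONE
+ * in the recovery node data list and kick the recovery thread. */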
+int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
+ struct dlm_reco_node_data *ndata = NULL;
+ int ret = -EINVAL;
+
+ if (!dlm_grab(dlm))
+ return -EINVAL;
+
+ mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
+ "node_idx=%u, this node=%u\n", done->dead_node,
+ dlm->reco.dead_node, done->node_idx, dlm->node_num);
+
+ mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
+ "Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
+ "node_idx=%u, this node=%u\n", done->dead_node,
+ dlm->reco.dead_node, done->node_idx, dlm->node_num);
+
+ spin_lock(&dlm_reco_state_lock);
+ list_for_each_entry(ndata, &dlm->reco.node_data, list) {
+ if (ndata->node_num != done->node_idx)
+ continue;
+
+ switch (ndata->state) {
+ /* should have moved beyond INIT but not to FINALIZE yet */
+ case DLM_RECO_NODE_DATA_INIT:
+ case DLM_RECO_NODE_DATA_DEAD:
+ case DLM_RECO_NODE_DATA_FINALIZE_SENT:
+ mlog(ML_ERROR, "bad ndata state for node %u:"
+ " state=%d\n", ndata->node_num,
+ ndata->state);
+ BUG();
+ break;
+ /* these states are possible at this point, anywhere along
+ * the line of recovery */
+ case DLM_RECO_NODE_DATA_DONE:
+ case DLM_RECO_NODE_DATA_RECEIVING:
+ case DLM_RECO_NODE_DATA_REQUESTED:
+ case DLM_RECO_NODE_DATA_REQUESTING:
+ mlog(0, "node %u is DONE sending "
+ "recovery data!\n",
+ ndata->node_num);
+
+ ndata->state = DLM_RECO_NODE_DATA_DONE;
+ ret = 0;
+ break;
+ }
+ }
+ spin_unlock(&dlm_reco_state_lock);
+
+ /* wake the recovery thread, some node is done */
+ if (!ret)
+ dlm_kick_recovery_thread(dlm);
+
+ if (ret < 0)
+ mlog(ML_ERROR, "failed to find recovery node data for node "
+ "%u\n", done->node_idx);
+ dlm_put(dlm);
+
+ mlog(0, "leaving reco data done handler, ret=%d\n", ret);
+ return ret;
+}
+
+static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
+ struct list_head *list,
+ u8 dead_node)
+{
+ struct dlm_lock_resource *res, *next;
+ struct dlm_lock *lock;
+
+ spin_lock(&dlm->spinlock);
+ list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
+ /* always prune any $RECOVERY entries for dead nodes,
+ * otherwise hangs can occur during later recovery */
+ if (dlm_is_recovery_lock(res->lockname.name,
+ res->lockname.len)) {
+ spin_lock(&res->spinlock);
+ list_for_each_entry(lock, &res->granted, list) {
+ if (lock->ml.node == dead_node) {
+ mlog(0, "AHA! there was "
+ "a $RECOVERY lock for dead "
+ "node %u (%s)!\n",
+ dead_node, dlm->name);
+ list_del_init(&lock->list);
+ dlm_lock_put(lock);
+ /* Can't schedule DLM_UNLOCK_FREE_LOCK
+ * - do manually */
+ dlm_lock_put(lock);
+ break;
+ }
+ }
+ spin_unlock(&res->spinlock);
+ continue;
+ }
+
+ if (res->owner == dead_node) {
+ mlog(0, "found lockres owned by dead node while "
+ "doing recovery for node %u. sending it.\n",
+ dead_node);
+ list_move_tail(&res->recovering, list);
+ } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
+ mlog(0, "found UNKNOWN owner while doing recovery "
+ "for node %u. sending it.\n", dead_node);
+ list_move_tail(&res->recovering, list);
+ }
+ }
+ spin_unlock(&dlm->spinlock);
+}
+
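+/* count the locks on all three queues; the queue++ walk relies on
+ * granted, converting and blocked being adjacent in the lockres. */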
+static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
+{
+ int total_locks = 0;
+ struct list_head *iter, *queue = &res->granted;
+ int i;
+
+ for (i=0; i<3; i++) {
+ list_for_each(iter, queue)
+ total_locks++;
+ queue++;
+ }
+ return total_locks;
+}
+
+
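+/* send the locks accumulated so far in mres to send_to, then zero
+ * and reinit the mres buffer for the next batch. */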
+static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
+ struct dlm_migratable_lockres *mres,
+ u8 send_to,
+ struct dlm_lock_resource *res,
+ int total_locks)
+{
+ u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
+ int mres_total_locks = be32_to_cpu(mres->total_locks);
+ int ret = 0, status = 0;
+ u8 orig_flags = mres->flags,
+ orig_master = mres->master;
+
+ BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
+ if (!mres->num_locks)
+ return 0;
+
+ /* add an all-done flag if we reached the last lock */
+ orig_flags = mres->flags;
+ BUG_ON(total_locks > mres_total_locks);
+ if (total_locks == mres_total_locks)
+ mres->flags |= DLM_MRES_ALL_DONE;
+
+ mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
+ send_to);
+
+ /* send it */
+ ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
+ struct_size(mres, ml, mres->num_locks),
+ send_to, &status);
+ if (ret < 0) {
+ /* XXX: negative status is not handled.
+ * this will end up killing this node. */
+ mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
+ "node %u (%s)\n", dlm->name, mres->lockname_len,
+ mres->lockname, ret, send_to,
+ (orig_flags & DLM_MRES_MIGRATION ?
+ "migration" : "recovery"));
+ } else {
+ /* might get an -ENOMEM back here */
+ ret = status;
+ if (ret < 0) {
+ mlog_errno(ret);
+
+ if (ret == -EFAULT) {
+ mlog(ML_ERROR, "node %u told me to kill "
+ "myself!\n", send_to);
+ BUG();
+ }
+ }
+ }
+
+ /* zero and reinit the message buffer */
+ dlm_init_migratable_lockres(mres, res->lockname.name,
+ res->lockname.len, mres_total_locks,
+ mig_cookie, orig_flags, orig_master);
+ return ret;
+}
+
+static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
+ const char *lockname, int namelen,
+ int total_locks, u64 cookie,
+ u8 flags, u8 master)
+{
+ /* mres here is one full page */
+ clear_page(mres);
+ mres->lockname_len = namelen;
+ memcpy(mres->lockname, lockname, namelen);
+ mres->num_locks = 0;
+ mres->total_locks = cpu_to_be32(total_locks);
+ mres->mig_cookie = cpu_to_be64(cookie);
+ mres->flags = flags;
+ mres->master = master;
+}
+
+static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
+ struct dlm_migratable_lockres *mres,
+ int queue)
+{
+ if (!lock->lksb)
+ return;
+
+ /* Ignore lvb in all locks in the blocked list */
+ if (queue == DLM_BLOCKED_LIST)
+ return;
+
+ /* Only consider lvbs in locks with granted EX or PR lock levels */
+ if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
+ return;
+
+ if (dlm_lvb_is_empty(mres->lvb)) {
+ memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
+ return;
+ }
+
+ /* Ensure the lvb copied for migration matches in other valid locks */
+ if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
+ return;
+
+ mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
+ "node=%u\n",
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+ lock->lockres->lockname.len, lock->lockres->lockname.name,
+ lock->ml.node);
+ dlm_print_one_lock_resource(lock->lockres);
+ BUG();
+}
+
+/* returns 1 if this lock fills the network structure,
+ * 0 otherwise */
+static int dlm_add_lock_to_array(struct dlm_lock *lock,
+ struct dlm_migratable_lockres *mres, int queue)
+{
+ struct dlm_migratable_lock *ml;
+ int lock_num = mres->num_locks;
+
+ ml = &(mres->ml[lock_num]);
+ ml->cookie = lock->ml.cookie;
+ ml->type = lock->ml.type;
+ ml->convert_type = lock->ml.convert_type;
+ ml->highest_blocked = lock->ml.highest_blocked;
+ ml->list = queue;
+ if (lock->lksb) {
+ ml->flags = lock->lksb->flags;
+ dlm_prepare_lvb_for_migration(lock, mres, queue);
+ }
+ ml->node = lock->ml.node;
+ mres->num_locks++;
+ /* we reached the max, send this network message */
+ if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
+ return 1;
+ return 0;
+}
+
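+/* a dummy lock (cookie 0, all IVMODE, blocked list) is sent when
+ * this node has no locks on the lockres but still holds a mastery
+ * reference, so the new master sets this node's refmap bit. */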
+static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
+ struct dlm_migratable_lockres *mres)
+{
+ struct dlm_lock dummy;
+ memset(&dummy, 0, sizeof(dummy));
+ dummy.ml.cookie = 0;
+ dummy.ml.type = LKM_IVMODE;
+ dummy.ml.convert_type = LKM_IVMODE;
+ dummy.ml.highest_blocked = LKM_IVMODE;
+ dummy.lksb = NULL;
+ dummy.ml.node = dlm->node_num;
+ dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
+}
+
+static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
+ struct dlm_migratable_lock *ml,
+ u8 *nodenum)
+{
+ if (unlikely(ml->cookie == 0 &&
+ ml->type == LKM_IVMODE &&
+ ml->convert_type == LKM_IVMODE &&
+ ml->highest_blocked == LKM_IVMODE &&
+ ml->list == DLM_BLOCKED_LIST)) {
+ *nodenum = ml->node;
+ return 1;
+ }
+ return 0;
+}
+
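+/* pack every lock on the three queues of res into mres and send it
+ * to send_to, flushing whenever the message fills up; if there are
+ * no locks at all, send a dummy lock so the mastery reference is
+ * still carried over. */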
+int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+ struct dlm_migratable_lockres *mres,
+ u8 send_to, u8 flags)
+{
+ struct list_head *queue;
+ int total_locks, i;
+ u64 mig_cookie = 0;
+ struct dlm_lock *lock;
+ int ret = 0;
+
+ BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
+
+ mlog(0, "sending to %u\n", send_to);
+
+ total_locks = dlm_num_locks_in_lockres(res);
+ if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
+ /* rare, but possible */
+ mlog(0, "argh. lockres has %d locks. this will "
+ "require more than one network packet to "
+ "migrate\n", total_locks);
+ mig_cookie = dlm_get_next_mig_cookie();
+ }
+
+ dlm_init_migratable_lockres(mres, res->lockname.name,
+ res->lockname.len, total_locks,
+ mig_cookie, flags, res->owner);
+
+ total_locks = 0;
+ for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
+ queue = dlm_list_idx_to_ptr(res, i);
+ list_for_each_entry(lock, queue, list) {
+ /* add another lock. */
+ total_locks++;
+ if (!dlm_add_lock_to_array(lock, mres, i))
+ continue;
+
+ /* this filled the lock message,
+ * we must send it immediately. */
+ ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
+ res, total_locks);
+ if (ret < 0)
+ goto error;
+ }
+ }
+ if (total_locks == 0) {
+ /* send a dummy lock to indicate a mastery reference only */
+ mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
+ "migration");
+ dlm_add_dummy_lock(dlm, mres);
+ }
+ /* flush any remaining locks */
+ ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
+ if (ret < 0)
+ goto error;
+ return ret;
+
+error:
+ mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
+ dlm->name, ret);
+ if (!dlm_is_host_down(ret))
+ BUG();
+ mlog(0, "%s: node %u went down while sending %s "
+ "lockres %.*s\n", dlm->name, send_to,
+ flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
+ res->lockname.len, res->lockname.name);
+ return ret;
+}
+
+
+
+/*
+ * this message will contain no more than one page worth of
+ * recovery data, and it will work on only one lockres.
+ * there may be many locks in this page, and we may need to wait
+ * for additional packets to complete all the locks (rare, but
+ * possible).
+ */
+/*
+ * NOTE: the allocation error cases here are scary.
+ * we really cannot afford to fail an alloc in recovery;
+ * do we spin?  returning an error only delays the problem, really.
+ */
+
+int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_migratable_lockres *mres =
+ (struct dlm_migratable_lockres *)msg->buf;
+ int ret = 0;
+ u8 real_master;
+ u8 extra_refs = 0;
+ char *buf = NULL;
+ struct dlm_work_item *item = NULL;
+ struct dlm_lock_resource *res = NULL;
+ unsigned int hash;
+
+ if (!dlm_grab(dlm))
+ return -EINVAL;
+
+ if (!dlm_joined(dlm)) {
+ mlog(ML_ERROR, "Domain %s not joined! "
+ "lockres %.*s, master %u\n",
+ dlm->name, mres->lockname_len,
+ mres->lockname, mres->master);
+ dlm_put(dlm);
+ return -EINVAL;
+ }
+
+ BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
+
+ real_master = mres->master;
+ if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
+ /* cannot migrate a lockres with no master */
+ BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
+ }
+
+ mlog(0, "%s message received from node %u\n",
+ (mres->flags & DLM_MRES_RECOVERY) ?
+ "recovery" : "migration", mres->master);
+ if (mres->flags & DLM_MRES_ALL_DONE)
+ mlog(0, "all done flag. all lockres data received!\n");
+
+ ret = -ENOMEM;
+ buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
+ item = kzalloc(sizeof(*item), GFP_NOFS);
+ if (!buf || !item)
+ goto leave;
+
+ /* lookup the lock to see if we have a secondary queue for this
+ * already... just add the locks in and this will have its owner
+ * and RECOVERY flag changed when it completes. */
+ hash = dlm_lockid_hash(mres->lockname, mres->lockname_len);
+ spin_lock(&dlm->spinlock);
+ res = __dlm_lookup_lockres_full(dlm, mres->lockname, mres->lockname_len,
+ hash);
+ if (res) {
+ /* this will get a ref on res */
+ /* mark it as recovering/migrating and hash it */
+ spin_lock(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_DROPPING_REF) {
+ mlog(0, "%s: node is attempting to migrate "
+ "lockres %.*s, but marked as dropping "
+ " ref!\n", dlm->name,
+ mres->lockname_len, mres->lockname);
+ ret = -EINVAL;
+ spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->spinlock);
+ dlm_lockres_put(res);
+ goto leave;
+ }
+
+ if (mres->flags & DLM_MRES_RECOVERY) {
+ res->state |= DLM_LOCK_RES_RECOVERING;
+ } else {
+ if (res->state & DLM_LOCK_RES_MIGRATING) {
+ /* this is at least the second
+ * lockres message */
+ mlog(0, "lock %.*s is already migrating\n",
+ mres->lockname_len,
+ mres->lockname);
+ } else if (res->state & DLM_LOCK_RES_RECOVERING) {
+ /* caller should BUG */
+ mlog(ML_ERROR, "node is attempting to migrate "
+ "lock %.*s, but marked as recovering!\n",
+ mres->lockname_len, mres->lockname);
+ ret = -EFAULT;
+ spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->spinlock);
+ dlm_lockres_put(res);
+ goto leave;
+ }
+ res->state |= DLM_LOCK_RES_MIGRATING;
+ }
+ spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->spinlock);
+ } else {
+ spin_unlock(&dlm->spinlock);
+ /* need to allocate, just like if it was
+ * mastered here normally */
+ res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
+ if (!res)
+ goto leave;
+
+ /* to match the ref that we would have gotten if
+ * dlm_lookup_lockres had succeeded */
+ dlm_lockres_get(res);
+
+ /* mark it as recovering/migrating and hash it */
+ if (mres->flags & DLM_MRES_RECOVERY)
+ res->state |= DLM_LOCK_RES_RECOVERING;
+ else
+ res->state |= DLM_LOCK_RES_MIGRATING;
+
+ spin_lock(&dlm->spinlock);
+ __dlm_insert_lockres(dlm, res);
+ spin_unlock(&dlm->spinlock);
+
+ /* Add an extra ref for this lock-less lockres lest the
+ * dlm_thread purges it before we get the chance to add
+ * locks to it */
+ dlm_lockres_get(res);
+
+ /* There are three refs that need to be put.
+ * 1. Taken above.
+ * 2. kref_init in dlm_new_lockres()->dlm_init_lockres().
+ * 3. dlm_lookup_lockres()
+ * The first one is handled at the end of this function. The
+ * other two are handled in the worker thread after locks have
+ * been attached. Yes, we don't wait for purge time to match
+ * kref_init. The lockres will still have at least one ref
+ * added because it is in the hash via __dlm_insert_lockres() */
+ extra_refs++;
+
+ /* now that the new lockres is inserted,
+ * make it usable by other processes */
+ spin_lock(&res->spinlock);
+ res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+ }
+
+ /* at this point we have allocated everything we need,
+ * and we have a hashed lockres with an extra ref and
+ * the proper res->state flags. */
+ ret = 0;
+ spin_lock(&res->spinlock);
+ /* drop this either when master requery finds a different master
+ * or when a lock is added by the recovery worker */
+ dlm_lockres_grab_inflight_ref(dlm, res);
+ if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
+ /* migration cannot have an unknown master */
+ BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
+ mlog(0, "recovery has passed me a lockres with an "
+ "unknown owner.. will need to requery: "
+ "%.*s\n", mres->lockname_len, mres->lockname);
+ } else {
+ /* take a reference now to pin the lockres, drop it
+ * when locks are added in the worker */
+ dlm_change_lockres_owner(dlm, res, dlm->node_num);
+ }
+ spin_unlock(&res->spinlock);
+
+ /* queue up work for dlm_mig_lockres_worker */
+ dlm_grab(dlm); /* get an extra ref for the work item */
+ memcpy(buf, msg->buf, be16_to_cpu(msg->data_len)); /* copy the whole message */
+ dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
+ item->u.ml.lockres = res; /* already have a ref */
+ item->u.ml.real_master = real_master;
+ item->u.ml.extra_ref = extra_refs;
+ spin_lock(&dlm->work_lock);
+ list_add_tail(&item->list, &dlm->work_list);
+ spin_unlock(&dlm->work_lock);
+ queue_work(dlm->dlm_worker, &dlm->dispatched_work);
+
+leave:
+ /* One extra ref taken needs to be put here */
+ if (extra_refs)
+ dlm_lockres_put(res);
+
+ dlm_put(dlm);
+ if (ret < 0) {
+ kfree(buf);
+ kfree(item);
+ mlog_errno(ret);
+ }
+
+ return ret;
+}
+
+
+static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
+{
+ struct dlm_ctxt *dlm;
+ struct dlm_migratable_lockres *mres;
+ int ret = 0;
+ struct dlm_lock_resource *res;
+ u8 real_master;
+ u8 extra_ref;
+
+ dlm = item->dlm;
+ mres = (struct dlm_migratable_lockres *)data;
+
+ res = item->u.ml.lockres;
+ real_master = item->u.ml.real_master;
+ extra_ref = item->u.ml.extra_ref;
+
+ if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
+ /* this case is super-rare. only occurs if
+ * node death happens during migration. */
+again:
+ ret = dlm_lockres_master_requery(dlm, res, &real_master);
+ if (ret < 0) {
+ mlog(0, "dlm_lockres_master_requery ret=%d\n",
+ ret);
+ goto again;
+ }
+ if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
+ mlog(0, "lockres %.*s not claimed. "
+ "this node will take it.\n",
+ res->lockname.len, res->lockname.name);
+ } else {
+ spin_lock(&res->spinlock);
+ dlm_lockres_drop_inflight_ref(dlm, res);
+ spin_unlock(&res->spinlock);
+ mlog(0, "master needs to respond to sender "
+ "that node %u still owns %.*s\n",
+ real_master, res->lockname.len,
+ res->lockname.name);
+ /* cannot touch this lockres */
+ goto leave;
+ }
+ }
+
+ ret = dlm_process_recovery_data(dlm, res, mres);
+ if (ret < 0)
+ mlog(0, "dlm_process_recovery_data returned %d\n", ret);
+ else
+ mlog(0, "dlm_process_recovery_data succeeded\n");
+
+ if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
+ (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
+ ret = dlm_finish_migration(dlm, res, mres->master);
+ if (ret < 0)
+ mlog_errno(ret);
+ }
+
+leave:
+ /* See comment in dlm_mig_lockres_handler() */
+ if (res) {
+ if (extra_ref)
+ dlm_lockres_put(res);
+ dlm_lockres_put(res);
+ }
+ kfree(data);
+}
+
+
+
+static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ u8 *real_master)
+{
+ struct dlm_node_iter iter;
+ int nodenum;
+ int ret = 0;
+
+ *real_master = DLM_LOCK_RES_OWNER_UNKNOWN;
+
+ /* we only reach here if one of the two nodes in a
+ * migration died while the migration was in progress.
+ * at this point we need to requery the master. we
+ * know that the new_master got as far as creating
+ * an mle on at least one node, but we do not know
+ * if any nodes had actually cleared the mle and set
+ * the master to the new_master. the old master
+ * is supposed to set the owner to UNKNOWN in the
+ * event of a new_master death, so the only possible
+ * responses that we can get from nodes here are
+ * that the master is new_master, or that the master
+ * is UNKNOWN.
+ * if all nodes come back with UNKNOWN then we know
+ * the lock needs remastering here.
+ * if any node comes back with a valid master, check
+ * to see if that master is the one that we are
+ * recovering. if so, then the new_master died and
+ * we need to remaster this lock. if not, then the
+ * new_master survived and that node will respond to
+ * other nodes about the owner.
+ * if there is an owner, this node needs to dump this
+ * lockres and alert the sender that this lockres
+ * was rejected. */
+ spin_lock(&dlm->spinlock);
+ dlm_node_iter_init(dlm->domain_map, &iter);
+ spin_unlock(&dlm->spinlock);
+
+ while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
+ /* do not send to self */
+ if (nodenum == dlm->node_num)
+ continue;
+ ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
+ if (ret < 0) {
+ mlog_errno(ret);
+ if (!dlm_is_host_down(ret))
+ BUG();
+ /* host is down, so answer for that node would be
+ * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
+ }
+ if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
+ mlog(0, "lock master is %u\n", *real_master);
+ break;
+ }
+ }
+ return ret;
+}
+
+
+int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+ u8 nodenum, u8 *real_master)
+{
+ int ret;
+ struct dlm_master_requery req;
+ int status = DLM_LOCK_RES_OWNER_UNKNOWN;
+
+ memset(&req, 0, sizeof(req));
+ req.node_idx = dlm->node_num;
+ req.namelen = res->lockname.len;
+ memcpy(req.name, res->lockname.name, res->lockname.len);
+
+resend:
+ ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
+ &req, sizeof(req), nodenum, &status);
+ if (ret < 0)
+ mlog(ML_ERROR, "Error %d when sending message %u (key "
+ "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG,
+ dlm->key, nodenum);
+ else if (status == -ENOMEM) {
+ mlog_errno(status);
+ msleep(50);
+ goto resend;
+ } else {
+ BUG_ON(status < 0);
+ BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
+ *real_master = (u8) (status & 0xff);
+ mlog(0, "node %u responded to master requery with %u\n",
+ nodenum, *real_master);
+ ret = 0;
+ }
+ return ret;
+}
+
+
+/* this function cannot error, so unless the sending
+ * or receiving of the message failed, the owner can
+ * be trusted */
+int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
+ struct dlm_lock_resource *res = NULL;
+ unsigned int hash;
+ int master = DLM_LOCK_RES_OWNER_UNKNOWN;
+ u32 flags = DLM_ASSERT_MASTER_REQUERY;
+ int dispatched = 0;
+
+ if (!dlm_grab(dlm)) {
+ /* since the domain has gone away on this
+ * node, the proper response is UNKNOWN */
+ return master;
+ }
+
+ hash = dlm_lockid_hash(req->name, req->namelen);
+
+ spin_lock(&dlm->spinlock);
+ res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
+ if (res) {
+ spin_lock(&res->spinlock);
+ master = res->owner;
+ if (master == dlm->node_num) {
+ int ret = dlm_dispatch_assert_master(dlm, res,
+ 0, 0, flags);
+ if (ret < 0) {
+ mlog_errno(ret);
+ spin_unlock(&res->spinlock);
+ dlm_lockres_put(res);
+ spin_unlock(&dlm->spinlock);
+ dlm_put(dlm);
+ /* sender will take care of this and retry */
+ return ret;
+ } else {
+ dispatched = 1;
+ __dlm_lockres_grab_inflight_worker(dlm, res);
+ spin_unlock(&res->spinlock);
+ }
+ } else {
+ /* put.. in case we are not the master */
+ spin_unlock(&res->spinlock);
+ dlm_lockres_put(res);
+ }
+ }
+ spin_unlock(&dlm->spinlock);
+
+ if (!dispatched)
+ dlm_put(dlm);
+ return master;
+}
+
+static inline struct list_head *
+dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
+{
+ struct list_head *ret;
+ BUG_ON(list_num < 0);
+ BUG_ON(list_num > 2);
+ ret = &(res->granted);
+ ret += list_num;
+ return ret;
+}
+/* TODO: do ast flush business
+ * TODO: do MIGRATING and RECOVERING spinning
+ */
+
+/*
+ * NOTE about in-flight requests during migration:
+ *
+ * Before attempting the migrate, the master has marked the lockres as
+ * MIGRATING and then flushed all of its pending ASTs.  So any in-flight
+ * requests either got queued before the MIGRATING flag got set, in which
+ * case the lock data will reflect the change and a return message is on
+ * the way, or the request failed to get in before MIGRATING got set.  In
+ * this case, the caller will be told to spin and wait for the MIGRATING
+ * flag to be dropped, then recheck the master.
+ * This holds true for the convert, cancel and unlock cases, and since lvb
+ * updates are tied to these same messages, it applies to lvb updates as
+ * well.  For the lock case, there is no way a lock can be on the master
+ * queue and not be on the secondary queue since the lock is always added
+ * locally first.  This means that the new target node will never be sent
+ * a lock that he doesn't already have on the list.
+ * In total, this means that the local lock is correct and should not be
+ * updated to match the one sent by the master.  Any messages sent back
+ * from the master before the MIGRATING flag will bring the lock properly
+ * up-to-date, and the change will be ordered properly for the waiter.
+ * We will *not* attempt to modify the lock underneath the waiter.
+ */
+
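+/* walk the locks in the received mres: locks owned by this node are
+ * only reordered onto the proper queue (migration), while locks from
+ * other nodes get new dlm_lock structures attached to res, with lvb
+ * state copied as needed. */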
+static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_migratable_lockres *mres)
+{
+ struct dlm_migratable_lock *ml;
+ struct list_head *queue, *iter;
+ struct list_head *tmpq = NULL;
+ struct dlm_lock *newlock = NULL;
+ struct dlm_lockstatus *lksb = NULL;
+ int ret = 0;
+ int i, j, bad;
+ struct dlm_lock *lock;
+ u8 from = O2NM_MAX_NODES;
+ __be64 c;
+
+ mlog(0, "running %d locks for this lockres\n", mres->num_locks);
+ for (i=0; i<mres->num_locks; i++) {
+ ml = &(mres->ml[i]);
+
+ if (dlm_is_dummy_lock(dlm, ml, &from)) {
+ /* placeholder, just need to set the refmap bit */
+ BUG_ON(mres->num_locks != 1);
+ mlog(0, "%s:%.*s: dummy lock for %u\n",
+ dlm->name, mres->lockname_len, mres->lockname,
+ from);
+ spin_lock(&res->spinlock);
+ dlm_lockres_set_refmap_bit(dlm, res, from);
+ spin_unlock(&res->spinlock);
+ break;
+ }
+ BUG_ON(ml->highest_blocked != LKM_IVMODE);
+ newlock = NULL;
+ lksb = NULL;
+
+ queue = dlm_list_num_to_pointer(res, ml->list);
+ tmpq = NULL;
+
+ /* if the lock is for the local node it needs to
+ * be moved to the proper location within the queue.
+ * do not allocate a new lock structure. */
+ if (ml->node == dlm->node_num) {
+ /* MIGRATION ONLY! */
+ BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
+
+ lock = NULL;
+ spin_lock(&res->spinlock);
+ for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
+ tmpq = dlm_list_idx_to_ptr(res, j);
+ list_for_each(iter, tmpq) {
+ lock = list_entry(iter,
+ struct dlm_lock, list);
+ if (lock->ml.cookie == ml->cookie)
+ break;
+ lock = NULL;
+ }
+ if (lock)
+ break;
+ }
+
+ /* lock is always created locally first, and
+ * destroyed locally last. it must be on the list */
+ if (!lock) {
+ c = ml->cookie;
+ mlog(ML_ERROR, "Could not find local lock "
+ "with cookie %u:%llu, node %u, "
+ "list %u, flags 0x%x, type %d, "
+ "conv %d, highest blocked %d\n",
+ dlm_get_lock_cookie_node(be64_to_cpu(c)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+ ml->node, ml->list, ml->flags, ml->type,
+ ml->convert_type, ml->highest_blocked);
+ __dlm_print_one_lock_resource(res);
+ BUG();
+ }
+
+ if (lock->ml.node != ml->node) {
+ c = lock->ml.cookie;
+ mlog(ML_ERROR, "Mismatched node# in lock "
+ "cookie %u:%llu, name %.*s, node %u\n",
+ dlm_get_lock_cookie_node(be64_to_cpu(c)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+ res->lockname.len, res->lockname.name,
+ lock->ml.node);
+ c = ml->cookie;
+ mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
+ "node %u, list %u, flags 0x%x, type %d, "
+ "conv %d, highest blocked %d\n",
+ dlm_get_lock_cookie_node(be64_to_cpu(c)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+ ml->node, ml->list, ml->flags, ml->type,
+ ml->convert_type, ml->highest_blocked);
+ __dlm_print_one_lock_resource(res);
+ BUG();
+ }
+
+ if (tmpq != queue) {
+ c = ml->cookie;
+ mlog(0, "Lock cookie %u:%llu was on list %u "
+ "instead of list %u for %.*s\n",
+ dlm_get_lock_cookie_node(be64_to_cpu(c)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+ j, ml->list, res->lockname.len,
+ res->lockname.name);
+ __dlm_print_one_lock_resource(res);
+ spin_unlock(&res->spinlock);
+ continue;
+ }
+
+ /* see NOTE above about why we do not update
+ * to match the master here */
+
+ /* move the lock to its proper place */
+ /* do not alter lock refcount. switching lists. */
+ list_move_tail(&lock->list, queue);
+ spin_unlock(&res->spinlock);
+
+ mlog(0, "just reordered a local lock!\n");
+ continue;
+ }
+
+ /* lock is for another node. */
+ newlock = dlm_new_lock(ml->type, ml->node,
+ be64_to_cpu(ml->cookie), NULL);
+ if (!newlock) {
+ ret = -ENOMEM;
+ goto leave;
+ }
+ lksb = newlock->lksb;
+ dlm_lock_attach_lockres(newlock, res);
+
+ if (ml->convert_type != LKM_IVMODE) {
+ BUG_ON(queue != &res->converting);
+ newlock->ml.convert_type = ml->convert_type;
+ }
+ lksb->flags |= (ml->flags &
+ (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
+
+ if (ml->type == LKM_NLMODE)
+ goto skip_lvb;
+
+ /*
+ * If the lock is in the blocked list it can't have a valid lvb,
+ * so skip it
+ */
+ if (ml->list == DLM_BLOCKED_LIST)
+ goto skip_lvb;
+
+ if (!dlm_lvb_is_empty(mres->lvb)) {
+ if (lksb->flags & DLM_LKSB_PUT_LVB) {
+ /* other node was trying to update
+ * lvb when node died. recreate the
+ * lksb with the updated lvb. */
+ memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
+ /* the lock resource lvb update must happen
+ * NOW, before the spinlock is dropped.
+ * we no longer wait for the AST to update
+ * the lvb. */
+ memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
+ } else {
+ /* otherwise, the node is sending its
+ * most recent valid lvb info */
+ BUG_ON(ml->type != LKM_EXMODE &&
+ ml->type != LKM_PRMODE);
+ if (!dlm_lvb_is_empty(res->lvb) &&
+ (ml->type == LKM_EXMODE ||
+ memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
+ int i;
+ mlog(ML_ERROR, "%s:%.*s: received bad "
+ "lvb! type=%d\n", dlm->name,
+ res->lockname.len,
+ res->lockname.name, ml->type);
+ printk("lockres lvb=[");
+ for (i=0; i<DLM_LVB_LEN; i++)
+ printk("%02x", res->lvb[i]);
+ printk("]\nmigrated lvb=[");
+ for (i=0; i<DLM_LVB_LEN; i++)
+ printk("%02x", mres->lvb[i]);
+ printk("]\n");
+ dlm_print_one_lock_resource(res);
+ BUG();
+ }
+ memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
+ }
+ }
+skip_lvb:
+
+ /* NOTE:
+ * wrt lock queue ordering and recovery:
+ * 1. order of locks on granted queue is
+ * meaningless.
+ * 2. order of locks on converting queue is
+ * LOST with the node death. sorry charlie.
+ * 3. order of locks on the blocked queue is
+ * also LOST.
+ * order of locks does not affect integrity, it
+ * just means that a lock request may get pushed
+ * back in line as a result of the node death.
+ * also note that for a given node the lock order
+ * for its secondary queue locks is preserved
+ * relative to each other, but clearly *not*
+ * preserved relative to locks from other nodes.
+ */
+ bad = 0;
+ spin_lock(&res->spinlock);
+ list_for_each_entry(lock, queue, list) {
+ if (lock->ml.cookie == ml->cookie) {
+ c = lock->ml.cookie;
+ mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
+ "exists on this lockres!\n", dlm->name,
+ res->lockname.len, res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(c)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(c)));
+
+ mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
+ "node=%u, cookie=%u:%llu, queue=%d\n",
+ ml->type, ml->convert_type, ml->node,
+ dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)),
+ ml->list);
+
+ __dlm_print_one_lock_resource(res);
+ bad = 1;
+ break;
+ }
+ }
+ if (!bad) {
+ dlm_lock_get(newlock);
+ if (mres->flags & DLM_MRES_RECOVERY &&
+ ml->list == DLM_CONVERTING_LIST &&
+ newlock->ml.type >
+ newlock->ml.convert_type) {
+ /* newlock is doing downconvert, add it to the
+ * head of converting list */
+ list_add(&newlock->list, queue);
+ } else
+ list_add_tail(&newlock->list, queue);
+ mlog(0, "%s:%.*s: added lock for node %u, "
+ "setting refmap bit\n", dlm->name,
+ res->lockname.len, res->lockname.name, ml->node);
+ dlm_lockres_set_refmap_bit(dlm, res, ml->node);
+ }
+ spin_unlock(&res->spinlock);
+ }
+ mlog(0, "done running all the locks\n");
+
+leave:
+ /* balance the ref taken when the work was queued */
+ spin_lock(&res->spinlock);
+ dlm_lockres_drop_inflight_ref(dlm, res);
+ spin_unlock(&res->spinlock);
+
+ if (ret < 0)
+ mlog_errno(ret);
+
+ return ret;
+}
+
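+/* mark the lockres RECOVERING, put it on dlm->reco.resources, and
+ * resolve any lock/convert/unlock/cancel operations that were still
+ * pending against the dead master. */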
+void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ int i;
+ struct list_head *queue;
+ struct dlm_lock *lock, *next;
+
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&res->spinlock);
+ res->state |= DLM_LOCK_RES_RECOVERING;
+ if (!list_empty(&res->recovering)) {
+ mlog(0,
+ "Recovering res %s:%.*s, is already on recovery list!\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ list_del_init(&res->recovering);
+ dlm_lockres_put(res);
+ }
+ /* We need to hold a reference while on the recovery list */
+ dlm_lockres_get(res);
+ list_add_tail(&res->recovering, &dlm->reco.resources);
+
+ /* find any pending locks and put them back on proper list */
+ for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
+ queue = dlm_list_idx_to_ptr(res, i);
+ list_for_each_entry_safe(lock, next, queue, list) {
+ dlm_lock_get(lock);
+ if (lock->convert_pending) {
+ /* move converting lock back to granted */
+ mlog(0, "node died with convert pending "
+ "on %.*s. move back to granted list.\n",
+ res->lockname.len, res->lockname.name);
+ dlm_revert_pending_convert(res, lock);
+ lock->convert_pending = 0;
+ } else if (lock->lock_pending) {
+ /* remove pending lock requests completely */
+ BUG_ON(i != DLM_BLOCKED_LIST);
+ mlog(0, "node died with lock pending "
+ "on %.*s. remove from blocked list and skip.\n",
+ res->lockname.len, res->lockname.name);
+ /* lock will be floating until ref in
+ * dlmlock_remote is freed after the network
+ * call returns. ok for it to not be on any
+ * list since no ast can be called
+ * (the master is dead). */
+ dlm_revert_pending_lock(res, lock);
+ lock->lock_pending = 0;
+ } else if (lock->unlock_pending) {
+ /* if an unlock was in progress, treat as
+ * if this had completed successfully
+ * before sending this lock state to the
+ * new master. note that the dlm_unlock
+ * call is still responsible for calling
+ * the unlockast. that will happen after
+ * the network call times out. for now,
+ * just move lists to prepare the new
+ * recovery master. */
+ BUG_ON(i != DLM_GRANTED_LIST);
+ mlog(0, "node died with unlock pending "
+ "on %.*s. remove from blocked list and skip.\n",
+ res->lockname.len, res->lockname.name);
+ dlm_commit_pending_unlock(res, lock);
+ lock->unlock_pending = 0;
+ } else if (lock->cancel_pending) {
+ /* if a cancel was in progress, treat as
+ * if this had completed successfully
+ * before sending this lock state to the
+ * new master */
+ BUG_ON(i != DLM_CONVERTING_LIST);
+ mlog(0, "node died with cancel pending "
+ "on %.*s. move back to granted list.\n",
+ res->lockname.len, res->lockname.name);
+ dlm_commit_pending_cancel(res, lock);
+ lock->cancel_pending = 0;
+ }
+ dlm_lock_put(lock);
+ }
+ }
+}
+
+
+
+/* removes all recovered locks from the recovery list.
+ * sets the res->owner to the new master.
+ * unsets the RECOVERY flag and wakes waiters. */
+static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
+ u8 dead_node, u8 new_master)
+{
+ int i;
+ struct hlist_head *bucket;
+ struct dlm_lock_resource *res, *next;
+
+ assert_spin_locked(&dlm->spinlock);
+
+ list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
+ if (res->owner == dead_node) {
+ mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ res->owner, new_master);
+ list_del_init(&res->recovering);
+ spin_lock(&res->spinlock);
+ /* new_master has our reference from
+ * the lock state sent during recovery */
+ dlm_change_lockres_owner(dlm, res, new_master);
+ res->state &= ~DLM_LOCK_RES_RECOVERING;
+ if (__dlm_lockres_has_locks(res))
+ __dlm_dirty_lockres(dlm, res);
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+ dlm_lockres_put(res);
+ }
+ }
+
+ /* this will become unnecessary eventually, but
+ * for now we need to run the whole hash, clear
+ * the RECOVERING state and set the owner
+ * if necessary */
+ for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+ bucket = dlm_lockres_hash(dlm, i);
+ hlist_for_each_entry(res, bucket, hash_node) {
+ if (res->state & DLM_LOCK_RES_RECOVERY_WAITING) {
+ spin_lock(&res->spinlock);
+ res->state &= ~DLM_LOCK_RES_RECOVERY_WAITING;
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+ }
+
+ if (!(res->state & DLM_LOCK_RES_RECOVERING))
+ continue;
+
+ if (res->owner != dead_node &&
+ res->owner != dlm->node_num)
+ continue;
+
+ if (!list_empty(&res->recovering)) {
+ list_del_init(&res->recovering);
+ dlm_lockres_put(res);
+ }
+
+ /* new_master has our reference from
+ * the lock state sent during recovery */
+ mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ res->owner, new_master);
+ spin_lock(&res->spinlock);
+ dlm_change_lockres_owner(dlm, res, new_master);
+ res->state &= ~DLM_LOCK_RES_RECOVERING;
+ if (__dlm_lockres_has_locks(res))
+ __dlm_dirty_lockres(dlm, res);
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+ }
+ }
+}
+
+static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
+{
+ if (local) {
+ if (lock->ml.type != LKM_EXMODE &&
+ lock->ml.type != LKM_PRMODE)
+ return 1;
+ } else if (lock->ml.type == LKM_EXMODE)
+ return 1;
+ return 0;
+}
+
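+/* decide whether the lvb can still be trusted after the node death
+ * and blank it (in the lksbs and the lockres) if it cannot. */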
+static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res, u8 dead_node)
+{
+ struct list_head *queue;
+ struct dlm_lock *lock;
+ int blank_lvb = 0, local = 0;
+ int i;
+ u8 search_node;
+
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&res->spinlock);
+
+ if (res->owner == dlm->node_num)
+ /* if this node owned the lockres, and if the dead node
+ * had an EX when he died, blank out the lvb */
+ search_node = dead_node;
+ else {
+ /* if this is a secondary lockres, and we had no EX or PR
+ * locks granted, we can no longer trust the lvb */
+ search_node = dlm->node_num;
+ local = 1; /* check local state for valid lvb */
+ }
+
+ for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
+ queue = dlm_list_idx_to_ptr(res, i);
+ list_for_each_entry(lock, queue, list) {
+ if (lock->ml.node == search_node) {
+ if (dlm_lvb_needs_invalidation(lock, local)) {
+ /* zero the lksb lvb and lockres lvb */
+ blank_lvb = 1;
+ memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
+ }
+ }
+ }
+ }
+
+ if (blank_lvb) {
+ mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
+ res->lockname.len, res->lockname.name, dead_node);
+ memset(res->lvb, 0, DLM_LVB_LEN);
+ }
+}
+
+static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res, u8 dead_node)
+{
+ struct dlm_lock *lock, *next;
+ unsigned int freed = 0;
+
+ /* this node is the lockres master:
+ * 1) remove any stale locks for the dead node
+ * 2) if the dead node had an EX when he died, blank out the lvb
+ */
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&res->spinlock);
+
+ /* We do two dlm_lock_put(). One for removing from list and the other is
+ * to force the DLM_UNLOCK_FREE_LOCK action so as to free the locks */
+
+ /* TODO: check pending_asts, pending_basts here */
+ list_for_each_entry_safe(lock, next, &res->granted, list) {
+ if (lock->ml.node == dead_node) {
+ list_del_init(&lock->list);
+ dlm_lock_put(lock);
+ /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
+ dlm_lock_put(lock);
+ freed++;
+ }
+ }
+ list_for_each_entry_safe(lock, next, &res->converting, list) {
+ if (lock->ml.node == dead_node) {
+ list_del_init(&lock->list);
+ dlm_lock_put(lock);
+ /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
+ dlm_lock_put(lock);
+ freed++;
+ }
+ }
+ list_for_each_entry_safe(lock, next, &res->blocked, list) {
+ if (lock->ml.node == dead_node) {
+ list_del_init(&lock->list);
+ dlm_lock_put(lock);
+ /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
+ dlm_lock_put(lock);
+ freed++;
+ }
+ }
+
+ if (freed) {
+ mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
+ "dropping ref from lockres\n", dlm->name,
+ res->lockname.len, res->lockname.name, freed, dead_node);
+ if (!test_bit(dead_node, res->refmap)) {
+ mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
+ "but ref was not set\n", dlm->name,
+ res->lockname.len, res->lockname.name, freed, dead_node);
+ __dlm_print_one_lock_resource(res);
+ }
+ res->state |= DLM_LOCK_RES_RECOVERY_WAITING;
+ dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
+ } else if (test_bit(dead_node, res->refmap)) {
+ mlog(0, "%s:%.*s: dead node %u had a ref, but had "
+ "no locks and had not purged before dying\n", dlm->name,
+ res->lockname.len, res->lockname.name, dead_node);
+ dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
+ }
+
+ /* do not kick thread yet */
+ __dlm_dirty_lockres(dlm, res);
+}
+
+static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
+{
+ struct dlm_lock_resource *res;
+ int i;
+ struct hlist_head *bucket;
+ struct hlist_node *tmp;
+ struct dlm_lock *lock;
+
+
+ /* purge any stale mles */
+ dlm_clean_master_list(dlm, dead_node);
+
+ /*
+ * now clean up all lock resources. there are two rules:
+ *
+ * 1) if the dead node was the master, move the lockres
+ * to the recovering list. set the RECOVERING flag.
+ * this lockres needs to be cleaned up before it can
+ * be used further.
+ *
+ * 2) if this node was the master, remove all locks from
+ * each of the lockres queues that were owned by the
+ * dead node. once recovery finishes, the dlm thread
+ * can be kicked again to see if any ASTs or BASTs
+ * need to be fired as a result.
+ */
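+ /*
+ * A purely hypothetical example of the two rules: if node 3 dies
+ * while it masters lockres A and also holds a PR lock on lockres B
+ * that this node masters, then A is moved to the recovering list
+ * and marked RECOVERING (rule 1), while B simply has node 3's stale
+ * PR lock freed via dlm_free_dead_locks() (rule 2).
+ */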
+ for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+ bucket = dlm_lockres_hash(dlm, i);
+ hlist_for_each_entry_safe(res, tmp, bucket, hash_node) {
+ /* always prune any $RECOVERY entries for dead nodes,
+ * otherwise hangs can occur during later recovery */
+ if (dlm_is_recovery_lock(res->lockname.name,
+ res->lockname.len)) {
+ spin_lock(&res->spinlock);
+ list_for_each_entry(lock, &res->granted, list) {
+ if (lock->ml.node == dead_node) {
+ mlog(0, "AHA! there was "
+ "a $RECOVERY lock for dead "
+ "node %u (%s)!\n",
+ dead_node, dlm->name);
+ list_del_init(&lock->list);
+ dlm_lock_put(lock);
+ /* Can't schedule
+ * DLM_UNLOCK_FREE_LOCK
+ * - do manually */
+ dlm_lock_put(lock);
+ break;
+ }
+ }
+
+ if ((res->owner == dead_node) &&
+ (res->state & DLM_LOCK_RES_DROPPING_REF)) {
+ dlm_lockres_get(res);
+ __dlm_do_purge_lockres(dlm, res);
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+ dlm_lockres_put(res);
+ continue;
+ } else if (res->owner == dlm->node_num)
+ dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
+ spin_unlock(&res->spinlock);
+ continue;
+ }
+ spin_lock(&res->spinlock);
+ /* zero the lvb if necessary */
+ dlm_revalidate_lvb(dlm, res, dead_node);
+ if (res->owner == dead_node) {
+ if (res->state & DLM_LOCK_RES_DROPPING_REF) {
+ mlog(0, "%s:%.*s: owned by "
+ "dead node %u, this node was "
+ "dropping its ref when master died. "
+ "continue, purging the lockres.\n",
+ dlm->name, res->lockname.len,
+ res->lockname.name, dead_node);
+ dlm_lockres_get(res);
+ __dlm_do_purge_lockres(dlm, res);
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+ dlm_lockres_put(res);
+ continue;
+ }
+ dlm_move_lockres_to_recovery_list(dlm, res);
+ } else if (res->owner == dlm->node_num) {
+ dlm_free_dead_locks(dlm, res, dead_node);
+ __dlm_lockres_calc_usage(dlm, res);
+ } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
+ if (test_bit(dead_node, res->refmap)) {
+ mlog(0, "%s:%.*s: dead node %u had a ref, but had "
+ "no locks and had not purged before dying\n",
+ dlm->name, res->lockname.len,
+ res->lockname.name, dead_node);
+ dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
+ }
+ }
+ spin_unlock(&res->spinlock);
+ }
+ }
+
+}
+
+static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
+{
+ assert_spin_locked(&dlm->spinlock);
+
+ if (dlm->reco.new_master == idx) {
+ mlog(0, "%s: recovery master %d just died\n",
+ dlm->name, idx);
+ if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
+ /* finalize1 was reached, so it is safe to clear
+ * the new_master and dead_node. that recovery
+ * is complete. */
+ mlog(0, "%s: dead master %d had reached "
+ "finalize1 state, clearing\n", dlm->name, idx);
+ dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
+ __dlm_reset_recovery(dlm);
+ }
+ }
+
+ /* Clean up join state on node death. */
+ if (dlm->joining_node == idx) {
+ mlog(0, "Clearing join state for node %u\n", idx);
+ __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
+ }
+
+ /* check to see if the node is already considered dead */
+ if (!test_bit(idx, dlm->live_nodes_map)) {
+ mlog(0, "for domain %s, node %d is already dead. "
+ "another node likely did recovery already.\n",
+ dlm->name, idx);
+ return;
+ }
+
+ /* check to see if we do not care about this node */
+ if (!test_bit(idx, dlm->domain_map)) {
+ /* This also catches the case that we get a node down
+ * but haven't joined the domain yet. */
+ mlog(0, "node %u already removed from domain!\n", idx);
+ return;
+ }
+
+ clear_bit(idx, dlm->live_nodes_map);
+
+ /* make sure local cleanup occurs before the heartbeat events */
+ if (!test_bit(idx, dlm->recovery_map))
+ dlm_do_local_recovery_cleanup(dlm, idx);
+
+ /* notify anything attached to the heartbeat events */
+ dlm_hb_event_notify_attached(dlm, idx, 0);
+
+ mlog(0, "node %u being removed from domain map!\n", idx);
+ clear_bit(idx, dlm->domain_map);
+ clear_bit(idx, dlm->exit_domain_map);
+ /* wake up migration waiters if a node goes down.
+ * perhaps later we can genericize this for other waiters. */
+ wake_up(&dlm->migration_wq);
+
+ set_bit(idx, dlm->recovery_map);
+}
+
+void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
+{
+ struct dlm_ctxt *dlm = data;
+
+ if (!dlm_grab(dlm))
+ return;
+
+ /*
+ * This will notify any dlm users that a node in our domain
+ * went away without notifying us first.
+ */
+ if (test_bit(idx, dlm->domain_map))
+ dlm_fire_domain_eviction_callbacks(dlm, idx);
+
+ spin_lock(&dlm->spinlock);
+ __dlm_hb_node_down(dlm, idx);
+ spin_unlock(&dlm->spinlock);
+
+ dlm_put(dlm);
+}
+
+void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
+{
+ struct dlm_ctxt *dlm = data;
+
+ if (!dlm_grab(dlm))
+ return;
+
+ spin_lock(&dlm->spinlock);
+ set_bit(idx, dlm->live_nodes_map);
+ /* do NOT notify any mle attached to the heartbeat events.
+ * new nodes are not involved in mastery until they have joined. */
+ spin_unlock(&dlm->spinlock);
+
+ dlm_put(dlm);
+}
+
+static void dlm_reco_ast(void *astdata)
+{
+ struct dlm_ctxt *dlm = astdata;
+ mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
+ dlm->node_num, dlm->name);
+}
+static void dlm_reco_bast(void *astdata, int blocked_type)
+{
+ struct dlm_ctxt *dlm = astdata;
+ mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
+ dlm->node_num, dlm->name);
+}
+static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
+{
+ mlog(0, "unlockast for recovery lock fired!\n");
+}
+
+/*
+ * dlm_pick_recovery_master will continually attempt to use
+ * dlmlock() on the special "$RECOVERY" lockres with the
+ * LKM_NOQUEUE flag to get an EX. every thread that enters
+ * this function on each node racing to become the recovery
+ * master will not stop attempting this until either:
+ * a) this node gets the EX (and becomes the recovery master),
+ * or b) dlm->reco.new_master gets set to some nodenum
+ * != O2NM_INVALID_NODE_NUM (another node will do the reco).
+ * so each time a recovery master is needed, the entire cluster
+ * will sync at this point. if the new master dies, that will
+ * be detected in dlm_do_recovery */
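+/*
+ * Return values, as set in the body below:
+ *   0        this node won the $RECOVERY EX lock, sent begin_reco and
+ *            is now the recovery master (dlm->reco.new_master == us)
+ *   -EEXIST  another node is (or just became) the recovery master, so
+ *            just wait for it to drive recovery
+ *   -EINVAL  the dead node was already recovered elsewhere; nothing
+ *            left to do
+ */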
+static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
+{
+ enum dlm_status ret;
+ struct dlm_lockstatus lksb;
+ int status = -EINVAL;
+
+ mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
+ dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
+again:
+ memset(&lksb, 0, sizeof(lksb));
+
+ ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
+ DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN,
+ dlm_reco_ast, dlm, dlm_reco_bast);
+
+ mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
+ dlm->name, ret, lksb.status);
+
+ if (ret == DLM_NORMAL) {
+ mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
+ dlm->name, dlm->node_num);
+
+ /* got the EX lock. check to see if another node
+ * just became the reco master */
+ if (dlm_reco_master_ready(dlm)) {
+ mlog(0, "%s: got reco EX lock, but %u will "
+ "do the recovery\n", dlm->name,
+ dlm->reco.new_master);
+ status = -EEXIST;
+ } else {
+ status = 0;
+
+ /* see if recovery was already finished elsewhere */
+ spin_lock(&dlm->spinlock);
+ if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
+ status = -EINVAL;
+ mlog(0, "%s: got reco EX lock, but "
+ "node got recovered already\n", dlm->name);
+ if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
+ mlog(ML_ERROR, "%s: new master is %u "
+ "but no dead node!\n",
+ dlm->name, dlm->reco.new_master);
+ BUG();
+ }
+ }
+ spin_unlock(&dlm->spinlock);
+ }
+
+ /* if this node has actually become the recovery master,
+ * set the master and send the messages to begin recovery */
+ if (!status) {
+ mlog(0, "%s: dead=%u, this=%u, sending "
+ "begin_reco now\n", dlm->name,
+ dlm->reco.dead_node, dlm->node_num);
+ status = dlm_send_begin_reco_message(dlm,
+ dlm->reco.dead_node);
+ /* this always succeeds */
+ BUG_ON(status);
+
+ /* set the new_master to this node */
+ spin_lock(&dlm->spinlock);
+ dlm_set_reco_master(dlm, dlm->node_num);
+ spin_unlock(&dlm->spinlock);
+ }
+
+ /* recovery lock is a special case. ast will not get fired,
+ * so just go ahead and unlock it. */
+ ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
+ if (ret == DLM_DENIED) {
+ mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
+ ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
+ }
+ if (ret != DLM_NORMAL) {
+ /* this would really suck. this could only happen
+ * if there was a network error during the unlock
+ * because of node death. this means the unlock
+ * is actually "done" and the lock structure is
+ * even freed. we can continue, but only
+ * because this specific lock name is special. */
+ mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
+ }
+ } else if (ret == DLM_NOTQUEUED) {
+ mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
+ dlm->name, dlm->node_num);
+ /* another node is master. wait on
+ * reco.new_master != O2NM_INVALID_NODE_NUM
+ * for at most one second */
+ wait_event_timeout(dlm->dlm_reco_thread_wq,
+ dlm_reco_master_ready(dlm),
+ msecs_to_jiffies(1000));
+ if (!dlm_reco_master_ready(dlm)) {
+ mlog(0, "%s: reco master taking awhile\n",
+ dlm->name);
+ goto again;
+ }
+ /* another node has informed this one that it is reco master */
+ mlog(0, "%s: reco master %u is ready to recover %u\n",
+ dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
+ status = -EEXIST;
+ } else if (ret == DLM_RECOVERING) {
+ mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
+ dlm->name, dlm->node_num);
+ goto again;
+ } else {
+ struct dlm_lock_resource *res;
+
+ /* dlmlock returned something other than NOTQUEUED or NORMAL */
+ mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
+ "lksb.status=%s\n", dlm->name, dlm_errname(ret),
+ dlm_errname(lksb.status));
+ res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
+ DLM_RECOVERY_LOCK_NAME_LEN);
+ if (res) {
+ dlm_print_one_lock_resource(res);
+ dlm_lockres_put(res);
+ } else {
+ mlog(ML_ERROR, "recovery lock not found\n");
+ }
+ BUG();
+ }
+
+ return status;
+}
+
+static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
+{
+ struct dlm_begin_reco br;
+ int ret = 0;
+ struct dlm_node_iter iter;
+ int nodenum;
+ int status;
+
+ mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);
+
+ spin_lock(&dlm->spinlock);
+ dlm_node_iter_init(dlm->domain_map, &iter);
+ spin_unlock(&dlm->spinlock);
+
+ clear_bit(dead_node, iter.node_map);
+
+ memset(&br, 0, sizeof(br));
+ br.node_idx = dlm->node_num;
+ br.dead_node = dead_node;
+
+ while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
+ ret = 0;
+ if (nodenum == dead_node) {
+ mlog(0, "not sending begin reco to dead node "
+ "%u\n", dead_node);
+ continue;
+ }
+ if (nodenum == dlm->node_num) {
+ mlog(0, "not sending begin reco to self\n");
+ continue;
+ }
+retry:
+ mlog(0, "attempting to send begin reco msg to %d\n",
+ nodenum);
+ ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
+ &br, sizeof(br), nodenum, &status);
+ /* negative status is handled ok by caller here */
+ if (ret >= 0)
+ ret = status;
+ if (dlm_is_host_down(ret)) {
+ /* node is down. not involved in recovery
+ * so just keep going */
+ mlog(ML_NOTICE, "%s: node %u was down when sending "
+ "begin reco msg (%d)\n", dlm->name, nodenum, ret);
+ ret = 0;
+ }
+
+ /*
+ * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
+ * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
+ * We are handling both for compatibility reasons.
+ */
+ if (ret == -EAGAIN || ret == EAGAIN) {
+ mlog(0, "%s: trying to start recovery of node "
+ "%u, but node %u is waiting for last recovery "
+ "to complete, backoff for a bit\n", dlm->name,
+ dead_node, nodenum);
+ msleep(100);
+ goto retry;
+ }
+ if (ret < 0) {
+ struct dlm_lock_resource *res;
+
+ /* this is now a serious problem, possibly ENOMEM
+ * in the network stack. must retry */
+ mlog_errno(ret);
+ mlog(ML_ERROR, "begin reco of dlm %s to node %u "
+ "returned %d\n", dlm->name, nodenum, ret);
+ res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
+ DLM_RECOVERY_LOCK_NAME_LEN);
+ if (res) {
+ dlm_print_one_lock_resource(res);
+ dlm_lockres_put(res);
+ } else {
+ mlog(ML_ERROR, "recovery lock not found\n");
+ }
+ /* sleep for a bit in hopes that we can avoid
+ * another ENOMEM */
+ msleep(100);
+ goto retry;
+ }
+ }
+
+ return ret;
+}
+
+int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;
+
+ /* ok to return 0, domain has gone away */
+ if (!dlm_grab(dlm))
+ return 0;
+
+ spin_lock(&dlm->spinlock);
+ if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
+ mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
+ "but this node is in finalize state, waiting on finalize2\n",
+ dlm->name, br->node_idx, br->dead_node,
+ dlm->reco.dead_node, dlm->reco.new_master);
+ spin_unlock(&dlm->spinlock);
+ dlm_put(dlm);
+ return -EAGAIN;
+ }
+ spin_unlock(&dlm->spinlock);
+
+ mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
+ dlm->name, br->node_idx, br->dead_node,
+ dlm->reco.dead_node, dlm->reco.new_master);
+
+ dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);
+
+ spin_lock(&dlm->spinlock);
+ if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
+ if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
+ mlog(0, "%s: new_master %u died, changing "
+ "to %u\n", dlm->name, dlm->reco.new_master,
+ br->node_idx);
+ } else {
+ mlog(0, "%s: new_master %u NOT DEAD, changing "
+ "to %u\n", dlm->name, dlm->reco.new_master,
+ br->node_idx);
+ /* may not have seen the new master as dead yet */
+ }
+ }
+ if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
+ mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
+ "node %u changing it to %u\n", dlm->name,
+ dlm->reco.dead_node, br->node_idx, br->dead_node);
+ }
+ dlm_set_reco_master(dlm, br->node_idx);
+ dlm_set_reco_dead_node(dlm, br->dead_node);
+ if (!test_bit(br->dead_node, dlm->recovery_map)) {
+ mlog(0, "recovery master %u sees %u as dead, but this "
+ "node has not yet. marking %u as dead\n",
+ br->node_idx, br->dead_node, br->dead_node);
+ if (!test_bit(br->dead_node, dlm->domain_map) ||
+ !test_bit(br->dead_node, dlm->live_nodes_map))
+ mlog(0, "%u not in domain/live_nodes map "
+ "so setting it in reco map manually\n",
+ br->dead_node);
+ /* force the recovery cleanup in __dlm_hb_node_down
+ * both of these will be cleared in a moment */
+ set_bit(br->dead_node, dlm->domain_map);
+ set_bit(br->dead_node, dlm->live_nodes_map);
+ __dlm_hb_node_down(dlm, br->dead_node);
+ }
+ spin_unlock(&dlm->spinlock);
+
+ dlm_kick_recovery_thread(dlm);
+
+ mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
+ dlm->name, br->node_idx, br->dead_node,
+ dlm->reco.dead_node, dlm->reco.new_master);
+
+ dlm_put(dlm);
+ return 0;
+}
+
+#define DLM_FINALIZE_STAGE2 0x01
+static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
+{
+ int ret = 0;
+ struct dlm_finalize_reco fr;
+ struct dlm_node_iter iter;
+ int nodenum;
+ int status;
+ int stage = 1;
+
+ mlog(0, "finishing recovery for node %s:%u, "
+ "stage %d\n", dlm->name, dlm->reco.dead_node, stage);
+
+ spin_lock(&dlm->spinlock);
+ dlm_node_iter_init(dlm->domain_map, &iter);
+ spin_unlock(&dlm->spinlock);
+
+stage2:
+ memset(&fr, 0, sizeof(fr));
+ fr.node_idx = dlm->node_num;
+ fr.dead_node = dlm->reco.dead_node;
+ if (stage == 2)
+ fr.flags |= DLM_FINALIZE_STAGE2;
+
+ while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
+ if (nodenum == dlm->node_num)
+ continue;
+ ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
+ &fr, sizeof(fr), nodenum, &status);
+ if (ret >= 0)
+ ret = status;
+ if (ret < 0) {
+ mlog(ML_ERROR, "Error %d when sending message %u (key "
+ "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG,
+ dlm->key, nodenum);
+ if (dlm_is_host_down(ret)) {
+ /* this has no effect on this recovery
+ * session, so set the status to zero to
+ * finish out the last recovery */
+ mlog(ML_ERROR, "node %u went down after this "
+ "node finished recovery.\n", nodenum);
+ ret = 0;
+ continue;
+ }
+ break;
+ }
+ }
+ if (stage == 1) {
+ /* reset the node_iter back to the top and send finalize2 */
+ iter.curnode = -1;
+ stage = 2;
+ goto stage2;
+ }
+
+ return ret;
+}
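+
+/*
+ * Rough picture of the two-stage finalize implemented by the sender
+ * above and the handler below (node roles are illustrative only):
+ *
+ *   recovery master                  every other live node
+ *   ---------------                  ---------------------
+ *   send finalize1  -------------->  record the recovery master as the new
+ *                                    owner of the dead node's lockres,
+ *                                    set DLM_RECO_STATE_FINALIZE
+ *   send finalize2  -------------->  clear DLM_RECO_STATE_FINALIZE, reset
+ *                                    the reco state, kick the reco thread
+ *
+ * If the recovery master dies between the two stages, __dlm_hb_node_down()
+ * clears the FINALIZE flag and resets recovery so it can be redone.
+ */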
+
+int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
+ int stage = 1;
+
+ /* ok to return 0, domain has gone away */
+ if (!dlm_grab(dlm))
+ return 0;
+
+ if (fr->flags & DLM_FINALIZE_STAGE2)
+ stage = 2;
+
+ mlog(0, "%s: node %u finalizing recovery stage%d of "
+ "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
+ fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
+
+ spin_lock(&dlm->spinlock);
+
+ if (dlm->reco.new_master != fr->node_idx) {
+ mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
+ "%u is supposed to be the new master, dead=%u\n",
+ fr->node_idx, dlm->reco.new_master, fr->dead_node);
+ BUG();
+ }
+ if (dlm->reco.dead_node != fr->dead_node) {
+ mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
+ "node %u, but node %u is supposed to be dead\n",
+ fr->node_idx, fr->dead_node, dlm->reco.dead_node);
+ BUG();
+ }
+
+ switch (stage) {
+ case 1:
+ dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
+ if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
+ mlog(ML_ERROR, "%s: received finalize1 from "
+ "new master %u for dead node %u, but "
+ "this node has already received it!\n",
+ dlm->name, fr->node_idx, fr->dead_node);
+ dlm_print_reco_node_status(dlm);
+ BUG();
+ }
+ dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
+ spin_unlock(&dlm->spinlock);
+ break;
+ case 2:
+ if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
+ mlog(ML_ERROR, "%s: received finalize2 from "
+ "new master %u for dead node %u, but "
+ "this node did not have finalize1!\n",
+ dlm->name, fr->node_idx, fr->dead_node);
+ dlm_print_reco_node_status(dlm);
+ BUG();
+ }
+ dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
+ __dlm_reset_recovery(dlm);
+ spin_unlock(&dlm->spinlock);
+ dlm_kick_recovery_thread(dlm);
+ break;
+ }
+
+ mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
+ dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);
+
+ dlm_put(dlm);
+ return 0;
+}
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
new file mode 100644
index 0000000000..eedf07ca23
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -0,0 +1,809 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * dlmthread.c
+ *
+ * standalone DLM module
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ */
+
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/random.h>
+#include <linux/blkdev.h>
+#include <linux/socket.h>
+#include <linux/inet.h>
+#include <linux/timer.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+
+
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
+
+#include "dlmapi.h"
+#include "dlmcommon.h"
+#include "dlmdomain.h"
+
+#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD)
+#include "../cluster/masklog.h"
+
+static int dlm_thread(void *data);
+static void dlm_flush_asts(struct dlm_ctxt *dlm);
+
+/* will exit holding res->spinlock, but may drop in function */
+/* waits until flags are cleared on res->state */
+void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags)
+{
+ DECLARE_WAITQUEUE(wait, current);
+
+ assert_spin_locked(&res->spinlock);
+
+ add_wait_queue(&res->wq, &wait);
+repeat:
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (res->state & flags) {
+ spin_unlock(&res->spinlock);
+ schedule();
+ spin_lock(&res->spinlock);
+ goto repeat;
+ }
+ remove_wait_queue(&res->wq, &wait);
+ __set_current_state(TASK_RUNNING);
+}
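+
+/*
+ * Typical usage (the caller must already hold res->spinlock), e.g. as
+ * done in dlm_purge_lockres() further below:
+ *
+ *      spin_lock(&res->spinlock);
+ *      __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
+ *      spin_unlock(&res->spinlock);
+ */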
+
+int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
+{
+ if (list_empty(&res->granted) &&
+ list_empty(&res->converting) &&
+ list_empty(&res->blocked))
+ return 0;
+ return 1;
+}
+
+/* "unused": the lockres has no locks, is not on the dirty list,
+ * has no inflight locks (in the gap between mastery and acquiring
+ * the first lock), and has no bits in its refmap.
+ * truly ready to be freed. */
+int __dlm_lockres_unused(struct dlm_lock_resource *res)
+{
+ int bit;
+
+ assert_spin_locked(&res->spinlock);
+
+ if (__dlm_lockres_has_locks(res))
+ return 0;
+
+ /* Locks are in the process of being created */
+ if (res->inflight_locks)
+ return 0;
+
+ if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
+ return 0;
+
+ if (res->state & (DLM_LOCK_RES_RECOVERING|
+ DLM_LOCK_RES_RECOVERY_WAITING))
+ return 0;
+
+ /* Another node has this resource with this node as the master */
+ bit = find_first_bit(res->refmap, O2NM_MAX_NODES);
+ if (bit < O2NM_MAX_NODES)
+ return 0;
+
+ return 1;
+}
+
+
+/* Call this whenever you may have added or deleted something from one of
+ * the lockres queues. It figures out whether the lockres belongs on the
+ * unused (purge) list or not and does the appropriate thing. */
+void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&res->spinlock);
+
+ if (__dlm_lockres_unused(res)) {
+ if (list_empty(&res->purge)) {
+ mlog(0, "%s: Adding res %.*s to purge list\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+
+ res->last_used = jiffies;
+ dlm_lockres_get(res);
+ list_add_tail(&res->purge, &dlm->purge_list);
+ dlm->purge_count++;
+ }
+ } else if (!list_empty(&res->purge)) {
+ mlog(0, "%s: Removing res %.*s from purge list\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+
+ list_del_init(&res->purge);
+ dlm_lockres_put(res);
+ dlm->purge_count--;
+ }
+}
+
+void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ spin_lock(&dlm->spinlock);
+ spin_lock(&res->spinlock);
+
+ __dlm_lockres_calc_usage(dlm, res);
+
+ spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->spinlock);
+}
+
+/*
+ * Do the real purge work:
+ * unhash the lockres, and
+ * clear flag DLM_LOCK_RES_DROPPING_REF.
+ * It requires dlm and lockres spinlock to be taken.
+ */
+void __dlm_do_purge_lockres(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&res->spinlock);
+
+ if (!list_empty(&res->purge)) {
+ mlog(0, "%s: Removing res %.*s from purgelist\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ list_del_init(&res->purge);
+ dlm_lockres_put(res);
+ dlm->purge_count--;
+ }
+
+ if (!__dlm_lockres_unused(res)) {
+ mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ __dlm_print_one_lock_resource(res);
+ BUG();
+ }
+
+ __dlm_unhash_lockres(dlm, res);
+
+ spin_lock(&dlm->track_lock);
+ if (!list_empty(&res->tracking))
+ list_del_init(&res->tracking);
+ else {
+ mlog(ML_ERROR, "%s: Resource %.*s not on the Tracking list\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ __dlm_print_one_lock_resource(res);
+ }
+ spin_unlock(&dlm->track_lock);
+
+ /*
+ * lockres is not in the hash now. drop the flag and wake up
+ * any processes waiting in dlm_get_lock_resource.
+ */
+ res->state &= ~DLM_LOCK_RES_DROPPING_REF;
+}
+
+static void dlm_purge_lockres(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ int master;
+ int ret = 0;
+
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&res->spinlock);
+
+ master = (res->owner == dlm->node_num);
+
+ mlog(0, "%s: Purging res %.*s, master %d\n", dlm->name,
+ res->lockname.len, res->lockname.name, master);
+
+ if (!master) {
+ if (res->state & DLM_LOCK_RES_DROPPING_REF) {
+ mlog(ML_NOTICE, "%s: res %.*s already in DLM_LOCK_RES_DROPPING_REF state\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ spin_unlock(&res->spinlock);
+ return;
+ }
+
+ res->state |= DLM_LOCK_RES_DROPPING_REF;
+ /* drop spinlock... retake below */
+ spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->spinlock);
+
+ spin_lock(&res->spinlock);
+ /* This ensures that clear refmap is sent after the set */
+ __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
+ spin_unlock(&res->spinlock);
+
+ /* clear our bit from the master's refmap, ignore errors */
+ ret = dlm_drop_lockres_ref(dlm, res);
+ if (ret < 0) {
+ if (!dlm_is_host_down(ret))
+ BUG();
+ }
+ spin_lock(&dlm->spinlock);
+ spin_lock(&res->spinlock);
+ }
+
+ if (!list_empty(&res->purge)) {
+ mlog(0, "%s: Removing res %.*s from purgelist, master %d\n",
+ dlm->name, res->lockname.len, res->lockname.name, master);
+ list_del_init(&res->purge);
+ dlm_lockres_put(res);
+ dlm->purge_count--;
+ }
+
+ if (!master && ret == DLM_DEREF_RESPONSE_INPROG) {
+ mlog(0, "%s: deref %.*s in progress\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ spin_unlock(&res->spinlock);
+ return;
+ }
+
+ if (!__dlm_lockres_unused(res)) {
+ mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ __dlm_print_one_lock_resource(res);
+ BUG();
+ }
+
+ __dlm_unhash_lockres(dlm, res);
+
+ spin_lock(&dlm->track_lock);
+ if (!list_empty(&res->tracking))
+ list_del_init(&res->tracking);
+ else {
+ mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
+ res->lockname.len, res->lockname.name);
+ __dlm_print_one_lock_resource(res);
+ }
+ spin_unlock(&dlm->track_lock);
+
+ /* lockres is not in the hash now. drop the flag and wake up
+ * any processes waiting in dlm_get_lock_resource. */
+ if (!master) {
+ res->state &= ~DLM_LOCK_RES_DROPPING_REF;
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+ } else
+ spin_unlock(&res->spinlock);
+}
+
+static void dlm_run_purge_list(struct dlm_ctxt *dlm,
+ int purge_now)
+{
+ unsigned int run_max, unused;
+ unsigned long purge_jiffies;
+ struct dlm_lock_resource *lockres;
+
+ spin_lock(&dlm->spinlock);
+ run_max = dlm->purge_count;
+
+ while (run_max && !list_empty(&dlm->purge_list)) {
+ run_max--;
+
+ lockres = list_entry(dlm->purge_list.next,
+ struct dlm_lock_resource, purge);
+
+ spin_lock(&lockres->spinlock);
+
+ purge_jiffies = lockres->last_used +
+ msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);
+
+ /* Make sure that this lockres is actually due to
+ * be purged at this time. */
+ if (!purge_now && time_after(purge_jiffies, jiffies)) {
+ /* Since resources are added to the purge list
+ * in tail order, we can stop at the first
+ * unpurgeable resource -- anything added after
+ * it will have a greater last_used value */
+ spin_unlock(&lockres->spinlock);
+ break;
+ }
+
+ /* Status of the lockres *might* change so double
+ * check. If the lockres is unused, holding the dlm
+ * spinlock will prevent anyone from getting any more
+ * refs on it. */
+ unused = __dlm_lockres_unused(lockres);
+ if (!unused ||
+ (lockres->state & DLM_LOCK_RES_MIGRATING) ||
+ (lockres->inflight_assert_workers != 0)) {
+ mlog(0, "%s: res %.*s is in use or being remastered, "
+ "used %d, state %d, assert master workers %u\n",
+ dlm->name, lockres->lockname.len,
+ lockres->lockname.name,
+ !unused, lockres->state,
+ lockres->inflight_assert_workers);
+ list_move_tail(&lockres->purge, &dlm->purge_list);
+ spin_unlock(&lockres->spinlock);
+ continue;
+ }
+
+ dlm_lockres_get(lockres);
+
+ dlm_purge_lockres(dlm, lockres);
+
+ dlm_lockres_put(lockres);
+
+ /* Avoid adding any scheduling latencies */
+ cond_resched_lock(&dlm->spinlock);
+ }
+
+ spin_unlock(&dlm->spinlock);
+}
+
+static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ struct dlm_lock *lock, *target;
+ int can_grant = 1;
+
+ /*
+ * Because this function is called with the lockres
+ * spinlock held, and because we know the lockres is not
+ * migrating/recovering/in-progress, it is safe to reserve
+ * asts and basts right before queueing them throughout
+ * this function.
+ */
+ assert_spin_locked(&dlm->ast_lock);
+ assert_spin_locked(&res->spinlock);
+ BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
+ DLM_LOCK_RES_RECOVERING|
+ DLM_LOCK_RES_IN_PROGRESS)));
+
+converting:
+ if (list_empty(&res->converting))
+ goto blocked;
+ mlog(0, "%s: res %.*s has locks on the convert queue\n", dlm->name,
+ res->lockname.len, res->lockname.name);
+
+ target = list_entry(res->converting.next, struct dlm_lock, list);
+ if (target->ml.convert_type == LKM_IVMODE) {
+ mlog(ML_ERROR, "%s: res %.*s converting lock to invalid mode\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ BUG();
+ }
+ list_for_each_entry(lock, &res->granted, list) {
+ if (lock == target)
+ continue;
+ if (!dlm_lock_compatible(lock->ml.type,
+ target->ml.convert_type)) {
+ can_grant = 0;
+ /* queue the BAST if not already */
+ if (lock->ml.highest_blocked == LKM_IVMODE) {
+ __dlm_lockres_reserve_ast(res);
+ __dlm_queue_bast(dlm, lock);
+ }
+ /* update the highest_blocked if needed */
+ if (lock->ml.highest_blocked < target->ml.convert_type)
+ lock->ml.highest_blocked =
+ target->ml.convert_type;
+ }
+ }
+
+ list_for_each_entry(lock, &res->converting, list) {
+ if (lock == target)
+ continue;
+ if (!dlm_lock_compatible(lock->ml.type,
+ target->ml.convert_type)) {
+ can_grant = 0;
+ if (lock->ml.highest_blocked == LKM_IVMODE) {
+ __dlm_lockres_reserve_ast(res);
+ __dlm_queue_bast(dlm, lock);
+ }
+ if (lock->ml.highest_blocked < target->ml.convert_type)
+ lock->ml.highest_blocked =
+ target->ml.convert_type;
+ }
+ }
+
+ /* we can convert the lock */
+ if (can_grant) {
+ spin_lock(&target->spinlock);
+ BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
+
+ mlog(0, "%s: res %.*s, AST for Converting lock %u:%llu, type "
+ "%d => %d, node %u\n", dlm->name, res->lockname.len,
+ res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
+ target->ml.type,
+ target->ml.convert_type, target->ml.node);
+
+ target->ml.type = target->ml.convert_type;
+ target->ml.convert_type = LKM_IVMODE;
+ list_move_tail(&target->list, &res->granted);
+
+ BUG_ON(!target->lksb);
+ target->lksb->status = DLM_NORMAL;
+
+ spin_unlock(&target->spinlock);
+
+ __dlm_lockres_reserve_ast(res);
+ __dlm_queue_ast(dlm, target);
+ /* go back and check for more */
+ goto converting;
+ }
+
+blocked:
+ if (list_empty(&res->blocked))
+ goto leave;
+ target = list_entry(res->blocked.next, struct dlm_lock, list);
+
+ list_for_each_entry(lock, &res->granted, list) {
+ if (lock == target)
+ continue;
+ if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
+ can_grant = 0;
+ if (lock->ml.highest_blocked == LKM_IVMODE) {
+ __dlm_lockres_reserve_ast(res);
+ __dlm_queue_bast(dlm, lock);
+ }
+ if (lock->ml.highest_blocked < target->ml.type)
+ lock->ml.highest_blocked = target->ml.type;
+ }
+ }
+
+ list_for_each_entry(lock, &res->converting, list) {
+ if (lock == target)
+ continue;
+ if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
+ can_grant = 0;
+ if (lock->ml.highest_blocked == LKM_IVMODE) {
+ __dlm_lockres_reserve_ast(res);
+ __dlm_queue_bast(dlm, lock);
+ }
+ if (lock->ml.highest_blocked < target->ml.type)
+ lock->ml.highest_blocked = target->ml.type;
+ }
+ }
+
+ /* we can grant the blocked lock (only
+ * possible if converting list empty) */
+ if (can_grant) {
+ spin_lock(&target->spinlock);
+ BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
+
+ mlog(0, "%s: res %.*s, AST for Blocked lock %u:%llu, type %d, "
+ "node %u\n", dlm->name, res->lockname.len,
+ res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
+ target->ml.type, target->ml.node);
+
+ /* target->ml.type is already correct */
+ list_move_tail(&target->list, &res->granted);
+
+ BUG_ON(!target->lksb);
+ target->lksb->status = DLM_NORMAL;
+
+ spin_unlock(&target->spinlock);
+
+ __dlm_lockres_reserve_ast(res);
+ __dlm_queue_ast(dlm, target);
+ /* go back and check for more */
+ goto converting;
+ }
+
+leave:
+ return;
+}
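+
+/*
+ * A concrete (hypothetical) example of the shuffle above: if the head of
+ * the blocked queue is an EX request from node 1 while node 2 still holds
+ * a granted PR lock, the two modes are incompatible, so the EX cannot be
+ * granted; instead a BAST is reserved and queued toward node 2's lock and
+ * its highest_blocked is raised to EX. Once node 2 drops to a compatible
+ * mode (NL) and the queues are shuffled again, the EX is moved to the
+ * granted queue and an AST is queued for node 1.
+ */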
+
+/* must have NO locks when calling this with res != NULL */
+void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
+{
+ if (res) {
+ spin_lock(&dlm->spinlock);
+ spin_lock(&res->spinlock);
+ __dlm_dirty_lockres(dlm, res);
+ spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->spinlock);
+ }
+ wake_up(&dlm->dlm_thread_wq);
+}
+
+void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
+{
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&res->spinlock);
+
+ /* don't shuffle secondary queues */
+ if (res->owner == dlm->node_num) {
+ if (res->state & (DLM_LOCK_RES_MIGRATING |
+ DLM_LOCK_RES_BLOCK_DIRTY))
+ return;
+
+ if (list_empty(&res->dirty)) {
+ /* ref for dirty_list */
+ dlm_lockres_get(res);
+ list_add_tail(&res->dirty, &dlm->dirty_list);
+ res->state |= DLM_LOCK_RES_DIRTY;
+ }
+ }
+
+ mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
+ res->lockname.name);
+}
+
+
+/* Launch the dlm thread for this domain */
+int dlm_launch_thread(struct dlm_ctxt *dlm)
+{
+ mlog(0, "Starting dlm_thread...\n");
+
+ dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm-%s",
+ dlm->name);
+ if (IS_ERR(dlm->dlm_thread_task)) {
+ mlog_errno(PTR_ERR(dlm->dlm_thread_task));
+ dlm->dlm_thread_task = NULL;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void dlm_complete_thread(struct dlm_ctxt *dlm)
+{
+ if (dlm->dlm_thread_task) {
+ mlog(ML_KTHREAD, "Waiting for dlm thread to exit\n");
+ kthread_stop(dlm->dlm_thread_task);
+ dlm->dlm_thread_task = NULL;
+ }
+}
+
+static int dlm_dirty_list_empty(struct dlm_ctxt *dlm)
+{
+ int empty;
+
+ spin_lock(&dlm->spinlock);
+ empty = list_empty(&dlm->dirty_list);
+ spin_unlock(&dlm->spinlock);
+
+ return empty;
+}
+
+static void dlm_flush_asts(struct dlm_ctxt *dlm)
+{
+ int ret;
+ struct dlm_lock *lock;
+ struct dlm_lock_resource *res;
+ u8 hi;
+
+ spin_lock(&dlm->ast_lock);
+ while (!list_empty(&dlm->pending_asts)) {
+ lock = list_entry(dlm->pending_asts.next,
+ struct dlm_lock, ast_list);
+ /* get an extra ref on lock */
+ dlm_lock_get(lock);
+ res = lock->lockres;
+ mlog(0, "%s: res %.*s, Flush AST for lock %u:%llu, type %d, "
+ "node %u\n", dlm->name, res->lockname.len,
+ res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+ lock->ml.type, lock->ml.node);
+
+ BUG_ON(!lock->ast_pending);
+
+ /* remove from list (including ref) */
+ list_del_init(&lock->ast_list);
+ dlm_lock_put(lock);
+ spin_unlock(&dlm->ast_lock);
+
+ if (lock->ml.node != dlm->node_num) {
+ ret = dlm_do_remote_ast(dlm, res, lock);
+ if (ret < 0)
+ mlog_errno(ret);
+ } else
+ dlm_do_local_ast(dlm, res, lock);
+
+ spin_lock(&dlm->ast_lock);
+
+ /* possible that another ast was queued while
+ * we were delivering the last one */
+ if (!list_empty(&lock->ast_list)) {
+ mlog(0, "%s: res %.*s, AST queued while flushing last "
+ "one\n", dlm->name, res->lockname.len,
+ res->lockname.name);
+ } else
+ lock->ast_pending = 0;
+
+ /* drop the extra ref.
+ * this may drop it completely. */
+ dlm_lock_put(lock);
+ dlm_lockres_release_ast(dlm, res);
+ }
+
+ while (!list_empty(&dlm->pending_basts)) {
+ lock = list_entry(dlm->pending_basts.next,
+ struct dlm_lock, bast_list);
+ /* get an extra ref on lock */
+ dlm_lock_get(lock);
+ res = lock->lockres;
+
+ BUG_ON(!lock->bast_pending);
+
+ /* get the highest blocked lock, and reset */
+ spin_lock(&lock->spinlock);
+ BUG_ON(lock->ml.highest_blocked <= LKM_IVMODE);
+ hi = lock->ml.highest_blocked;
+ lock->ml.highest_blocked = LKM_IVMODE;
+ spin_unlock(&lock->spinlock);
+
+ /* remove from list (including ref) */
+ list_del_init(&lock->bast_list);
+ dlm_lock_put(lock);
+ spin_unlock(&dlm->ast_lock);
+
+ mlog(0, "%s: res %.*s, Flush BAST for lock %u:%llu, "
+ "blocked %d, node %u\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+ hi, lock->ml.node);
+
+ if (lock->ml.node != dlm->node_num) {
+ ret = dlm_send_proxy_bast(dlm, res, lock, hi);
+ if (ret < 0)
+ mlog_errno(ret);
+ } else
+ dlm_do_local_bast(dlm, res, lock, hi);
+
+ spin_lock(&dlm->ast_lock);
+
+ /* possible that another bast was queued while
+ * we were delivering the last one */
+ if (!list_empty(&lock->bast_list)) {
+ mlog(0, "%s: res %.*s, BAST queued while flushing last "
+ "one\n", dlm->name, res->lockname.len,
+ res->lockname.name);
+ } else
+ lock->bast_pending = 0;
+
+ /* drop the extra ref.
+ * this may drop it completely. */
+ dlm_lock_put(lock);
+ dlm_lockres_release_ast(dlm, res);
+ }
+ wake_up(&dlm->ast_wq);
+ spin_unlock(&dlm->ast_lock);
+}
+
+
+#define DLM_THREAD_TIMEOUT_MS (4 * 1000)
+#define DLM_THREAD_MAX_DIRTY 100
+
+static int dlm_thread(void *data)
+{
+ struct dlm_lock_resource *res;
+ struct dlm_ctxt *dlm = data;
+ unsigned long timeout = msecs_to_jiffies(DLM_THREAD_TIMEOUT_MS);
+
+ mlog(0, "dlm thread running for %s...\n", dlm->name);
+
+ while (!kthread_should_stop()) {
+ int n = DLM_THREAD_MAX_DIRTY;
+
+ /* dlm_shutting_down is very point-in-time, but that
+ * doesn't matter as we'll just loop back around if we
+ * get false on the leading edge of a state
+ * transition. */
+ dlm_run_purge_list(dlm, dlm_shutting_down(dlm));
+
+ /* We really don't want to hold dlm->spinlock while
+ * calling dlm_shuffle_lists on each lockres that
+ * needs to have its queues adjusted and AST/BASTs
+ * run. So let's pull each entry off the dirty_list
+ * and drop dlm->spinlock ASAP. Once off the list,
+ * res->spinlock needs to be taken again to protect
+ * the queues while calling dlm_shuffle_lists. */
+ spin_lock(&dlm->spinlock);
+ while (!list_empty(&dlm->dirty_list)) {
+ int delay = 0;
+ res = list_entry(dlm->dirty_list.next,
+ struct dlm_lock_resource, dirty);
+
+ /* peel a lockres off, remove it from the list,
+ * unset the dirty flag and drop the dlm lock */
+ BUG_ON(!res);
+ dlm_lockres_get(res);
+
+ spin_lock(&res->spinlock);
+ /* We clear the DLM_LOCK_RES_DIRTY state once we shuffle lists below */
+ list_del_init(&res->dirty);
+ spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->spinlock);
+ /* Drop dirty_list ref */
+ dlm_lockres_put(res);
+
+ /* lockres can be re-dirtied/re-added to the
+ * dirty_list in this gap, but that is ok */
+
+ spin_lock(&dlm->ast_lock);
+ spin_lock(&res->spinlock);
+ if (res->owner != dlm->node_num) {
+ __dlm_print_one_lock_resource(res);
+ mlog(ML_ERROR, "%s: inprog %d, mig %d, reco %d,"
+ " dirty %d\n", dlm->name,
+ !!(res->state & DLM_LOCK_RES_IN_PROGRESS),
+ !!(res->state & DLM_LOCK_RES_MIGRATING),
+ !!(res->state & DLM_LOCK_RES_RECOVERING),
+ !!(res->state & DLM_LOCK_RES_DIRTY));
+ }
+ BUG_ON(res->owner != dlm->node_num);
+
+ /* it is now ok to move lockreses in these states
+ * to the dirty list, assuming that they will only be
+ * dirty for a short while. */
+ BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
+ if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
+ DLM_LOCK_RES_RECOVERING |
+ DLM_LOCK_RES_RECOVERY_WAITING)) {
+ /* move it to the tail and keep going */
+ res->state &= ~DLM_LOCK_RES_DIRTY;
+ spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->ast_lock);
+ mlog(0, "%s: res %.*s, inprogress, delay list "
+ "shuffle, state %d\n", dlm->name,
+ res->lockname.len, res->lockname.name,
+ res->state);
+ delay = 1;
+ goto in_progress;
+ }
+
+ /* at this point the lockres is not migrating/
+ * recovering/in-progress. we have the lockres
+ * spinlock and do NOT have the dlm lock.
+ * safe to reserve/queue asts and run the lists. */
+
+ /* called while holding lockres lock */
+ dlm_shuffle_lists(dlm, res);
+ res->state &= ~DLM_LOCK_RES_DIRTY;
+ spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->ast_lock);
+
+ dlm_lockres_calc_usage(dlm, res);
+
+in_progress:
+
+ spin_lock(&dlm->spinlock);
+ /* if the lock was in-progress, stick
+ * it on the back of the list */
+ if (delay) {
+ spin_lock(&res->spinlock);
+ __dlm_dirty_lockres(dlm, res);
+ spin_unlock(&res->spinlock);
+ }
+ dlm_lockres_put(res);
+
+ /* unlikely, but we may need to give time to
+ * other tasks */
+ if (!--n) {
+ mlog(0, "%s: Throttling dlm thread\n",
+ dlm->name);
+ break;
+ }
+ }
+
+ spin_unlock(&dlm->spinlock);
+ dlm_flush_asts(dlm);
+
+ /* yield and continue right away if there is more work to do */
+ if (!n) {
+ cond_resched();
+ continue;
+ }
+
+ wait_event_interruptible_timeout(dlm->dlm_thread_wq,
+ !dlm_dirty_list_empty(dlm) ||
+ kthread_should_stop(),
+ timeout);
+ }
+
+ mlog(0, "quitting DLM thread\n");
+ return 0;
+}
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
new file mode 100644
index 0000000000..7318e4794e
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -0,0 +1,695 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * dlmunlock.c
+ *
+ * underlying calls for unlocking locks
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ */
+
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/random.h>
+#include <linux/blkdev.h>
+#include <linux/socket.h>
+#include <linux/inet.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+
+#include "../cluster/heartbeat.h"
+#include "../cluster/nodemanager.h"
+#include "../cluster/tcp.h"
+
+#include "dlmapi.h"
+#include "dlmcommon.h"
+
+#define MLOG_MASK_PREFIX ML_DLM
+#include "../cluster/masklog.h"
+
+#define DLM_UNLOCK_FREE_LOCK 0x00000001
+#define DLM_UNLOCK_CALL_AST 0x00000002
+#define DLM_UNLOCK_REMOVE_LOCK 0x00000004
+#define DLM_UNLOCK_REGRANT_LOCK 0x00000008
+#define DLM_UNLOCK_CLEAR_CONVERT_TYPE 0x00000010
+
+
+static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ struct dlm_lockstatus *lksb,
+ int *actions);
+static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ struct dlm_lockstatus *lksb,
+ int *actions);
+
+static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ struct dlm_lockstatus *lksb,
+ int flags,
+ u8 owner);
+
+
+/*
+ * according to the spec:
+ * http://opendlm.sourceforge.net/cvsmirror/opendlm/docs/dlmbook_final.pdf
+ *
+ * flags & LKM_CANCEL != 0: must be converting or blocked
+ * flags & LKM_CANCEL == 0: must be granted
+ *
+ * So to unlock a converting lock, you must first cancel the
+ * convert (passing LKM_CANCEL in flags), then call the unlock
+ * again (with no LKM_CANCEL in flags).
+ */
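+
+/*
+ * A minimal, purely illustrative caller-side sketch of that rule for a
+ * lock stuck in a convert: cancel the convert first, then unlock:
+ *
+ *      status = dlmunlock(dlm, lksb, LKM_CANCEL, unlockast, data);
+ *      (wait for the cancel's unlockast to run)
+ *      status = dlmunlock(dlm, lksb, 0, unlockast, data);
+ */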
+
+
+/*
+ * locking:
+ * caller needs: none
+ * taken: res->spinlock and lock->spinlock taken and dropped
+ * held on exit: none
+ * returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network
+ * all callers should have taken an extra ref on lock coming in
+ */
+static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ struct dlm_lockstatus *lksb,
+ int flags, int *call_ast,
+ int master_node)
+{
+ enum dlm_status status;
+ int actions = 0;
+ int in_use;
+ u8 owner;
+ int recovery_wait = 0;
+
+ mlog(0, "master_node = %d, valblk = %d\n", master_node,
+ flags & LKM_VALBLK);
+
+ if (master_node)
+ BUG_ON(res->owner != dlm->node_num);
+ else
+ BUG_ON(res->owner == dlm->node_num);
+
+ spin_lock(&dlm->ast_lock);
+ /* We want to be sure that we're not freeing a lock
+ * that still has ASTs pending... */
+ in_use = !list_empty(&lock->ast_list);
+ spin_unlock(&dlm->ast_lock);
+ if (in_use && !(flags & LKM_CANCEL)) {
+ mlog(ML_ERROR, "lockres %.*s: Someone is calling dlmunlock "
+ "while waiting for an ast!", res->lockname.len,
+ res->lockname.name);
+ return DLM_BADPARAM;
+ }
+
+ spin_lock(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_IN_PROGRESS) {
+ if (master_node && !(flags & LKM_CANCEL)) {
+ mlog(ML_ERROR, "lockres in progress!\n");
+ spin_unlock(&res->spinlock);
+ return DLM_FORWARD;
+ }
+ /* ok for this to sleep if not in a network handler */
+ __dlm_wait_on_lockres(res);
+ res->state |= DLM_LOCK_RES_IN_PROGRESS;
+ }
+ spin_lock(&lock->spinlock);
+
+ if (res->state & DLM_LOCK_RES_RECOVERING) {
+ status = DLM_RECOVERING;
+ goto leave;
+ }
+
+ if (res->state & DLM_LOCK_RES_MIGRATING) {
+ status = DLM_MIGRATING;
+ goto leave;
+ }
+
+ /* see above for what the spec says about
+ * LKM_CANCEL and the lock queue state */
+ if (flags & LKM_CANCEL)
+ status = dlm_get_cancel_actions(dlm, res, lock, lksb, &actions);
+ else
+ status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions);
+
+ if (status != DLM_NORMAL && (status != DLM_CANCELGRANT || !master_node))
+ goto leave;
+
+ /* By now this has been masked out of cancel requests. */
+ if (flags & LKM_VALBLK) {
+ /* make the final update to the lvb */
+ if (master_node)
+ memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN);
+ else
+ flags |= LKM_PUT_LVB; /* let the send function
+ * handle it. */
+ }
+
+ if (!master_node) {
+ owner = res->owner;
+ /* drop locks and send message */
+ if (flags & LKM_CANCEL)
+ lock->cancel_pending = 1;
+ else
+ lock->unlock_pending = 1;
+ spin_unlock(&lock->spinlock);
+ spin_unlock(&res->spinlock);
+ status = dlm_send_remote_unlock_request(dlm, res, lock, lksb,
+ flags, owner);
+ spin_lock(&res->spinlock);
+ spin_lock(&lock->spinlock);
+ /* if the master told us the lock was already granted,
+ * let the ast handle all of these actions */
+ if (status == DLM_CANCELGRANT) {
+ actions &= ~(DLM_UNLOCK_REMOVE_LOCK|
+ DLM_UNLOCK_REGRANT_LOCK|
+ DLM_UNLOCK_CLEAR_CONVERT_TYPE);
+ } else if (status == DLM_RECOVERING ||
+ status == DLM_MIGRATING ||
+ status == DLM_FORWARD ||
+ status == DLM_NOLOCKMGR
+ ) {
+ /* must clear the actions because this unlock
+ * is about to be retried. cannot free or do
+ * any list manipulation. */
+ mlog(0, "%s:%.*s: clearing actions, %s\n",
+ dlm->name, res->lockname.len,
+ res->lockname.name,
+ status==DLM_RECOVERING?"recovering":
+ (status==DLM_MIGRATING?"migrating":
+ (status == DLM_FORWARD ? "forward" :
+ "nolockmanager")));
+ actions = 0;
+ }
+ if (flags & LKM_CANCEL)
+ lock->cancel_pending = 0;
+ else {
+ if (!lock->unlock_pending)
+ recovery_wait = 1;
+ else
+ lock->unlock_pending = 0;
+ }
+ }
+
+ /* get an extra ref on lock. if we are just switching
+ * lists here, we don't want the lock to go away. */
+ dlm_lock_get(lock);
+
+ if (actions & DLM_UNLOCK_REMOVE_LOCK) {
+ list_del_init(&lock->list);
+ dlm_lock_put(lock);
+ }
+ if (actions & DLM_UNLOCK_REGRANT_LOCK) {
+ dlm_lock_get(lock);
+ list_add_tail(&lock->list, &res->granted);
+ }
+ if (actions & DLM_UNLOCK_CLEAR_CONVERT_TYPE) {
+ mlog(0, "clearing convert_type at %smaster node\n",
+ master_node ? "" : "non-");
+ lock->ml.convert_type = LKM_IVMODE;
+ }
+
+ /* remove the extra ref on lock */
+ dlm_lock_put(lock);
+
+leave:
+ res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+ if (!dlm_lock_on_list(&res->converting, lock))
+ BUG_ON(lock->ml.convert_type != LKM_IVMODE);
+ else
+ BUG_ON(lock->ml.convert_type == LKM_IVMODE);
+ spin_unlock(&lock->spinlock);
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+
+ if (recovery_wait) {
+ spin_lock(&res->spinlock);
+ /* An unlock request succeeds immediately once the owner dies,
+ * and the lock has already been removed from the grant list.
+ * We have to wait for the RECOVERING state to clear, or we miss
+ * the chance to purge the lockres, since the removal happens
+ * much faster than the recovery process.
+ */
+ __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_RECOVERING);
+ spin_unlock(&res->spinlock);
+ }
+
+ /* let the caller's final dlm_lock_put handle the actual kfree */
+ if (actions & DLM_UNLOCK_FREE_LOCK) {
+ /* this should always be coupled with list removal */
+ BUG_ON(!(actions & DLM_UNLOCK_REMOVE_LOCK));
+ mlog(0, "lock %u:%llu should be gone now! refs=%d\n",
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+ kref_read(&lock->lock_refs)-1);
+ dlm_lock_put(lock);
+ }
+ if (actions & DLM_UNLOCK_CALL_AST)
+ *call_ast = 1;
+
+ /* if cancel or unlock succeeded, lvb work is done */
+ if (status == DLM_NORMAL)
+ lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB);
+
+ return status;
+}
+
+void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
+ struct dlm_lock *lock)
+{
+ /* leave DLM_LKSB_PUT_LVB on the lksb so any final
+ * update of the lvb will be sent to the new master */
+ list_del_init(&lock->list);
+}
+
+void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
+ struct dlm_lock *lock)
+{
+ list_move_tail(&lock->list, &res->granted);
+ lock->ml.convert_type = LKM_IVMODE;
+}
+
+
+static inline enum dlm_status dlmunlock_master(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ struct dlm_lockstatus *lksb,
+ int flags,
+ int *call_ast)
+{
+ return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 1);
+}
+
+static inline enum dlm_status dlmunlock_remote(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ struct dlm_lockstatus *lksb,
+ int flags, int *call_ast)
+{
+ return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 0);
+}
+
+/*
+ * locking:
+ * caller needs: none
+ * taken: none
+ * held on exit: none
+ * returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network
+ */
+static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ struct dlm_lockstatus *lksb,
+ int flags,
+ u8 owner)
+{
+ struct dlm_unlock_lock unlock;
+ int tmpret;
+ enum dlm_status ret;
+ int status = 0;
+ struct kvec vec[2];
+ size_t veclen = 1;
+
+ mlog(0, "%.*s\n", res->lockname.len, res->lockname.name);
+
+ if (owner == dlm->node_num) {
+ /* ended up trying to contact ourselves. this means
+ * that the lockres had been remote but became local
+ * via a migration. just retry it, now as local */
+ mlog(0, "%s:%.*s: this node became the master due to a "
+ "migration, re-evaluate now\n", dlm->name,
+ res->lockname.len, res->lockname.name);
+ return DLM_FORWARD;
+ }
+
+ memset(&unlock, 0, sizeof(unlock));
+ unlock.node_idx = dlm->node_num;
+ unlock.flags = cpu_to_be32(flags);
+ unlock.cookie = lock->ml.cookie;
+ unlock.namelen = res->lockname.len;
+ memcpy(unlock.name, res->lockname.name, unlock.namelen);
+
+ vec[0].iov_len = sizeof(struct dlm_unlock_lock);
+ vec[0].iov_base = &unlock;
+
+ if (flags & LKM_PUT_LVB) {
+ /* extra data to send if we are updating lvb */
+ vec[1].iov_len = DLM_LVB_LEN;
+ vec[1].iov_base = lock->lksb->lvb;
+ veclen++;
+ }
+
+ tmpret = o2net_send_message_vec(DLM_UNLOCK_LOCK_MSG, dlm->key,
+ vec, veclen, owner, &status);
+ if (tmpret >= 0) {
+ /* successfully sent and received */
+ if (status == DLM_FORWARD)
+ mlog(0, "master was in-progress. retry\n");
+ ret = status;
+ } else {
+ mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
+ "node %u\n", tmpret, DLM_UNLOCK_LOCK_MSG, dlm->key, owner);
+ if (dlm_is_host_down(tmpret)) {
+ /* NOTE: this seems strange, but it is what we want.
+ * when the master goes down during a cancel or
+ * unlock, the recovery code completes the operation
+ * as if the master had not died, then passes the
+ * updated state to the recovery master. this thread
+ * just needs to finish out the operation and call
+ * the unlockast. */
+ if (dlm_is_node_dead(dlm, owner))
+ ret = DLM_NORMAL;
+ else
+ ret = DLM_NOLOCKMGR;
+ } else {
+ /* something bad. this will BUG in ocfs2 */
+ ret = dlm_err_to_dlm_status(tmpret);
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * locking:
+ * caller needs: none
+ * taken: takes and drops res->spinlock
+ * held on exit: none
+ * returns: DLM_NORMAL, DLM_BADARGS, DLM_IVLOCKID,
+ * return value from dlmunlock_master
+ */
+int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf;
+ struct dlm_lock_resource *res = NULL;
+ struct dlm_lock *lock = NULL, *iter;
+ enum dlm_status status = DLM_NORMAL;
+ int i;
+ struct dlm_lockstatus *lksb = NULL;
+ int ignore;
+ u32 flags;
+ struct list_head *queue;
+
+ flags = be32_to_cpu(unlock->flags);
+
+ if (flags & LKM_GET_LVB) {
+ mlog(ML_ERROR, "bad args! GET_LVB specified on unlock!\n");
+ return DLM_BADARGS;
+ }
+
+ if ((flags & (LKM_PUT_LVB|LKM_CANCEL)) == (LKM_PUT_LVB|LKM_CANCEL)) {
+ mlog(ML_ERROR, "bad args! cannot modify lvb on a CANCEL "
+ "request!\n");
+ return DLM_BADARGS;
+ }
+
+ if (unlock->namelen > DLM_LOCKID_NAME_MAX) {
+ mlog(ML_ERROR, "Invalid name length in unlock handler!\n");
+ return DLM_IVBUFLEN;
+ }
+
+ if (!dlm_grab(dlm))
+ return DLM_FORWARD;
+
+ mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
+ "Domain %s not fully joined!\n", dlm->name);
+
+ mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : "none");
+
+ res = dlm_lookup_lockres(dlm, unlock->name, unlock->namelen);
+ if (!res) {
+ /* We assume here that a missing lock resource simply means
+ * it was migrated away and destroyed before the other
+ * node could detect it. */
+ mlog(0, "returning DLM_FORWARD -- res no longer exists\n");
+ status = DLM_FORWARD;
+ goto not_found;
+ }
+
+ queue = &res->granted;
+ spin_lock(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_RECOVERING) {
+ spin_unlock(&res->spinlock);
+ mlog(0, "returning DLM_RECOVERING\n");
+ status = DLM_RECOVERING;
+ goto leave;
+ }
+
+ if (res->state & DLM_LOCK_RES_MIGRATING) {
+ spin_unlock(&res->spinlock);
+ mlog(0, "returning DLM_MIGRATING\n");
+ status = DLM_MIGRATING;
+ goto leave;
+ }
+
+ if (res->owner != dlm->node_num) {
+ spin_unlock(&res->spinlock);
+ mlog(0, "returning DLM_FORWARD -- not master\n");
+ status = DLM_FORWARD;
+ goto leave;
+ }
+
+ for (i = 0; i < 3; i++) {
+ list_for_each_entry(iter, queue, list) {
+ if (iter->ml.cookie == unlock->cookie &&
+ iter->ml.node == unlock->node_idx) {
+ dlm_lock_get(iter);
+ lock = iter;
+ break;
+ }
+ }
+ if (lock)
+ break;
+ /* scan granted -> converting -> blocked queues */
+ queue++;
+ }
+ spin_unlock(&res->spinlock);
+ if (!lock) {
+ status = DLM_IVLOCKID;
+ goto not_found;
+ }
+
+ /* lock was found on queue */
+ lksb = lock->lksb;
+ if (flags & (LKM_VALBLK|LKM_PUT_LVB) &&
+ lock->ml.type != LKM_EXMODE)
+ flags &= ~(LKM_VALBLK|LKM_PUT_LVB);
+
+ /* unlockast only called on originating node */
+ if (flags & LKM_PUT_LVB) {
+ lksb->flags |= DLM_LKSB_PUT_LVB;
+ memcpy(&lksb->lvb[0], &unlock->lvb[0], DLM_LVB_LEN);
+ }
+
+ /* if this is in-progress, propagate the DLM_FORWARD
+ * all the way back out */
+ status = dlmunlock_master(dlm, res, lock, lksb, flags, &ignore);
+ if (status == DLM_FORWARD)
+ mlog(0, "lockres is in progress\n");
+
+ if (flags & LKM_PUT_LVB)
+ lksb->flags &= ~DLM_LKSB_PUT_LVB;
+
+ dlm_lockres_calc_usage(dlm, res);
+ dlm_kick_thread(dlm, res);
+
+not_found:
+ if (!lock)
+ mlog(ML_ERROR, "failed to find lock to unlock! "
+ "cookie=%u:%llu\n",
+ dlm_get_lock_cookie_node(be64_to_cpu(unlock->cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(unlock->cookie)));
+ else
+ dlm_lock_put(lock);
+
+leave:
+ if (res)
+ dlm_lockres_put(res);
+
+ dlm_put(dlm);
+
+ return status;
+}
+
+
+static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ struct dlm_lockstatus *lksb,
+ int *actions)
+{
+ enum dlm_status status;
+
+ if (dlm_lock_on_list(&res->blocked, lock)) {
+ /* cancel this outright */
+ status = DLM_NORMAL;
+ *actions = (DLM_UNLOCK_CALL_AST |
+ DLM_UNLOCK_REMOVE_LOCK);
+ } else if (dlm_lock_on_list(&res->converting, lock)) {
+ /* cancel the request, put back on granted */
+ status = DLM_NORMAL;
+ *actions = (DLM_UNLOCK_CALL_AST |
+ DLM_UNLOCK_REMOVE_LOCK |
+ DLM_UNLOCK_REGRANT_LOCK |
+ DLM_UNLOCK_CLEAR_CONVERT_TYPE);
+ } else if (dlm_lock_on_list(&res->granted, lock)) {
+ /* too late, already granted. */
+ status = DLM_CANCELGRANT;
+ *actions = DLM_UNLOCK_CALL_AST;
+ } else {
+ mlog(ML_ERROR, "lock to cancel is not on any list!\n");
+ status = DLM_IVLOCKID;
+ *actions = 0;
+ }
+ return status;
+}
+
+static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ struct dlm_lockstatus *lksb,
+ int *actions)
+{
+ enum dlm_status status;
+
+ /* unlock request */
+ if (!dlm_lock_on_list(&res->granted, lock)) {
+ status = DLM_DENIED;
+ dlm_error(status);
+ *actions = 0;
+ } else {
+ /* unlock granted lock */
+ status = DLM_NORMAL;
+ *actions = (DLM_UNLOCK_FREE_LOCK |
+ DLM_UNLOCK_CALL_AST |
+ DLM_UNLOCK_REMOVE_LOCK);
+ }
+ return status;
+}
+
+/* there seems to be no point in doing this async
+ * since (even for the remote case) there is really
+ * no work to queue up... so just do it and fire the
+ * unlockast by hand when done... */
+enum dlm_status dlmunlock(struct dlm_ctxt *dlm, struct dlm_lockstatus *lksb,
+ int flags, dlm_astunlockfunc_t *unlockast, void *data)
+{
+ enum dlm_status status;
+ struct dlm_lock_resource *res;
+ struct dlm_lock *lock = NULL;
+ int call_ast, is_master;
+
+ if (!lksb) {
+ dlm_error(DLM_BADARGS);
+ return DLM_BADARGS;
+ }
+
+ if (flags & ~(LKM_CANCEL | LKM_VALBLK | LKM_INVVALBLK)) {
+ dlm_error(DLM_BADPARAM);
+ return DLM_BADPARAM;
+ }
+
+ if ((flags & (LKM_VALBLK | LKM_CANCEL)) == (LKM_VALBLK | LKM_CANCEL)) {
+ mlog(0, "VALBLK given with CANCEL: ignoring VALBLK\n");
+ flags &= ~LKM_VALBLK;
+ }
+
+ if (!lksb->lockid || !lksb->lockid->lockres) {
+ dlm_error(DLM_BADPARAM);
+ return DLM_BADPARAM;
+ }
+
+ lock = lksb->lockid;
+ BUG_ON(!lock);
+ dlm_lock_get(lock);
+
+ res = lock->lockres;
+ BUG_ON(!res);
+ dlm_lockres_get(res);
+retry:
+ call_ast = 0;
+ /* need to retry up here because owner may have changed */
+ mlog(0, "lock=%p res=%p\n", lock, res);
+
+ spin_lock(&res->spinlock);
+ is_master = (res->owner == dlm->node_num);
+ if (flags & LKM_VALBLK && lock->ml.type != LKM_EXMODE)
+ flags &= ~LKM_VALBLK;
+ spin_unlock(&res->spinlock);
+
+ if (is_master) {
+ status = dlmunlock_master(dlm, res, lock, lksb, flags,
+ &call_ast);
+ mlog(0, "done calling dlmunlock_master: returned %d, "
+ "call_ast is %d\n", status, call_ast);
+ } else {
+ status = dlmunlock_remote(dlm, res, lock, lksb, flags,
+ &call_ast);
+ mlog(0, "done calling dlmunlock_remote: returned %d, "
+ "call_ast is %d\n", status, call_ast);
+ }
+
+ if (status == DLM_RECOVERING ||
+ status == DLM_MIGRATING ||
+ status == DLM_FORWARD ||
+ status == DLM_NOLOCKMGR) {
+
+ /* We want to go away for a tiny bit to allow recovery
+ * / migration to complete on this resource. I don't
+ * know of any wait queue we could sleep on as this
+ * may be happening on another node. Perhaps the
+ * proper solution is to queue up requests on the
+ * other end? */
+
+ /* do we want to yield(); ?? */
+ msleep(50);
+
+ mlog(0, "retrying unlock due to pending recovery/"
+ "migration/in-progress/reconnect\n");
+ goto retry;
+ }
+
+ if (call_ast) {
+ mlog(0, "calling unlockast(%p, %d)\n", data, status);
+ if (is_master) {
+ /* it is possible that there is one last bast
+ * pending. make sure it is flushed, then
+ * call the unlockast.
+			 * not an issue if this lock is mastered remotely,
+ * since this lock has been removed from the
+ * lockres queues and cannot be found. */
+ dlm_kick_thread(dlm, NULL);
+ wait_event(dlm->ast_wq,
+ dlm_lock_basts_flushed(dlm, lock));
+ }
+ (*unlockast)(data, status);
+ }
+
+ if (status == DLM_CANCELGRANT)
+ status = DLM_NORMAL;
+
+ if (status == DLM_NORMAL) {
+ mlog(0, "kicking the thread\n");
+ dlm_kick_thread(dlm, res);
+ } else
+ dlm_error(status);
+
+ dlm_lockres_calc_usage(dlm, res);
+ dlm_lockres_put(res);
+ dlm_lock_put(lock);
+
+ mlog(0, "returning status=%d!\n", status);
+ return status;
+}
+EXPORT_SYMBOL_GPL(dlmunlock);
+
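As the comment above dlmunlock() notes, the unlockast is fired synchronously rather than queued. A minimal, hedged sketch of a kernel-side caller follows; the function names ("my_unlock_done", "my_drop_lock") are invented for illustration and are not part of this patch, and the necessary dlmapi.h/printk includes are assumed.

/* Illustrative only: made-up caller of dlmunlock(); assumes the caller
 * already holds a lock described by "lksb". */
static void my_unlock_done(void *data, enum dlm_status status)
{
	/* dlmunlock() invokes this synchronously before it returns. */
	if (status != DLM_NORMAL)
		printk(KERN_WARNING "unlock completed with status %d\n",
		       status);
}

static void my_drop_lock(struct dlm_ctxt *dlm, struct dlm_lockstatus *lksb)
{
	enum dlm_status st;

	/* LKM_VALBLK asks the DLM to write the LVB back; dlmunlock()
	 * quietly drops the flag unless the lock is held at EXMODE. */
	st = dlmunlock(dlm, lksb, LKM_VALBLK, my_unlock_done, NULL);
	if (st != DLM_NORMAL)
		printk(KERN_ERR "dlmunlock failed: %d\n", st);
}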
diff --git a/fs/ocfs2/dlmfs/Makefile b/fs/ocfs2/dlmfs/Makefile
new file mode 100644
index 0000000000..c7895f65be
--- /dev/null
+++ b/fs/ocfs2/dlmfs/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_OCFS2_FS) += ocfs2_dlmfs.o
+
+ocfs2_dlmfs-objs := userdlm.o dlmfs.o
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
new file mode 100644
index 0000000000..81265123ce
--- /dev/null
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -0,0 +1,630 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * dlmfs.c
+ *
+ * Code which implements the kernel side of a minimal userspace
+ * interface to our DLM. This file handles the virtual file system
+ * used for communication with userspace. Credit should go to ramfs,
+ * which was a template for the fs side of this module.
+ *
+ * Copyright (C) 2003, 2004 Oracle. All rights reserved.
+ */
+
+/* Simple VFS hooks based on: */
+/*
+ * Resizable simple ram filesystem for Linux.
+ *
+ * Copyright (C) 2000 Linus Torvalds.
+ * 2000 Transmeta Corp.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/backing-dev.h>
+#include <linux/poll.h>
+
+#include <linux/uaccess.h>
+
+#include "../stackglue.h"
+#include "userdlm.h"
+
+#define MLOG_MASK_PREFIX ML_DLMFS
+#include "../cluster/masklog.h"
+
+
+static const struct super_operations dlmfs_ops;
+static const struct file_operations dlmfs_file_operations;
+static const struct inode_operations dlmfs_dir_inode_operations;
+static const struct inode_operations dlmfs_root_inode_operations;
+static const struct inode_operations dlmfs_file_inode_operations;
+static struct kmem_cache *dlmfs_inode_cache;
+
+struct workqueue_struct *user_dlm_worker;
+
+
+
+/*
+ * These are the ABI capabilities of dlmfs.
+ *
+ * Over time, dlmfs has added some features that were not part of the
+ * initial ABI. Unfortunately, some of these features are not detectable
+ * via standard usage. For example, Linux's default poll always returns
+ * EPOLLIN, so there is no way for a caller of poll(2) to know when dlmfs
+ * added poll support. Instead, we provide this list of new capabilities.
+ *
+ * Capabilities is a read-only attribute. We do it as a module parameter
+ * so we can discover it whether dlmfs is built in, loaded, or even not
+ * loaded.
+ *
+ * The ABI features are local to this machine's dlmfs mount. This is
+ * distinct from the locking protocol, which is concerned with inter-node
+ * interaction.
+ *
+ * Capabilities:
+ * - bast : EPOLLIN against the file descriptor of a held lock
+ * signifies a bast fired on the lock.
+ */
+#define DLMFS_CAPABILITIES "bast stackglue"
+static int param_set_dlmfs_capabilities(const char *val,
+ const struct kernel_param *kp)
+{
+ printk(KERN_ERR "%s: readonly parameter\n", kp->name);
+ return -EINVAL;
+}
+static int param_get_dlmfs_capabilities(char *buffer,
+ const struct kernel_param *kp)
+{
+ return strlcpy(buffer, DLMFS_CAPABILITIES,
+ strlen(DLMFS_CAPABILITIES) + 1);
+}
+module_param_call(capabilities, param_set_dlmfs_capabilities,
+ param_get_dlmfs_capabilities, NULL, 0444);
+MODULE_PARM_DESC(capabilities, DLMFS_CAPABILITIES);
+
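Because the parameter is registered read-only (mode 0444), userspace can usually discover the capability list through sysfs even without a dlmfs mount. A small illustrative reader follows; the sysfs path is assumed from the standard module-parameter layout rather than defined by this file.

/* Userspace sketch (not kernel code): check for a dlmfs capability. */
#include <stdio.h>
#include <string.h>

static int dlmfs_has_capability(const char *cap)
{
	char buf[256], *tok, *save;
	FILE *f = fopen("/sys/module/ocfs2_dlmfs/parameters/capabilities",
			"r");

	if (!f)
		return 0;
	if (!fgets(buf, sizeof(buf), f))
		buf[0] = '\0';
	fclose(f);

	for (tok = strtok_r(buf, " \n", &save); tok;
	     tok = strtok_r(NULL, " \n", &save))
		if (!strcmp(tok, cap))
			return 1;	/* e.g. "bast" or "stackglue" */
	return 0;
}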
+
+/*
+ * decodes a set of open flags into a valid lock level and a set of flags.
+ * returns < 0 if we have invalid flags
+ * flags which mean something to us:
+ * O_RDONLY -> PRMODE level
+ * O_WRONLY -> EXMODE level
+ *
+ * O_NONBLOCK -> NOQUEUE
+ */
+static int dlmfs_decode_open_flags(int open_flags,
+ int *level,
+ int *flags)
+{
+ if (open_flags & (O_WRONLY|O_RDWR))
+ *level = DLM_LOCK_EX;
+ else
+ *level = DLM_LOCK_PR;
+
+ *flags = 0;
+ if (open_flags & O_NONBLOCK)
+ *flags |= DLM_LKF_NOQUEUE;
+
+ return 0;
+}
+
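Seen from userspace, the decode above means the open(2) mode picks the cluster lock level and O_NONBLOCK turns the open into a trylock; the lock file itself would have been created beforehand under a domain directory. A hedged sketch with invented paths:

/* Userspace sketch: O_RDONLY -> PR (shared), O_WRONLY/O_RDWR -> EX
 * (exclusive), O_NONBLOCK -> NOQUEUE.  "/dlm/mydomain/mylock" is an
 * invented path, used only for illustration. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Shared lock: blocks until the PR level is granted. */
	int fd = open("/dlm/mydomain/mylock", O_RDONLY);
	if (fd >= 0)
		close(fd);		/* drops the PR lock */

	/* Exclusive trylock: dlmfs_file_open() turns -EAGAIN from a
	 * NOQUEUE request into -ETXTBSY. */
	fd = open("/dlm/mydomain/mylock", O_WRONLY | O_NONBLOCK);
	if (fd < 0 && errno == ETXTBSY)
		fprintf(stderr, "lock is busy elsewhere\n");
	else if (fd >= 0)
		close(fd);		/* drops the EX lock */
	return 0;
}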
+static int dlmfs_file_open(struct inode *inode,
+ struct file *file)
+{
+ int status, level, flags;
+ struct dlmfs_filp_private *fp = NULL;
+ struct dlmfs_inode_private *ip;
+
+ if (S_ISDIR(inode->i_mode))
+ BUG();
+
+ mlog(0, "open called on inode %lu, flags 0x%x\n", inode->i_ino,
+ file->f_flags);
+
+ status = dlmfs_decode_open_flags(file->f_flags, &level, &flags);
+ if (status < 0)
+ goto bail;
+
+ /* We don't want to honor O_APPEND at read/write time as it
+ * doesn't make sense for LVB writes. */
+ file->f_flags &= ~O_APPEND;
+
+ fp = kmalloc(sizeof(*fp), GFP_NOFS);
+ if (!fp) {
+ status = -ENOMEM;
+ goto bail;
+ }
+ fp->fp_lock_level = level;
+
+ ip = DLMFS_I(inode);
+
+ status = user_dlm_cluster_lock(&ip->ip_lockres, level, flags);
+ if (status < 0) {
+ /* this is a strange error to return here but I want
+		 * userspace to be able to distinguish a
+ * valid lock request from one that simply couldn't be
+ * granted. */
+ if (flags & DLM_LKF_NOQUEUE && status == -EAGAIN)
+ status = -ETXTBSY;
+ kfree(fp);
+ goto bail;
+ }
+
+ file->private_data = fp;
+bail:
+ return status;
+}
+
+static int dlmfs_file_release(struct inode *inode,
+ struct file *file)
+{
+ int level;
+ struct dlmfs_inode_private *ip = DLMFS_I(inode);
+ struct dlmfs_filp_private *fp = file->private_data;
+
+ if (S_ISDIR(inode->i_mode))
+ BUG();
+
+ mlog(0, "close called on inode %lu\n", inode->i_ino);
+
+ if (fp) {
+ level = fp->fp_lock_level;
+ if (level != DLM_LOCK_IV)
+ user_dlm_cluster_unlock(&ip->ip_lockres, level);
+
+ kfree(fp);
+ file->private_data = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * We do ->setattr() just to override size changes. Our size is the size
+ * of the LVB and nothing else.
+ */
+static int dlmfs_file_setattr(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct iattr *attr)
+{
+ int error;
+ struct inode *inode = d_inode(dentry);
+
+ attr->ia_valid &= ~ATTR_SIZE;
+ error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
+ if (error)
+ return error;
+
+ setattr_copy(&nop_mnt_idmap, inode, attr);
+ mark_inode_dirty(inode);
+ return 0;
+}
+
+static __poll_t dlmfs_file_poll(struct file *file, poll_table *wait)
+{
+ __poll_t event = 0;
+ struct inode *inode = file_inode(file);
+ struct dlmfs_inode_private *ip = DLMFS_I(inode);
+
+ poll_wait(file, &ip->ip_lockres.l_event, wait);
+
+ spin_lock(&ip->ip_lockres.l_lock);
+ if (ip->ip_lockres.l_flags & USER_LOCK_BLOCKED)
+ event = EPOLLIN | EPOLLRDNORM;
+ spin_unlock(&ip->ip_lockres.l_lock);
+
+ return event;
+}
+
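This poll hook is what backs the "bast" capability advertised above: EPOLLIN on the descriptor of a held lock means another node is blocked behind it. A minimal userspace sketch:

/* Userspace sketch: wait for a bast on a held dlmfs lock, then release
 * the lock so the waiting node can proceed. */
#include <poll.h>
#include <unistd.h>

static void wait_for_bast_then_release(int lock_fd)
{
	struct pollfd pfd = { .fd = lock_fd, .events = POLLIN };

	/* Wakes up once USER_LOCK_BLOCKED is set on the lockres. */
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
		close(lock_fd);		/* dropping the lock unblocks them */
}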
+static ssize_t dlmfs_file_read(struct file *file,
+ char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ char lvb[DLM_LVB_LEN];
+
+ if (!user_dlm_read_lvb(file_inode(file), lvb))
+ return 0;
+
+ return simple_read_from_buffer(buf, count, ppos, lvb, sizeof(lvb));
+}
+
+static ssize_t dlmfs_file_write(struct file *filp,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ char lvb_buf[DLM_LVB_LEN];
+ int bytes_left;
+ struct inode *inode = file_inode(filp);
+
+ mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",
+ inode->i_ino, count, *ppos);
+
+ if (*ppos >= DLM_LVB_LEN)
+ return -ENOSPC;
+
+ /* don't write past the lvb */
+ if (count > DLM_LVB_LEN - *ppos)
+ count = DLM_LVB_LEN - *ppos;
+
+ if (!count)
+ return 0;
+
+ bytes_left = copy_from_user(lvb_buf, buf, count);
+ count -= bytes_left;
+ if (count)
+ user_dlm_write_lvb(inode, lvb_buf, count);
+
+ *ppos = *ppos + count;
+ mlog(0, "wrote %zu bytes\n", count);
+ return count;
+}
+
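Reads and writes on a lock file move the lock value block rather than file data: the file is a DLM_LVB_LEN-byte window (64 bytes in this tree) whose contents are meaningful only while a suitable lock level is held, since user_dlm_write_lvb() requires EX and user_dlm_read_lvb() at least PR. A hedged userspace sketch, with an invented path:

/* Userspace sketch: publish a value in the LVB while holding EX.
 * 64 matches DLM_LVB_LEN; the path is illustrative only. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void publish_value(const char *msg)
{
	char lvb[64] = { 0 };
	int fd = open("/dlm/mydomain/mylock", O_RDWR);	/* EX lock */

	if (fd < 0)
		return;
	strncpy(lvb, msg, sizeof(lvb) - 1);
	if (write(fd, lvb, sizeof(lvb)) < 0)	/* copied into the LVB */
		perror("lvb write");
	close(fd);			/* EX dropped; LVB survives */
}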
+static void dlmfs_init_once(void *foo)
+{
+ struct dlmfs_inode_private *ip =
+ (struct dlmfs_inode_private *) foo;
+
+ ip->ip_conn = NULL;
+ ip->ip_parent = NULL;
+
+ inode_init_once(&ip->ip_vfs_inode);
+}
+
+static struct inode *dlmfs_alloc_inode(struct super_block *sb)
+{
+ struct dlmfs_inode_private *ip;
+
+ ip = alloc_inode_sb(sb, dlmfs_inode_cache, GFP_NOFS);
+ if (!ip)
+ return NULL;
+
+ return &ip->ip_vfs_inode;
+}
+
+static void dlmfs_free_inode(struct inode *inode)
+{
+ kmem_cache_free(dlmfs_inode_cache, DLMFS_I(inode));
+}
+
+static void dlmfs_evict_inode(struct inode *inode)
+{
+ int status;
+ struct dlmfs_inode_private *ip;
+ struct user_lock_res *lockres;
+ int teardown;
+
+ clear_inode(inode);
+
+ mlog(0, "inode %lu\n", inode->i_ino);
+
+ ip = DLMFS_I(inode);
+ lockres = &ip->ip_lockres;
+
+ if (S_ISREG(inode->i_mode)) {
+ spin_lock(&lockres->l_lock);
+ teardown = !!(lockres->l_flags & USER_LOCK_IN_TEARDOWN);
+ spin_unlock(&lockres->l_lock);
+ if (!teardown) {
+ status = user_dlm_destroy_lock(lockres);
+ if (status < 0)
+ mlog_errno(status);
+ }
+ iput(ip->ip_parent);
+ goto clear_fields;
+ }
+
+ mlog(0, "we're a directory, ip->ip_conn = 0x%p\n", ip->ip_conn);
+	/* we must be a directory. If required, let's unregister the
+ * dlm context now. */
+ if (ip->ip_conn)
+ user_dlm_unregister(ip->ip_conn);
+clear_fields:
+ ip->ip_parent = NULL;
+ ip->ip_conn = NULL;
+}
+
+static struct inode *dlmfs_get_root_inode(struct super_block *sb)
+{
+ struct inode *inode = new_inode(sb);
+ umode_t mode = S_IFDIR | 0755;
+
+ if (inode) {
+ inode->i_ino = get_next_ino();
+ inode_init_owner(&nop_mnt_idmap, inode, NULL, mode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ inc_nlink(inode);
+
+ inode->i_fop = &simple_dir_operations;
+ inode->i_op = &dlmfs_root_inode_operations;
+ }
+
+ return inode;
+}
+
+static struct inode *dlmfs_get_inode(struct inode *parent,
+ struct dentry *dentry,
+ umode_t mode)
+{
+ struct super_block *sb = parent->i_sb;
+ struct inode * inode = new_inode(sb);
+ struct dlmfs_inode_private *ip;
+
+ if (!inode)
+ return NULL;
+
+ inode->i_ino = get_next_ino();
+ inode_init_owner(&nop_mnt_idmap, inode, parent, mode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+
+ ip = DLMFS_I(inode);
+ ip->ip_conn = DLMFS_I(parent)->ip_conn;
+
+ switch (mode & S_IFMT) {
+ default:
+ /* for now we don't support anything other than
+ * directories and regular files. */
+ BUG();
+ break;
+ case S_IFREG:
+ inode->i_op = &dlmfs_file_inode_operations;
+ inode->i_fop = &dlmfs_file_operations;
+
+ i_size_write(inode, DLM_LVB_LEN);
+
+ user_dlm_lock_res_init(&ip->ip_lockres, dentry);
+
+		/* released at clear_inode time, this ensures that we
+ * get to drop the dlm reference on each lock *before*
+ * we call the unregister code for releasing parent
+ * directories. */
+ ip->ip_parent = igrab(parent);
+ BUG_ON(!ip->ip_parent);
+ break;
+ case S_IFDIR:
+ inode->i_op = &dlmfs_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+
+ /* directory inodes start off with i_nlink ==
+ * 2 (for "." entry) */
+ inc_nlink(inode);
+ break;
+ }
+ return inode;
+}
+
+/*
+ * File creation. Allocate an inode, and we're done..
+ */
+/* SMP-safe */
+static int dlmfs_mkdir(struct mnt_idmap * idmap,
+ struct inode * dir,
+ struct dentry * dentry,
+ umode_t mode)
+{
+ int status;
+ struct inode *inode = NULL;
+ const struct qstr *domain = &dentry->d_name;
+ struct dlmfs_inode_private *ip;
+ struct ocfs2_cluster_connection *conn;
+
+ mlog(0, "mkdir %.*s\n", domain->len, domain->name);
+
+ /* verify that we have a proper domain */
+ if (domain->len >= GROUP_NAME_MAX) {
+ status = -EINVAL;
+ mlog(ML_ERROR, "invalid domain name for directory.\n");
+ goto bail;
+ }
+
+ inode = dlmfs_get_inode(dir, dentry, mode | S_IFDIR);
+ if (!inode) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ ip = DLMFS_I(inode);
+
+ conn = user_dlm_register(domain);
+ if (IS_ERR(conn)) {
+ status = PTR_ERR(conn);
+ mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n",
+ status, domain->len, domain->name);
+ goto bail;
+ }
+ ip->ip_conn = conn;
+
+ inc_nlink(dir);
+ d_instantiate(dentry, inode);
+ dget(dentry); /* Extra count - pin the dentry in core */
+
+ status = 0;
+bail:
+ if (status < 0)
+ iput(inode);
+ return status;
+}
+
+static int dlmfs_create(struct mnt_idmap *idmap,
+ struct inode *dir,
+ struct dentry *dentry,
+ umode_t mode,
+ bool excl)
+{
+ int status = 0;
+ struct inode *inode;
+ const struct qstr *name = &dentry->d_name;
+
+ mlog(0, "create %.*s\n", name->len, name->name);
+
+ /* verify name is valid and doesn't contain any dlm reserved
+ * characters */
+ if (name->len >= USER_DLM_LOCK_ID_MAX_LEN ||
+ name->name[0] == '$') {
+ status = -EINVAL;
+ mlog(ML_ERROR, "invalid lock name, %.*s\n", name->len,
+ name->name);
+ goto bail;
+ }
+
+ inode = dlmfs_get_inode(dir, dentry, mode | S_IFREG);
+ if (!inode) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ d_instantiate(dentry, inode);
+ dget(dentry); /* Extra count - pin the dentry in core */
+bail:
+ return status;
+}
+
+static int dlmfs_unlink(struct inode *dir,
+ struct dentry *dentry)
+{
+ int status;
+ struct inode *inode = d_inode(dentry);
+
+ mlog(0, "unlink inode %lu\n", inode->i_ino);
+
+ /* if there are no current holders, or none that are waiting
+ * to acquire a lock, this basically destroys our lockres. */
+ status = user_dlm_destroy_lock(&DLMFS_I(inode)->ip_lockres);
+ if (status < 0) {
+ mlog(ML_ERROR, "unlink %pd, error %d from destroy\n",
+ dentry, status);
+ goto bail;
+ }
+ status = simple_unlink(dir, dentry);
+bail:
+ return status;
+}
+
+static int dlmfs_fill_super(struct super_block * sb,
+ void * data,
+ int silent)
+{
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
+ sb->s_magic = DLMFS_MAGIC;
+ sb->s_op = &dlmfs_ops;
+ sb->s_root = d_make_root(dlmfs_get_root_inode(sb));
+ if (!sb->s_root)
+ return -ENOMEM;
+ return 0;
+}
+
+static const struct file_operations dlmfs_file_operations = {
+ .open = dlmfs_file_open,
+ .release = dlmfs_file_release,
+ .poll = dlmfs_file_poll,
+ .read = dlmfs_file_read,
+ .write = dlmfs_file_write,
+ .llseek = default_llseek,
+};
+
+static const struct inode_operations dlmfs_dir_inode_operations = {
+ .create = dlmfs_create,
+ .lookup = simple_lookup,
+ .unlink = dlmfs_unlink,
+};
+
+/* this way we can restrict mkdir to only the toplevel of the fs. */
+static const struct inode_operations dlmfs_root_inode_operations = {
+ .lookup = simple_lookup,
+ .mkdir = dlmfs_mkdir,
+ .rmdir = simple_rmdir,
+};
+
+static const struct super_operations dlmfs_ops = {
+ .statfs = simple_statfs,
+ .alloc_inode = dlmfs_alloc_inode,
+ .free_inode = dlmfs_free_inode,
+ .evict_inode = dlmfs_evict_inode,
+ .drop_inode = generic_delete_inode,
+};
+
+static const struct inode_operations dlmfs_file_inode_operations = {
+ .getattr = simple_getattr,
+ .setattr = dlmfs_file_setattr,
+};
+
+static struct dentry *dlmfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ return mount_nodev(fs_type, flags, data, dlmfs_fill_super);
+}
+
+static struct file_system_type dlmfs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "ocfs2_dlmfs",
+ .mount = dlmfs_mount,
+ .kill_sb = kill_litter_super,
+};
+MODULE_ALIAS_FS("ocfs2_dlmfs");
+
+static int __init init_dlmfs_fs(void)
+{
+ int status;
+ int cleanup_inode = 0, cleanup_worker = 0;
+
+ dlmfs_inode_cache = kmem_cache_create("dlmfs_inode_cache",
+ sizeof(struct dlmfs_inode_private),
+ 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
+ SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+ dlmfs_init_once);
+ if (!dlmfs_inode_cache) {
+ status = -ENOMEM;
+ goto bail;
+ }
+ cleanup_inode = 1;
+
+ user_dlm_worker = alloc_workqueue("user_dlm", WQ_MEM_RECLAIM, 0);
+ if (!user_dlm_worker) {
+ status = -ENOMEM;
+ goto bail;
+ }
+ cleanup_worker = 1;
+
+ user_dlm_set_locking_protocol();
+ status = register_filesystem(&dlmfs_fs_type);
+bail:
+ if (status) {
+ if (cleanup_inode)
+ kmem_cache_destroy(dlmfs_inode_cache);
+ if (cleanup_worker)
+ destroy_workqueue(user_dlm_worker);
+ } else
+ printk("OCFS2 User DLM kernel interface loaded\n");
+ return status;
+}
+
+static void __exit exit_dlmfs_fs(void)
+{
+ unregister_filesystem(&dlmfs_fs_type);
+
+ destroy_workqueue(user_dlm_worker);
+
+ /*
+ * Make sure all delayed rcu free inodes are flushed before we
+ * destroy cache.
+ */
+ rcu_barrier();
+ kmem_cache_destroy(dlmfs_inode_cache);
+
+}
+
+MODULE_AUTHOR("Oracle");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("OCFS2 DLM-Filesystem");
+
+module_init(init_dlmfs_fs)
+module_exit(exit_dlmfs_fs)
diff --git a/fs/ocfs2/dlmfs/userdlm.c b/fs/ocfs2/dlmfs/userdlm.c
new file mode 100644
index 0000000000..617c92e7b9
--- /dev/null
+++ b/fs/ocfs2/dlmfs/userdlm.c
@@ -0,0 +1,682 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * userdlm.c
+ *
+ * Code which implements the kernel side of a minimal userspace
+ * interface to our DLM.
+ *
+ * Many of the functions here are pared down versions of dlmglue.c
+ * functions.
+ *
+ * Copyright (C) 2003, 2004 Oracle. All rights reserved.
+ */
+
+#include <linux/signal.h>
+#include <linux/sched/signal.h>
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/crc32.h>
+
+#include "../ocfs2_lockingver.h"
+#include "../stackglue.h"
+#include "userdlm.h"
+
+#define MLOG_MASK_PREFIX ML_DLMFS
+#include "../cluster/masklog.h"
+
+
+static inline struct user_lock_res *user_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
+{
+ return container_of(lksb, struct user_lock_res, l_lksb);
+}
+
+static inline int user_check_wait_flag(struct user_lock_res *lockres,
+ int flag)
+{
+ int ret;
+
+ spin_lock(&lockres->l_lock);
+ ret = lockres->l_flags & flag;
+ spin_unlock(&lockres->l_lock);
+
+ return ret;
+}
+
+static inline void user_wait_on_busy_lock(struct user_lock_res *lockres)
+
+{
+ wait_event(lockres->l_event,
+ !user_check_wait_flag(lockres, USER_LOCK_BUSY));
+}
+
+static inline void user_wait_on_blocked_lock(struct user_lock_res *lockres)
+
+{
+ wait_event(lockres->l_event,
+ !user_check_wait_flag(lockres, USER_LOCK_BLOCKED));
+}
+
+/* I heart container_of... */
+static inline struct ocfs2_cluster_connection *
+cluster_connection_from_user_lockres(struct user_lock_res *lockres)
+{
+ struct dlmfs_inode_private *ip;
+
+ ip = container_of(lockres,
+ struct dlmfs_inode_private,
+ ip_lockres);
+ return ip->ip_conn;
+}
+
+static struct inode *
+user_dlm_inode_from_user_lockres(struct user_lock_res *lockres)
+{
+ struct dlmfs_inode_private *ip;
+
+ ip = container_of(lockres,
+ struct dlmfs_inode_private,
+ ip_lockres);
+ return &ip->ip_vfs_inode;
+}
+
+static inline void user_recover_from_dlm_error(struct user_lock_res *lockres)
+{
+ spin_lock(&lockres->l_lock);
+ lockres->l_flags &= ~USER_LOCK_BUSY;
+ spin_unlock(&lockres->l_lock);
+}
+
+#define user_log_dlm_error(_func, _stat, _lockres) do { \
+ mlog(ML_ERROR, "Dlm error %d while calling %s on " \
+ "resource %.*s\n", _stat, _func, \
+ _lockres->l_namelen, _lockres->l_name); \
+} while (0)
+
+/* WARNING: This function lives in a world where the only three lock
+ * levels are EX, PR, and NL. It *will* have to be adjusted when more
+ * lock types are added. */
+static inline int user_highest_compat_lock_level(int level)
+{
+ int new_level = DLM_LOCK_EX;
+
+ if (level == DLM_LOCK_EX)
+ new_level = DLM_LOCK_NL;
+ else if (level == DLM_LOCK_PR)
+ new_level = DLM_LOCK_PR;
+ return new_level;
+}
+
+static void user_ast(struct ocfs2_dlm_lksb *lksb)
+{
+ struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
+ int status;
+
+ mlog(ML_BASTS, "AST fired for lockres %.*s, level %d => %d\n",
+ lockres->l_namelen, lockres->l_name, lockres->l_level,
+ lockres->l_requested);
+
+ spin_lock(&lockres->l_lock);
+
+ status = ocfs2_dlm_lock_status(&lockres->l_lksb);
+ if (status) {
+ mlog(ML_ERROR, "lksb status value of %u on lockres %.*s\n",
+ status, lockres->l_namelen, lockres->l_name);
+ spin_unlock(&lockres->l_lock);
+ return;
+ }
+
+ mlog_bug_on_msg(lockres->l_requested == DLM_LOCK_IV,
+ "Lockres %.*s, requested ivmode. flags 0x%x\n",
+ lockres->l_namelen, lockres->l_name, lockres->l_flags);
+
+ /* we're downconverting. */
+ if (lockres->l_requested < lockres->l_level) {
+ if (lockres->l_requested <=
+ user_highest_compat_lock_level(lockres->l_blocking)) {
+ lockres->l_blocking = DLM_LOCK_NL;
+ lockres->l_flags &= ~USER_LOCK_BLOCKED;
+ }
+ }
+
+ lockres->l_level = lockres->l_requested;
+ lockres->l_requested = DLM_LOCK_IV;
+ lockres->l_flags |= USER_LOCK_ATTACHED;
+ lockres->l_flags &= ~USER_LOCK_BUSY;
+
+ spin_unlock(&lockres->l_lock);
+
+ wake_up(&lockres->l_event);
+}
+
+static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres)
+{
+ struct inode *inode;
+ inode = user_dlm_inode_from_user_lockres(lockres);
+ if (!igrab(inode))
+ BUG();
+}
+
+static void user_dlm_unblock_lock(struct work_struct *work);
+
+static void __user_dlm_queue_lockres(struct user_lock_res *lockres)
+{
+ if (!(lockres->l_flags & USER_LOCK_QUEUED)) {
+ user_dlm_grab_inode_ref(lockres);
+
+ INIT_WORK(&lockres->l_work, user_dlm_unblock_lock);
+
+ queue_work(user_dlm_worker, &lockres->l_work);
+ lockres->l_flags |= USER_LOCK_QUEUED;
+ }
+}
+
+static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres)
+{
+ int queue = 0;
+
+ if (!(lockres->l_flags & USER_LOCK_BLOCKED))
+ return;
+
+ switch (lockres->l_blocking) {
+ case DLM_LOCK_EX:
+ if (!lockres->l_ex_holders && !lockres->l_ro_holders)
+ queue = 1;
+ break;
+ case DLM_LOCK_PR:
+ if (!lockres->l_ex_holders)
+ queue = 1;
+ break;
+ default:
+ BUG();
+ }
+
+ if (queue)
+ __user_dlm_queue_lockres(lockres);
+}
+
+static void user_bast(struct ocfs2_dlm_lksb *lksb, int level)
+{
+ struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
+
+ mlog(ML_BASTS, "BAST fired for lockres %.*s, blocking %d, level %d\n",
+ lockres->l_namelen, lockres->l_name, level, lockres->l_level);
+
+ spin_lock(&lockres->l_lock);
+ lockres->l_flags |= USER_LOCK_BLOCKED;
+ if (level > lockres->l_blocking)
+ lockres->l_blocking = level;
+
+ __user_dlm_queue_lockres(lockres);
+ spin_unlock(&lockres->l_lock);
+
+ wake_up(&lockres->l_event);
+}
+
+static void user_unlock_ast(struct ocfs2_dlm_lksb *lksb, int status)
+{
+ struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
+
+ mlog(ML_BASTS, "UNLOCK AST fired for lockres %.*s, flags 0x%x\n",
+ lockres->l_namelen, lockres->l_name, lockres->l_flags);
+
+ if (status)
+ mlog(ML_ERROR, "dlm returns status %d\n", status);
+
+ spin_lock(&lockres->l_lock);
+ /* The teardown flag gets set early during the unlock process,
+ * so test the cancel flag to make sure that this ast isn't
+ * for a concurrent cancel. */
+ if (lockres->l_flags & USER_LOCK_IN_TEARDOWN
+ && !(lockres->l_flags & USER_LOCK_IN_CANCEL)) {
+ lockres->l_level = DLM_LOCK_IV;
+ } else if (status == DLM_CANCELGRANT) {
+ /* We tried to cancel a convert request, but it was
+ * already granted. Don't clear the busy flag - the
+ * ast should've done this already. */
+ BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
+ lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
+ goto out_noclear;
+ } else {
+ BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
+ /* Cancel succeeded, we want to re-queue */
+ lockres->l_requested = DLM_LOCK_IV; /* cancel an
+ * upconvert
+ * request. */
+ lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
+ /* we want the unblock thread to look at it again
+ * now. */
+ if (lockres->l_flags & USER_LOCK_BLOCKED)
+ __user_dlm_queue_lockres(lockres);
+ }
+
+ lockres->l_flags &= ~USER_LOCK_BUSY;
+out_noclear:
+ spin_unlock(&lockres->l_lock);
+
+ wake_up(&lockres->l_event);
+}
+
+/*
+ * This is the userdlmfs locking protocol version.
+ *
+ * See fs/ocfs2/dlmglue.c for more details on locking versions.
+ */
+static struct ocfs2_locking_protocol user_dlm_lproto = {
+ .lp_max_version = {
+ .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
+ .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
+ },
+ .lp_lock_ast = user_ast,
+ .lp_blocking_ast = user_bast,
+ .lp_unlock_ast = user_unlock_ast,
+};
+
+static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres)
+{
+ struct inode *inode;
+ inode = user_dlm_inode_from_user_lockres(lockres);
+ iput(inode);
+}
+
+static void user_dlm_unblock_lock(struct work_struct *work)
+{
+ int new_level, status;
+ struct user_lock_res *lockres =
+ container_of(work, struct user_lock_res, l_work);
+ struct ocfs2_cluster_connection *conn =
+ cluster_connection_from_user_lockres(lockres);
+
+ mlog(0, "lockres %.*s\n", lockres->l_namelen, lockres->l_name);
+
+ spin_lock(&lockres->l_lock);
+
+ mlog_bug_on_msg(!(lockres->l_flags & USER_LOCK_QUEUED),
+ "Lockres %.*s, flags 0x%x\n",
+ lockres->l_namelen, lockres->l_name, lockres->l_flags);
+
+ /* notice that we don't clear USER_LOCK_BLOCKED here. If it's
+	 * set, we want user_ast to clear it. */
+ lockres->l_flags &= ~USER_LOCK_QUEUED;
+
+ /* It's valid to get here and no longer be blocked - if we get
+ * several basts in a row, we might be queued by the first
+ * one, the unblock thread might run and clear the queued
+ * flag, and finally we might get another bast which re-queues
+ * us before our ast for the downconvert is called. */
+ if (!(lockres->l_flags & USER_LOCK_BLOCKED)) {
+ mlog(ML_BASTS, "lockres %.*s USER_LOCK_BLOCKED\n",
+ lockres->l_namelen, lockres->l_name);
+ spin_unlock(&lockres->l_lock);
+ goto drop_ref;
+ }
+
+ if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
+ mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_TEARDOWN\n",
+ lockres->l_namelen, lockres->l_name);
+ spin_unlock(&lockres->l_lock);
+ goto drop_ref;
+ }
+
+ if (lockres->l_flags & USER_LOCK_BUSY) {
+ if (lockres->l_flags & USER_LOCK_IN_CANCEL) {
+ mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_CANCEL\n",
+ lockres->l_namelen, lockres->l_name);
+ spin_unlock(&lockres->l_lock);
+ goto drop_ref;
+ }
+
+ lockres->l_flags |= USER_LOCK_IN_CANCEL;
+ spin_unlock(&lockres->l_lock);
+
+ status = ocfs2_dlm_unlock(conn, &lockres->l_lksb,
+ DLM_LKF_CANCEL);
+ if (status)
+ user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
+ goto drop_ref;
+ }
+
+ /* If there are still incompat holders, we can exit safely
+ * without worrying about re-queueing this lock as that will
+ * happen on the last call to user_cluster_unlock. */
+ if ((lockres->l_blocking == DLM_LOCK_EX)
+ && (lockres->l_ex_holders || lockres->l_ro_holders)) {
+ spin_unlock(&lockres->l_lock);
+ mlog(ML_BASTS, "lockres %.*s, EX/PR Holders %u,%u\n",
+ lockres->l_namelen, lockres->l_name,
+ lockres->l_ex_holders, lockres->l_ro_holders);
+ goto drop_ref;
+ }
+
+ if ((lockres->l_blocking == DLM_LOCK_PR)
+ && lockres->l_ex_holders) {
+ spin_unlock(&lockres->l_lock);
+ mlog(ML_BASTS, "lockres %.*s, EX Holders %u\n",
+ lockres->l_namelen, lockres->l_name,
+ lockres->l_ex_holders);
+ goto drop_ref;
+ }
+
+ /* yay, we can downconvert now. */
+ new_level = user_highest_compat_lock_level(lockres->l_blocking);
+ lockres->l_requested = new_level;
+ lockres->l_flags |= USER_LOCK_BUSY;
+ mlog(ML_BASTS, "lockres %.*s, downconvert %d => %d\n",
+ lockres->l_namelen, lockres->l_name, lockres->l_level, new_level);
+ spin_unlock(&lockres->l_lock);
+
+ /* need lock downconvert request now... */
+ status = ocfs2_dlm_lock(conn, new_level, &lockres->l_lksb,
+ DLM_LKF_CONVERT|DLM_LKF_VALBLK,
+ lockres->l_name,
+ lockres->l_namelen);
+ if (status) {
+ user_log_dlm_error("ocfs2_dlm_lock", status, lockres);
+ user_recover_from_dlm_error(lockres);
+ }
+
+drop_ref:
+ user_dlm_drop_inode_ref(lockres);
+}
+
+static inline void user_dlm_inc_holders(struct user_lock_res *lockres,
+ int level)
+{
+ switch(level) {
+ case DLM_LOCK_EX:
+ lockres->l_ex_holders++;
+ break;
+ case DLM_LOCK_PR:
+ lockres->l_ro_holders++;
+ break;
+ default:
+ BUG();
+ }
+}
+
+/* predict what lock level we'll be dropping down to on behalf
+ * of another node, and return true if the currently wanted
+ * level will be compatible with it. */
+static inline int
+user_may_continue_on_blocked_lock(struct user_lock_res *lockres,
+ int wanted)
+{
+ BUG_ON(!(lockres->l_flags & USER_LOCK_BLOCKED));
+
+ return wanted <= user_highest_compat_lock_level(lockres->l_blocking);
+}
+
+int user_dlm_cluster_lock(struct user_lock_res *lockres,
+ int level,
+ int lkm_flags)
+{
+ int status, local_flags;
+ struct ocfs2_cluster_connection *conn =
+ cluster_connection_from_user_lockres(lockres);
+
+ if (level != DLM_LOCK_EX &&
+ level != DLM_LOCK_PR) {
+ mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
+ lockres->l_namelen, lockres->l_name);
+ status = -EINVAL;
+ goto bail;
+ }
+
+ mlog(ML_BASTS, "lockres %.*s, level %d, flags = 0x%x\n",
+ lockres->l_namelen, lockres->l_name, level, lkm_flags);
+
+again:
+ if (signal_pending(current)) {
+ status = -ERESTARTSYS;
+ goto bail;
+ }
+
+ spin_lock(&lockres->l_lock);
+ if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
+ spin_unlock(&lockres->l_lock);
+ status = -EAGAIN;
+ goto bail;
+ }
+
+ /* We only compare against the currently granted level
+ * here. If the lock is blocked waiting on a downconvert,
+ * we'll get caught below. */
+ if ((lockres->l_flags & USER_LOCK_BUSY) &&
+ (level > lockres->l_level)) {
+ /* is someone sitting in dlm_lock? If so, wait on
+ * them. */
+ spin_unlock(&lockres->l_lock);
+
+ user_wait_on_busy_lock(lockres);
+ goto again;
+ }
+
+ if ((lockres->l_flags & USER_LOCK_BLOCKED) &&
+ (!user_may_continue_on_blocked_lock(lockres, level))) {
+		/* the lock is currently blocked on behalf of
+		 * another node */
+ spin_unlock(&lockres->l_lock);
+
+ user_wait_on_blocked_lock(lockres);
+ goto again;
+ }
+
+ if (level > lockres->l_level) {
+ local_flags = lkm_flags | DLM_LKF_VALBLK;
+ if (lockres->l_level != DLM_LOCK_IV)
+ local_flags |= DLM_LKF_CONVERT;
+
+ lockres->l_requested = level;
+ lockres->l_flags |= USER_LOCK_BUSY;
+ spin_unlock(&lockres->l_lock);
+
+ BUG_ON(level == DLM_LOCK_IV);
+ BUG_ON(level == DLM_LOCK_NL);
+
+ /* call dlm_lock to upgrade lock now */
+ status = ocfs2_dlm_lock(conn, level, &lockres->l_lksb,
+ local_flags, lockres->l_name,
+ lockres->l_namelen);
+ if (status) {
+ if ((lkm_flags & DLM_LKF_NOQUEUE) &&
+ (status != -EAGAIN))
+ user_log_dlm_error("ocfs2_dlm_lock",
+ status, lockres);
+ user_recover_from_dlm_error(lockres);
+ goto bail;
+ }
+
+ user_wait_on_busy_lock(lockres);
+ goto again;
+ }
+
+ user_dlm_inc_holders(lockres, level);
+ spin_unlock(&lockres->l_lock);
+
+ status = 0;
+bail:
+ return status;
+}
+
+static inline void user_dlm_dec_holders(struct user_lock_res *lockres,
+ int level)
+{
+ switch(level) {
+ case DLM_LOCK_EX:
+ BUG_ON(!lockres->l_ex_holders);
+ lockres->l_ex_holders--;
+ break;
+ case DLM_LOCK_PR:
+ BUG_ON(!lockres->l_ro_holders);
+ lockres->l_ro_holders--;
+ break;
+ default:
+ BUG();
+ }
+}
+
+void user_dlm_cluster_unlock(struct user_lock_res *lockres,
+ int level)
+{
+ if (level != DLM_LOCK_EX &&
+ level != DLM_LOCK_PR) {
+ mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
+ lockres->l_namelen, lockres->l_name);
+ return;
+ }
+
+ spin_lock(&lockres->l_lock);
+ user_dlm_dec_holders(lockres, level);
+ __user_dlm_cond_queue_lockres(lockres);
+ spin_unlock(&lockres->l_lock);
+}
+
+void user_dlm_write_lvb(struct inode *inode,
+ const char *val,
+ unsigned int len)
+{
+ struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
+ char *lvb;
+
+ BUG_ON(len > DLM_LVB_LEN);
+
+ spin_lock(&lockres->l_lock);
+
+ BUG_ON(lockres->l_level < DLM_LOCK_EX);
+ lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
+ memcpy(lvb, val, len);
+
+ spin_unlock(&lockres->l_lock);
+}
+
+bool user_dlm_read_lvb(struct inode *inode, char *val)
+{
+ struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
+ char *lvb;
+ bool ret = true;
+
+ spin_lock(&lockres->l_lock);
+
+ BUG_ON(lockres->l_level < DLM_LOCK_PR);
+ if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)) {
+ lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
+ memcpy(val, lvb, DLM_LVB_LEN);
+ } else
+ ret = false;
+
+ spin_unlock(&lockres->l_lock);
+ return ret;
+}
+
+void user_dlm_lock_res_init(struct user_lock_res *lockres,
+ struct dentry *dentry)
+{
+ memset(lockres, 0, sizeof(*lockres));
+
+ spin_lock_init(&lockres->l_lock);
+ init_waitqueue_head(&lockres->l_event);
+ lockres->l_level = DLM_LOCK_IV;
+ lockres->l_requested = DLM_LOCK_IV;
+ lockres->l_blocking = DLM_LOCK_IV;
+
+ /* should have been checked before getting here. */
+ BUG_ON(dentry->d_name.len >= USER_DLM_LOCK_ID_MAX_LEN);
+
+ memcpy(lockres->l_name,
+ dentry->d_name.name,
+ dentry->d_name.len);
+ lockres->l_namelen = dentry->d_name.len;
+}
+
+int user_dlm_destroy_lock(struct user_lock_res *lockres)
+{
+ int status = -EBUSY;
+ struct ocfs2_cluster_connection *conn =
+ cluster_connection_from_user_lockres(lockres);
+
+ mlog(ML_BASTS, "lockres %.*s\n", lockres->l_namelen, lockres->l_name);
+
+ spin_lock(&lockres->l_lock);
+ if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
+ spin_unlock(&lockres->l_lock);
+ goto bail;
+ }
+
+ lockres->l_flags |= USER_LOCK_IN_TEARDOWN;
+
+ while (lockres->l_flags & USER_LOCK_BUSY) {
+ spin_unlock(&lockres->l_lock);
+
+ user_wait_on_busy_lock(lockres);
+
+ spin_lock(&lockres->l_lock);
+ }
+
+ if (lockres->l_ro_holders || lockres->l_ex_holders) {
+ lockres->l_flags &= ~USER_LOCK_IN_TEARDOWN;
+ spin_unlock(&lockres->l_lock);
+ goto bail;
+ }
+
+ status = 0;
+ if (!(lockres->l_flags & USER_LOCK_ATTACHED)) {
+ /*
+		 * The lock was never requested; leave USER_LOCK_IN_TEARDOWN set
+		 * to avoid new lock requests coming in.
+ */
+ spin_unlock(&lockres->l_lock);
+ goto bail;
+ }
+
+ lockres->l_flags |= USER_LOCK_BUSY;
+ spin_unlock(&lockres->l_lock);
+
+ status = ocfs2_dlm_unlock(conn, &lockres->l_lksb, DLM_LKF_VALBLK);
+ if (status) {
+ spin_lock(&lockres->l_lock);
+ lockres->l_flags &= ~USER_LOCK_IN_TEARDOWN;
+ lockres->l_flags &= ~USER_LOCK_BUSY;
+ spin_unlock(&lockres->l_lock);
+ user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
+ goto bail;
+ }
+
+ user_wait_on_busy_lock(lockres);
+
+ status = 0;
+bail:
+ return status;
+}
+
+static void user_dlm_recovery_handler_noop(int node_num,
+ void *recovery_data)
+{
+ /* We ignore recovery events */
+ return;
+}
+
+void user_dlm_set_locking_protocol(void)
+{
+ ocfs2_stack_glue_set_max_proto_version(&user_dlm_lproto.lp_max_version);
+}
+
+struct ocfs2_cluster_connection *user_dlm_register(const struct qstr *name)
+{
+ int rc;
+ struct ocfs2_cluster_connection *conn;
+
+ rc = ocfs2_cluster_connect_agnostic(name->name, name->len,
+ &user_dlm_lproto,
+ user_dlm_recovery_handler_noop,
+ NULL, &conn);
+ if (rc)
+ mlog_errno(rc);
+
+ return rc ? ERR_PTR(rc) : conn;
+}
+
+void user_dlm_unregister(struct ocfs2_cluster_connection *conn)
+{
+ ocfs2_cluster_disconnect(conn, 0);
+}
diff --git a/fs/ocfs2/dlmfs/userdlm.h b/fs/ocfs2/dlmfs/userdlm.h
new file mode 100644
index 0000000000..47ba18eac4
--- /dev/null
+++ b/fs/ocfs2/dlmfs/userdlm.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * userdlm.h
+ *
+ * Userspace dlm defines
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ */
+
+
+#ifndef USERDLM_H
+#define USERDLM_H
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+/* user_lock_res->l_flags flags. */
+#define USER_LOCK_ATTACHED (0x00000001) /* we have initialized
+ * the lvb */
+#define USER_LOCK_BUSY (0x00000002) /* we are currently in
+ * dlm_lock */
+#define USER_LOCK_BLOCKED (0x00000004) /* blocked waiting to
+ * downconvert*/
+#define USER_LOCK_IN_TEARDOWN (0x00000008) /* we're currently
+ * destroying this
+ * lock. */
+#define USER_LOCK_QUEUED (0x00000010) /* lock is on the
+ * workqueue */
+#define USER_LOCK_IN_CANCEL (0x00000020)
+
+struct user_lock_res {
+ spinlock_t l_lock;
+
+ int l_flags;
+
+#define USER_DLM_LOCK_ID_MAX_LEN 32
+ char l_name[USER_DLM_LOCK_ID_MAX_LEN];
+ int l_namelen;
+ int l_level;
+ unsigned int l_ro_holders;
+ unsigned int l_ex_holders;
+ struct ocfs2_dlm_lksb l_lksb;
+
+ int l_requested;
+ int l_blocking;
+
+ wait_queue_head_t l_event;
+
+ struct work_struct l_work;
+};
+
+extern struct workqueue_struct *user_dlm_worker;
+
+void user_dlm_lock_res_init(struct user_lock_res *lockres,
+ struct dentry *dentry);
+int user_dlm_destroy_lock(struct user_lock_res *lockres);
+int user_dlm_cluster_lock(struct user_lock_res *lockres,
+ int level,
+ int lkm_flags);
+void user_dlm_cluster_unlock(struct user_lock_res *lockres,
+ int level);
+void user_dlm_write_lvb(struct inode *inode,
+ const char *val,
+ unsigned int len);
+bool user_dlm_read_lvb(struct inode *inode, char *val);
+struct ocfs2_cluster_connection *user_dlm_register(const struct qstr *name);
+void user_dlm_unregister(struct ocfs2_cluster_connection *conn);
+void user_dlm_set_locking_protocol(void);
+
+struct dlmfs_inode_private {
+ struct ocfs2_cluster_connection *ip_conn;
+
+ struct user_lock_res ip_lockres; /* unused for directories. */
+ struct inode *ip_parent;
+
+ struct inode ip_vfs_inode;
+};
+
+static inline struct dlmfs_inode_private *
+DLMFS_I(struct inode *inode)
+{
+ return container_of(inode,
+ struct dlmfs_inode_private,
+ ip_vfs_inode);
+}
+
+struct dlmfs_filp_private {
+ int fp_lock_level;
+};
+
+#define DLMFS_MAGIC 0x76a9f425
+
+#endif /* USERDLM_H */
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
new file mode 100644
index 0000000000..c3e2961ee5
--- /dev/null
+++ b/fs/ocfs2/dlmglue.c
@@ -0,0 +1,4473 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * dlmglue.c
+ *
+ * Code which implements an OCFS2 specific interface to our DLM.
+ *
+ * Copyright (C) 2003, 2004 Oracle. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/kthread.h>
+#include <linux/pagemap.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/quotaops.h>
+#include <linux/sched/signal.h>
+
+#define MLOG_MASK_PREFIX ML_DLM_GLUE
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+#include "ocfs2_lockingver.h"
+
+#include "alloc.h"
+#include "dcache.h"
+#include "dlmglue.h"
+#include "extent_map.h"
+#include "file.h"
+#include "heartbeat.h"
+#include "inode.h"
+#include "journal.h"
+#include "stackglue.h"
+#include "slot_map.h"
+#include "super.h"
+#include "uptodate.h"
+#include "quota.h"
+#include "refcounttree.h"
+#include "acl.h"
+
+#include "buffer_head_io.h"
+
+struct ocfs2_mask_waiter {
+ struct list_head mw_item;
+ int mw_status;
+ struct completion mw_complete;
+ unsigned long mw_mask;
+ unsigned long mw_goal;
+#ifdef CONFIG_OCFS2_FS_STATS
+ ktime_t mw_lock_start;
+#endif
+};
+
+static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
+static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
+static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres);
+static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres);
+
+/*
+ * Return value from ->downconvert_worker functions.
+ *
+ * These control the precise actions of ocfs2_unblock_lock()
+ * and ocfs2_process_blocked_lock()
+ *
+ */
+enum ocfs2_unblock_action {
+ UNBLOCK_CONTINUE = 0, /* Continue downconvert */
+ UNBLOCK_CONTINUE_POST = 1, /* Continue downconvert, fire
+ * ->post_unlock callback */
+ UNBLOCK_STOP_POST = 2, /* Do not downconvert, fire
+ * ->post_unlock() callback. */
+};
+
+struct ocfs2_unblock_ctl {
+ int requeue;
+ enum ocfs2_unblock_action unblock_action;
+};
+
+/* Lockdep class keys */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES];
+#endif
+
+static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
+ int new_level);
+static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);
+
+static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
+ int blocking);
+
+static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
+ int blocking);
+
+static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres);
+
+static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres);
+
+static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
+ int new_level);
+static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
+ int blocking);
+
+#define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)
+
+/* This aids in debugging situations where a bad LVB might be involved. */
+static void ocfs2_dump_meta_lvb_info(u64 level,
+ const char *function,
+ unsigned int line,
+ struct ocfs2_lock_res *lockres)
+{
+ struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
+
+ mlog(level, "LVB information for %s (called from %s:%u):\n",
+ lockres->l_name, function, line);
+ mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
+ lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
+ be32_to_cpu(lvb->lvb_igeneration));
+ mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
+ (unsigned long long)be64_to_cpu(lvb->lvb_isize),
+ be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
+ be16_to_cpu(lvb->lvb_imode));
+ mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
+ "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
+ (long long)be64_to_cpu(lvb->lvb_iatime_packed),
+ (long long)be64_to_cpu(lvb->lvb_ictime_packed),
+ (long long)be64_to_cpu(lvb->lvb_imtime_packed),
+ be32_to_cpu(lvb->lvb_iattr));
+}
+
+
+/*
+ * OCFS2 Lock Resource Operations
+ *
+ * These fine-tune the behavior of the generic dlmglue locking infrastructure.
+ *
+ * The most basic of lock types can point ->l_priv to their respective
+ * struct ocfs2_super and allow the default actions to manage things.
+ *
+ * Right now, each lock type also needs to implement an init function,
+ * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres()
+ * should be called when the lock is no longer needed (i.e., object
+ * destruction time).
+ */
+struct ocfs2_lock_res_ops {
+ /*
+ * Translate an ocfs2_lock_res * into an ocfs2_super *. Define
+ * this callback if ->l_priv is not an ocfs2_super pointer
+ */
+ struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);
+
+ /*
+ * Optionally called in the downconvert thread after a
+ * successful downconvert. The lockres will not be referenced
+ * after this callback is called, so it is safe to free
+ * memory, etc.
+ *
+ * The exact semantics of when this is called are controlled
+ * by ->downconvert_worker()
+ */
+ void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);
+
+ /*
+ * Allow a lock type to add checks to determine whether it is
+ * safe to downconvert a lock. Return 0 to re-queue the
+ * downconvert at a later time, nonzero to continue.
+ *
+ * For most locks, the default checks that there are no
+ * incompatible holders are sufficient.
+ *
+ * Called with the lockres spinlock held.
+ */
+ int (*check_downconvert)(struct ocfs2_lock_res *, int);
+
+ /*
+ * Allows a lock type to populate the lock value block. This
+ * is called on downconvert, and when we drop a lock.
+ *
+ * Locks that want to use this should set LOCK_TYPE_USES_LVB
+ * in the flags field.
+ *
+ * Called with the lockres spinlock held.
+ */
+ void (*set_lvb)(struct ocfs2_lock_res *);
+
+ /*
+ * Called from the downconvert thread when it is determined
+ * that a lock will be downconverted. This is called without
+ * any locks held so the function can do work that might
+ * schedule (syncing out data, etc).
+ *
+ * This should return any one of the ocfs2_unblock_action
+ * values, depending on what it wants the thread to do.
+ */
+ int (*downconvert_worker)(struct ocfs2_lock_res *, int);
+
+ /*
+ * LOCK_TYPE_* flags which describe the specific requirements
+ * of a lock type. Descriptions of each individual flag follow.
+ */
+ int flags;
+};
+
+/*
+ * Some locks want to "refresh" potentially stale data when a
+ * meaningful (PRMODE or EXMODE) lock level is first obtained. If this
+ * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
+ * individual lockres l_flags member from the ast function. It is
+ * expected that the locking wrapper will clear the
+ * OCFS2_LOCK_NEEDS_REFRESH flag when done.
+ */
+#define LOCK_TYPE_REQUIRES_REFRESH 0x1
+
+/*
+ * Indicate that a lock type makes use of the lock value block. The
+ * ->set_lvb lock type callback must be defined.
+ */
+#define LOCK_TYPE_USES_LVB 0x2
+
+static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
+ .get_osb = ocfs2_get_inode_osb,
+ .flags = 0,
+};
+
+static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = {
+ .get_osb = ocfs2_get_inode_osb,
+ .check_downconvert = ocfs2_check_meta_downconvert,
+ .set_lvb = ocfs2_set_meta_lvb,
+ .downconvert_worker = ocfs2_data_convert_worker,
+ .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
+};
+
+static struct ocfs2_lock_res_ops ocfs2_super_lops = {
+ .flags = LOCK_TYPE_REQUIRES_REFRESH,
+};
+
+static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
+ .flags = 0,
+};
+
+static struct ocfs2_lock_res_ops ocfs2_nfs_sync_lops = {
+ .flags = 0,
+};
+
+static struct ocfs2_lock_res_ops ocfs2_trim_fs_lops = {
+ .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
+};
+
+static struct ocfs2_lock_res_ops ocfs2_orphan_scan_lops = {
+ .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
+};
+
+static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
+ .get_osb = ocfs2_get_dentry_osb,
+ .post_unlock = ocfs2_dentry_post_unlock,
+ .downconvert_worker = ocfs2_dentry_convert_worker,
+ .flags = 0,
+};
+
+static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
+ .get_osb = ocfs2_get_inode_osb,
+ .flags = 0,
+};
+
+static struct ocfs2_lock_res_ops ocfs2_flock_lops = {
+ .get_osb = ocfs2_get_file_osb,
+ .flags = 0,
+};
+
+static struct ocfs2_lock_res_ops ocfs2_qinfo_lops = {
+ .set_lvb = ocfs2_set_qinfo_lvb,
+ .get_osb = ocfs2_get_qinfo_osb,
+ .flags = LOCK_TYPE_REQUIRES_REFRESH | LOCK_TYPE_USES_LVB,
+};
+
+static struct ocfs2_lock_res_ops ocfs2_refcount_block_lops = {
+ .check_downconvert = ocfs2_check_refcount_downconvert,
+ .downconvert_worker = ocfs2_refcount_convert_worker,
+ .flags = 0,
+};
+
+static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
+{
+ return lockres->l_type == OCFS2_LOCK_TYPE_META ||
+ lockres->l_type == OCFS2_LOCK_TYPE_RW ||
+ lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
+}
+
+static inline struct ocfs2_lock_res *ocfs2_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
+{
+ return container_of(lksb, struct ocfs2_lock_res, l_lksb);
+}
+
+static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
+{
+ BUG_ON(!ocfs2_is_inode_lock(lockres));
+
+ return (struct inode *) lockres->l_priv;
+}
+
+static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
+{
+ BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY);
+
+ return (struct ocfs2_dentry_lock *)lockres->l_priv;
+}
+
+static inline struct ocfs2_mem_dqinfo *ocfs2_lock_res_qinfo(struct ocfs2_lock_res *lockres)
+{
+ BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_QINFO);
+
+ return (struct ocfs2_mem_dqinfo *)lockres->l_priv;
+}
+
+static inline struct ocfs2_refcount_tree *
+ocfs2_lock_res_refcount_tree(struct ocfs2_lock_res *res)
+{
+ return container_of(res, struct ocfs2_refcount_tree, rf_lockres);
+}
+
+static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
+{
+ if (lockres->l_ops->get_osb)
+ return lockres->l_ops->get_osb(lockres);
+
+ return (struct ocfs2_super *)lockres->l_priv;
+}
+
+static int ocfs2_lock_create(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int level,
+ u32 dlm_flags);
+static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
+ int wanted);
+static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int level, unsigned long caller_ip);
+static inline void ocfs2_cluster_unlock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int level)
+{
+ __ocfs2_cluster_unlock(osb, lockres, level, _RET_IP_);
+}
+
+static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
+static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
+static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
+static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
+static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres);
+static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
+ int convert);
+#define ocfs2_log_dlm_error(_func, _err, _lockres) do { \
+ if ((_lockres)->l_type != OCFS2_LOCK_TYPE_DENTRY) \
+ mlog(ML_ERROR, "DLM error %d while calling %s on resource %s\n", \
+ _err, _func, _lockres->l_name); \
+ else \
+ mlog(ML_ERROR, "DLM error %d while calling %s on resource %.*s%08x\n", \
+ _err, _func, OCFS2_DENTRY_LOCK_INO_START - 1, (_lockres)->l_name, \
+ (unsigned int)ocfs2_get_dentry_lock_ino(_lockres)); \
+} while (0)
+static int ocfs2_downconvert_thread(void *arg);
+static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres);
+static int ocfs2_inode_lock_update(struct inode *inode,
+ struct buffer_head **bh);
+static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
+static inline int ocfs2_highest_compat_lock_level(int level);
+static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
+ int new_level);
+static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int new_level,
+ int lvb,
+ unsigned int generation);
+static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres);
+static int ocfs2_cancel_convert(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres);
+
+
+static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
+ u64 blkno,
+ u32 generation,
+ char *name)
+{
+ int len;
+
+ BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);
+
+ len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
+ ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
+ (long long)blkno, generation);
+
+ BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));
+
+ mlog(0, "built lock resource with name: %s\n", name);
+}
+
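For reference, a hedged illustration of the name this builds; the block number and generation are invented, and the pad is assumed to be the usual six-zero OCFS2_LOCK_ID_PAD.

/* Illustrative only -- values are made up. */
static void example_build_name(void)
{
	char name[OCFS2_LOCK_ID_MAX_LEN];

	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_META, 0x1d2c3ULL, 0x1a2b3c4d,
			      name);
	/*
	 * name is "M" + "000000" (pad) + "000000000001d2c3" (blkno as
	 * %016llx) + "1a2b3c4d" (generation as %08x): 1 + 6 + 16 + 8 =
	 * 31 characters, i.e. OCFS2_LOCK_ID_MAX_LEN - 1, matching the
	 * BUG_ON above.
	 */
}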
+static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);
+
+static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
+ struct ocfs2_dlm_debug *dlm_debug)
+{
+ mlog(0, "Add tracking for lockres %s\n", res->l_name);
+
+ spin_lock(&ocfs2_dlm_tracking_lock);
+ list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
+ spin_unlock(&ocfs2_dlm_tracking_lock);
+}
+
+static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
+{
+ spin_lock(&ocfs2_dlm_tracking_lock);
+ if (!list_empty(&res->l_debug_list))
+ list_del_init(&res->l_debug_list);
+ spin_unlock(&ocfs2_dlm_tracking_lock);
+}
+
+#ifdef CONFIG_OCFS2_FS_STATS
+static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
+{
+ res->l_lock_refresh = 0;
+ res->l_lock_wait = 0;
+ memset(&res->l_lock_prmode, 0, sizeof(struct ocfs2_lock_stats));
+ memset(&res->l_lock_exmode, 0, sizeof(struct ocfs2_lock_stats));
+}
+
+static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
+ struct ocfs2_mask_waiter *mw, int ret)
+{
+ u32 usec;
+ ktime_t kt;
+ struct ocfs2_lock_stats *stats;
+
+ if (level == LKM_PRMODE)
+ stats = &res->l_lock_prmode;
+ else if (level == LKM_EXMODE)
+ stats = &res->l_lock_exmode;
+ else
+ return;
+
+ kt = ktime_sub(ktime_get(), mw->mw_lock_start);
+ usec = ktime_to_us(kt);
+
+ stats->ls_gets++;
+ stats->ls_total += ktime_to_ns(kt);
+ /* overflow */
+ if (unlikely(stats->ls_gets == 0)) {
+ stats->ls_gets++;
+ stats->ls_total = ktime_to_ns(kt);
+ }
+
+ if (stats->ls_max < usec)
+ stats->ls_max = usec;
+
+ if (ret)
+ stats->ls_fail++;
+
+ stats->ls_last = ktime_to_us(ktime_get_real());
+}
+
+static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
+{
+ lockres->l_lock_refresh++;
+}
+
+static inline void ocfs2_track_lock_wait(struct ocfs2_lock_res *lockres)
+{
+ struct ocfs2_mask_waiter *mw;
+
+ if (list_empty(&lockres->l_mask_waiters)) {
+ lockres->l_lock_wait = 0;
+ return;
+ }
+
+ mw = list_first_entry(&lockres->l_mask_waiters,
+ struct ocfs2_mask_waiter, mw_item);
+ lockres->l_lock_wait =
+ ktime_to_us(ktime_mono_to_real(mw->mw_lock_start));
+}
+
+static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
+{
+ mw->mw_lock_start = ktime_get();
+}
+#else
+static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
+{
+}
+static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res,
+ int level, struct ocfs2_mask_waiter *mw, int ret)
+{
+}
+static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
+{
+}
+static inline void ocfs2_track_lock_wait(struct ocfs2_lock_res *lockres)
+{
+}
+static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
+{
+}
+#endif
+
+static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *res,
+ enum ocfs2_lock_type type,
+ struct ocfs2_lock_res_ops *ops,
+ void *priv)
+{
+ res->l_type = type;
+ res->l_ops = ops;
+ res->l_priv = priv;
+
+ res->l_level = DLM_LOCK_IV;
+ res->l_requested = DLM_LOCK_IV;
+ res->l_blocking = DLM_LOCK_IV;
+ res->l_action = OCFS2_AST_INVALID;
+ res->l_unlock_action = OCFS2_UNLOCK_INVALID;
+
+ res->l_flags = OCFS2_LOCK_INITIALIZED;
+
+ ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);
+
+ ocfs2_init_lock_stats(res);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ if (type != OCFS2_LOCK_TYPE_OPEN)
+ lockdep_init_map(&res->l_lockdep_map, ocfs2_lock_type_strings[type],
+ &lockdep_keys[type], 0);
+ else
+ res->l_lockdep_map.key = NULL;
+#endif
+}
+
+void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
+{
+ /* This also clears out the lock status block */
+ memset(res, 0, sizeof(struct ocfs2_lock_res));
+ spin_lock_init(&res->l_lock);
+ init_waitqueue_head(&res->l_event);
+ INIT_LIST_HEAD(&res->l_blocked_list);
+ INIT_LIST_HEAD(&res->l_mask_waiters);
+ INIT_LIST_HEAD(&res->l_holders);
+}
+
+void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
+ enum ocfs2_lock_type type,
+ unsigned int generation,
+ struct inode *inode)
+{
+ struct ocfs2_lock_res_ops *ops;
+
+ switch(type) {
+ case OCFS2_LOCK_TYPE_RW:
+ ops = &ocfs2_inode_rw_lops;
+ break;
+ case OCFS2_LOCK_TYPE_META:
+ ops = &ocfs2_inode_inode_lops;
+ break;
+ case OCFS2_LOCK_TYPE_OPEN:
+ ops = &ocfs2_inode_open_lops;
+ break;
+ default:
+ mlog_bug_on_msg(1, "type: %d\n", type);
+ ops = NULL; /* thanks, gcc */
+ break;
+ }
+
+ ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
+ generation, res->l_name);
+ ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
+}
+
+static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
+{
+ struct inode *inode = ocfs2_lock_res_inode(lockres);
+
+ return OCFS2_SB(inode->i_sb);
+}
+
+static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres)
+{
+ struct ocfs2_mem_dqinfo *info = lockres->l_priv;
+
+ return OCFS2_SB(info->dqi_gi.dqi_sb);
+}
+
+static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres)
+{
+ struct ocfs2_file_private *fp = lockres->l_priv;
+
+ return OCFS2_SB(fp->fp_file->f_mapping->host->i_sb);
+}
+
+static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
+{
+ __be64 inode_blkno_be;
+
+ memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
+ sizeof(__be64));
+
+ return be64_to_cpu(inode_blkno_be);
+}
+
+static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
+{
+ struct ocfs2_dentry_lock *dl = lockres->l_priv;
+
+ return OCFS2_SB(dl->dl_inode->i_sb);
+}
+
+void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
+ u64 parent, struct inode *inode)
+{
+ int len;
+ u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
+ __be64 inode_blkno_be = cpu_to_be64(inode_blkno);
+ struct ocfs2_lock_res *lockres = &dl->dl_lockres;
+
+ ocfs2_lock_res_init_once(lockres);
+
+ /*
+ * Unfortunately, the standard lock naming scheme won't work
+ * here because we have two 16 byte values to use. Instead,
+ * we'll stuff the inode number as a binary value. We still
+ * want error prints to show something without garbling the
+ * display, so drop a null byte in there before the inode
+ * number. A future version of OCFS2 will likely use all
+ * binary lock names. The stringified names have been a
+ * tremendous aid in debugging, but now that the debugfs
+ * interface exists, we can mangle things there if need be.
+ *
+ * NOTE: We also drop the standard "pad" value (the total lock
+ * name size stays the same though - the last part is all
+ * zeros due to the memset in ocfs2_lock_res_init_once()).
+ */
+ len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
+ "%c%016llx",
+ ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
+ (long long)parent);
+
+ BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1));
+
+ memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be,
+ sizeof(__be64));
+
+ ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
+ OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops,
+ dl);
+}
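+
+/*
+ * A sketch of the resulting name layout (INO_START below stands for
+ * OCFS2_DENTRY_LOCK_INO_START):
+ *
+ * byte  0                    : lock type character
+ * bytes 1 .. INO_START - 2   : parent blkno as 16 hex digits
+ * byte  INO_START - 1        : NUL separator
+ * bytes INO_START .. + 7     : inode blkno as a big-endian u64
+ *
+ * Everything after that is left as the zeros from
+ * ocfs2_lock_res_init_once(), and ocfs2_get_dentry_lock_ino() simply
+ * reads the binary part back out of the name.
+ */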
+
+static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
+ struct ocfs2_super *osb)
+{
+ /* Superblock lockres doesn't come from a slab so we call init
+ * once on it manually. */
+ ocfs2_lock_res_init_once(res);
+ ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
+ 0, res->l_name);
+ ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
+ &ocfs2_super_lops, osb);
+}
+
+static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
+ struct ocfs2_super *osb)
+{
+ /* Rename lockres doesn't come from a slab so we call init
+ * once on it manually. */
+ ocfs2_lock_res_init_once(res);
+ ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
+ ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
+ &ocfs2_rename_lops, osb);
+}
+
+static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
+ struct ocfs2_super *osb)
+{
+ /* nfs_sync lockres doesn't come from a slab so we call init
+ * once on it manually. */
+ ocfs2_lock_res_init_once(res);
+ ocfs2_build_lock_name(OCFS2_LOCK_TYPE_NFS_SYNC, 0, 0, res->l_name);
+ ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_NFS_SYNC,
+ &ocfs2_nfs_sync_lops, osb);
+}
+
+static void ocfs2_nfs_sync_lock_init(struct ocfs2_super *osb)
+{
+ ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
+ init_rwsem(&osb->nfs_sync_rwlock);
+}
+
+void ocfs2_trim_fs_lock_res_init(struct ocfs2_super *osb)
+{
+ struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
+
+ /* Only one trimfs thread is allowed to work at the same time. */
+ mutex_lock(&osb->obs_trim_fs_mutex);
+
+ ocfs2_lock_res_init_once(lockres);
+ ocfs2_build_lock_name(OCFS2_LOCK_TYPE_TRIM_FS, 0, 0, lockres->l_name);
+ ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_TRIM_FS,
+ &ocfs2_trim_fs_lops, osb);
+}
+
+void ocfs2_trim_fs_lock_res_uninit(struct ocfs2_super *osb)
+{
+ struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
+
+ ocfs2_simple_drop_lockres(osb, lockres);
+ ocfs2_lock_res_free(lockres);
+
+ mutex_unlock(&osb->obs_trim_fs_mutex);
+}
+
+static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res,
+ struct ocfs2_super *osb)
+{
+ ocfs2_lock_res_init_once(res);
+ ocfs2_build_lock_name(OCFS2_LOCK_TYPE_ORPHAN_SCAN, 0, 0, res->l_name);
+ ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_ORPHAN_SCAN,
+ &ocfs2_orphan_scan_lops, osb);
+}
+
+void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
+ struct ocfs2_file_private *fp)
+{
+ struct inode *inode = fp->fp_file->f_mapping->host;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+
+ ocfs2_lock_res_init_once(lockres);
+ ocfs2_build_lock_name(OCFS2_LOCK_TYPE_FLOCK, oi->ip_blkno,
+ inode->i_generation, lockres->l_name);
+ ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
+ OCFS2_LOCK_TYPE_FLOCK, &ocfs2_flock_lops,
+ fp);
+ lockres->l_flags |= OCFS2_LOCK_NOCACHE;
+}
+
+void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres,
+ struct ocfs2_mem_dqinfo *info)
+{
+ ocfs2_lock_res_init_once(lockres);
+ ocfs2_build_lock_name(OCFS2_LOCK_TYPE_QINFO, info->dqi_gi.dqi_type,
+ 0, lockres->l_name);
+ ocfs2_lock_res_init_common(OCFS2_SB(info->dqi_gi.dqi_sb), lockres,
+ OCFS2_LOCK_TYPE_QINFO, &ocfs2_qinfo_lops,
+ info);
+}
+
+void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres,
+ struct ocfs2_super *osb, u64 ref_blkno,
+ unsigned int generation)
+{
+ ocfs2_lock_res_init_once(lockres);
+ ocfs2_build_lock_name(OCFS2_LOCK_TYPE_REFCOUNT, ref_blkno,
+ generation, lockres->l_name);
+ ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_REFCOUNT,
+ &ocfs2_refcount_block_lops, osb);
+}
+
+void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
+{
+ if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
+ return;
+
+ ocfs2_remove_lockres_tracking(res);
+
+ mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
+ "Lockres %s is on the blocked list\n",
+ res->l_name);
+ mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
+ "Lockres %s has mask waiters pending\n",
+ res->l_name);
+ mlog_bug_on_msg(spin_is_locked(&res->l_lock),
+ "Lockres %s is locked\n",
+ res->l_name);
+ mlog_bug_on_msg(res->l_ro_holders,
+ "Lockres %s has %u ro holders\n",
+ res->l_name, res->l_ro_holders);
+ mlog_bug_on_msg(res->l_ex_holders,
+ "Lockres %s has %u ex holders\n",
+ res->l_name, res->l_ex_holders);
+
+ /* Need to clear out the lock status block for the dlm */
+ memset(&res->l_lksb, 0, sizeof(res->l_lksb));
+
+ res->l_flags = 0UL;
+}
+
+/*
+ * Keep a list of processes who have interest in a lockres.
+ * Note: this is now only used for checking recursive cluster locking.
+ */
+static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres,
+ struct ocfs2_lock_holder *oh)
+{
+ INIT_LIST_HEAD(&oh->oh_list);
+ oh->oh_owner_pid = get_pid(task_pid(current));
+
+ spin_lock(&lockres->l_lock);
+ list_add_tail(&oh->oh_list, &lockres->l_holders);
+ spin_unlock(&lockres->l_lock);
+}
+
+static struct ocfs2_lock_holder *
+ocfs2_pid_holder(struct ocfs2_lock_res *lockres,
+ struct pid *pid)
+{
+ struct ocfs2_lock_holder *oh;
+
+ spin_lock(&lockres->l_lock);
+ list_for_each_entry(oh, &lockres->l_holders, oh_list) {
+ if (oh->oh_owner_pid == pid) {
+ spin_unlock(&lockres->l_lock);
+ return oh;
+ }
+ }
+ spin_unlock(&lockres->l_lock);
+ return NULL;
+}
+
+static inline void ocfs2_remove_holder(struct ocfs2_lock_res *lockres,
+ struct ocfs2_lock_holder *oh)
+{
+ spin_lock(&lockres->l_lock);
+ list_del(&oh->oh_list);
+ spin_unlock(&lockres->l_lock);
+
+ put_pid(oh->oh_owner_pid);
+}
+
+
+static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
+ int level)
+{
+ BUG_ON(!lockres);
+
+ switch(level) {
+ case DLM_LOCK_EX:
+ lockres->l_ex_holders++;
+ break;
+ case DLM_LOCK_PR:
+ lockres->l_ro_holders++;
+ break;
+ default:
+ BUG();
+ }
+}
+
+static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
+ int level)
+{
+ BUG_ON(!lockres);
+
+ switch(level) {
+ case DLM_LOCK_EX:
+ BUG_ON(!lockres->l_ex_holders);
+ lockres->l_ex_holders--;
+ break;
+ case DLM_LOCK_PR:
+ BUG_ON(!lockres->l_ro_holders);
+ lockres->l_ro_holders--;
+ break;
+ default:
+ BUG();
+ }
+}
+
+/* WARNING: This function lives in a world where the only three lock
+ * levels are EX, PR, and NL. It *will* have to be adjusted when more
+ * lock types are added. */
+static inline int ocfs2_highest_compat_lock_level(int level)
+{
+ int new_level = DLM_LOCK_EX;
+
+ if (level == DLM_LOCK_EX)
+ new_level = DLM_LOCK_NL;
+ else if (level == DLM_LOCK_PR)
+ new_level = DLM_LOCK_PR;
+ return new_level;
+}
+
+static void lockres_set_flags(struct ocfs2_lock_res *lockres,
+ unsigned long newflags)
+{
+ struct ocfs2_mask_waiter *mw, *tmp;
+
+ assert_spin_locked(&lockres->l_lock);
+
+ lockres->l_flags = newflags;
+
+ list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
+ if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
+ continue;
+
+ list_del_init(&mw->mw_item);
+ mw->mw_status = 0;
+ complete(&mw->mw_complete);
+ ocfs2_track_lock_wait(lockres);
+ }
+}
+static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
+{
+ lockres_set_flags(lockres, lockres->l_flags | or);
+}
+static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
+ unsigned long clear)
+{
+ lockres_set_flags(lockres, lockres->l_flags & ~clear);
+}
+
+static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
+{
+ BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
+ BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
+ BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
+ BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
+
+ lockres->l_level = lockres->l_requested;
+ if (lockres->l_level <=
+ ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
+ lockres->l_blocking = DLM_LOCK_NL;
+ lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
+ }
+ lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+}
+
+static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
+{
+ BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
+ BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
+
+ /* Convert from RO to EX doesn't really need anything as our
+ * information is already up to date. Convert from NL to
+ * *anything* however should mark ourselves as needing an
+ * update */
+ if (lockres->l_level == DLM_LOCK_NL &&
+ lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
+ lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
+
+ lockres->l_level = lockres->l_requested;
+
+ /*
+ * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing
+ * the OCFS2_LOCK_BUSY flag to prevent the dc thread from
+ * downconverting the lock before the upconvert has fully completed.
+ * Do not prevent the dc thread from downconverting if NONBLOCK lock
+ * had already returned.
+ */
+ if (!(lockres->l_flags & OCFS2_LOCK_NONBLOCK_FINISHED))
+ lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+ else
+ lockres_clear_flags(lockres, OCFS2_LOCK_NONBLOCK_FINISHED);
+
+ lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+}
+
+static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
+{
+ BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
+ BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
+
+ if (lockres->l_requested > DLM_LOCK_NL &&
+ !(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
+ lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
+ lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
+
+ lockres->l_level = lockres->l_requested;
+ lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
+ lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+}
+
+static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
+ int level)
+{
+ int needs_downconvert = 0;
+
+ assert_spin_locked(&lockres->l_lock);
+
+ if (level > lockres->l_blocking) {
+ /* only schedule a downconvert if we haven't already scheduled
+ * one that goes low enough to satisfy the level we're
+ * blocking. this also catches the case where we get
+ * duplicate BASTs */
+ if (ocfs2_highest_compat_lock_level(level) <
+ ocfs2_highest_compat_lock_level(lockres->l_blocking))
+ needs_downconvert = 1;
+
+ lockres->l_blocking = level;
+ }
+
+ mlog(ML_BASTS, "lockres %s, block %d, level %d, l_block %d, dwn %d\n",
+ lockres->l_name, level, lockres->l_level, lockres->l_blocking,
+ needs_downconvert);
+
+ if (needs_downconvert)
+ lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
+ mlog(0, "needs_downconvert = %d\n", needs_downconvert);
+ return needs_downconvert;
+}
+
+/*
+ * OCFS2_LOCK_PENDING and l_pending_gen.
+ *
+ * Why does OCFS2_LOCK_PENDING exist? To close a race between setting
+ * OCFS2_LOCK_BUSY and calling ocfs2_dlm_lock(). See ocfs2_unblock_lock()
+ * for more details on the race.
+ *
+ * OCFS2_LOCK_PENDING closes the race quite nicely. However, it introduces
+ * a race on itself. In o2dlm, we can get the ast before ocfs2_dlm_lock()
+ * returns. The ast clears OCFS2_LOCK_BUSY, and must therefore clear
+ * OCFS2_LOCK_PENDING at the same time. When ocfs2_dlm_lock() returns,
+ * the caller is going to try to clear PENDING again. If nothing else is
+ * happening, __lockres_clear_pending() sees PENDING is unset and does
+ * nothing.
+ *
+ * But what if another path (eg downconvert thread) has just started a
+ * new locking action? The other path has re-set PENDING. Our path
+ * cannot clear PENDING, because that will re-open the original race
+ * window.
+ *
+ * [Example]
+ *
+ * ocfs2_meta_lock()
+ * ocfs2_cluster_lock()
+ * set BUSY
+ * set PENDING
+ * drop l_lock
+ * ocfs2_dlm_lock()
+ * ocfs2_locking_ast() ocfs2_downconvert_thread()
+ * clear PENDING ocfs2_unblock_lock()
+ * take_l_lock
+ * !BUSY
+ * ocfs2_prepare_downconvert()
+ * set BUSY
+ * set PENDING
+ * drop l_lock
+ * take l_lock
+ * clear PENDING
+ * drop l_lock
+ * <window>
+ * ocfs2_dlm_lock()
+ *
+ * So as you can see, we now have a window where l_lock is not held,
+ * PENDING is not set, and ocfs2_dlm_lock() has not been called.
+ *
+ * The core problem is that ocfs2_cluster_lock() has cleared the PENDING
+ * set by ocfs2_prepare_downconvert(). That wasn't nice.
+ *
+ * To solve this we introduce l_pending_gen. A call to
+ * lockres_clear_pending() will only do so when it is passed a generation
+ * number that matches the lockres. lockres_set_pending() will return the
+ * current generation number. When ocfs2_cluster_lock() goes to clear
+ * PENDING, it passes the generation it got from set_pending(). In our
+ * example above, the generation numbers will *not* match. Thus,
+ * ocfs2_cluster_lock() will not clear the PENDING set by
+ * ocfs2_prepare_downconvert().
+ */
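+
+/*
+ * Putting the above together, the caller pattern (a condensed sketch
+ * of what ocfs2_lock_create() and __ocfs2_cluster_lock() below do) is:
+ *
+ * spin_lock_irqsave(&lockres->l_lock, flags);
+ * lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
+ * gen = lockres_set_pending(lockres);
+ * spin_unlock_irqrestore(&lockres->l_lock, flags);
+ *
+ * ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, ...);
+ * lockres_clear_pending(lockres, gen, osb);
+ *
+ * where the final clear_pending() is a no-op if the ast has already
+ * cleared PENDING and bumped l_pending_gen.
+ */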
+
+/* Unlocked version for ocfs2_locking_ast() */
+static void __lockres_clear_pending(struct ocfs2_lock_res *lockres,
+ unsigned int generation,
+ struct ocfs2_super *osb)
+{
+ assert_spin_locked(&lockres->l_lock);
+
+ /*
+ * The ast and locking functions can race us here. The winner
+ * will clear pending, the loser will not.
+ */
+ if (!(lockres->l_flags & OCFS2_LOCK_PENDING) ||
+ (lockres->l_pending_gen != generation))
+ return;
+
+ lockres_clear_flags(lockres, OCFS2_LOCK_PENDING);
+ lockres->l_pending_gen++;
+
+ /*
+ * The downconvert thread may have skipped us because we
+ * were PENDING. Wake it up.
+ */
+ if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
+ ocfs2_wake_downconvert_thread(osb);
+}
+
+/* Locked version for callers of ocfs2_dlm_lock() */
+static void lockres_clear_pending(struct ocfs2_lock_res *lockres,
+ unsigned int generation,
+ struct ocfs2_super *osb)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ __lockres_clear_pending(lockres, generation, osb);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+}
+
+static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres)
+{
+ assert_spin_locked(&lockres->l_lock);
+ BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
+
+ lockres_or_flags(lockres, OCFS2_LOCK_PENDING);
+
+ return lockres->l_pending_gen;
+}
+
+static void ocfs2_blocking_ast(struct ocfs2_dlm_lksb *lksb, int level)
+{
+ struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
+ struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
+ int needs_downconvert;
+ unsigned long flags;
+
+ BUG_ON(level <= DLM_LOCK_NL);
+
+ mlog(ML_BASTS, "BAST fired for lockres %s, blocking %d, level %d, "
+ "type %s\n", lockres->l_name, level, lockres->l_level,
+ ocfs2_lock_type_string(lockres->l_type));
+
+ /*
+ * We can skip the bast for locks which don't enable caching -
+ * they'll be dropped at the earliest possible time anyway.
+ */
+ if (lockres->l_flags & OCFS2_LOCK_NOCACHE)
+ return;
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
+ if (needs_downconvert)
+ ocfs2_schedule_blocked_lock(osb, lockres);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ wake_up(&lockres->l_event);
+
+ ocfs2_wake_downconvert_thread(osb);
+}
+
+static void ocfs2_locking_ast(struct ocfs2_dlm_lksb *lksb)
+{
+ struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
+ struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
+ unsigned long flags;
+ int status;
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+
+ status = ocfs2_dlm_lock_status(&lockres->l_lksb);
+
+ if (status == -EAGAIN) {
+ lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+ goto out;
+ }
+
+ if (status) {
+ mlog(ML_ERROR, "lockres %s: lksb status value of %d!\n",
+ lockres->l_name, status);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ return;
+ }
+
+ mlog(ML_BASTS, "AST fired for lockres %s, action %d, unlock %d, "
+ "level %d => %d\n", lockres->l_name, lockres->l_action,
+ lockres->l_unlock_action, lockres->l_level, lockres->l_requested);
+
+ switch(lockres->l_action) {
+ case OCFS2_AST_ATTACH:
+ ocfs2_generic_handle_attach_action(lockres);
+ lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
+ break;
+ case OCFS2_AST_CONVERT:
+ ocfs2_generic_handle_convert_action(lockres);
+ break;
+ case OCFS2_AST_DOWNCONVERT:
+ ocfs2_generic_handle_downconvert_action(lockres);
+ break;
+ default:
+ mlog(ML_ERROR, "lockres %s: AST fired with invalid action: %u, "
+ "flags 0x%lx, unlock: %u\n",
+ lockres->l_name, lockres->l_action, lockres->l_flags,
+ lockres->l_unlock_action);
+ BUG();
+ }
+out:
+ /* set it to something invalid so if we get called again we
+ * can catch it. */
+ lockres->l_action = OCFS2_AST_INVALID;
+
+ /* Did we try to cancel this lock? Clear that state */
+ if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT)
+ lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
+
+ /*
+ * We may have beaten the locking functions here. We certainly
+ * know that dlm_lock() has been called :-)
+ * Because we can't have two lock calls in flight at once, we
+ * can use lockres->l_pending_gen.
+ */
+ __lockres_clear_pending(lockres, lockres->l_pending_gen, osb);
+
+ wake_up(&lockres->l_event);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+}
+
+static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
+{
+ struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
+ unsigned long flags;
+
+ mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n",
+ lockres->l_name, lockres->l_unlock_action);
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ if (error) {
+ mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
+ "unlock_action %d\n", error, lockres->l_name,
+ lockres->l_unlock_action);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ return;
+ }
+
+ switch(lockres->l_unlock_action) {
+ case OCFS2_UNLOCK_CANCEL_CONVERT:
+ mlog(0, "Cancel convert success for %s\n", lockres->l_name);
+ lockres->l_action = OCFS2_AST_INVALID;
+ /* Downconvert thread may have requeued this lock, we
+ * need to wake it. */
+ if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
+ ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
+ break;
+ case OCFS2_UNLOCK_DROP_LOCK:
+ lockres->l_level = DLM_LOCK_IV;
+ break;
+ default:
+ BUG();
+ }
+
+ lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+ lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
+ wake_up(&lockres->l_event);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+}
+
+/*
+ * This is the filesystem locking protocol. It provides the lock handling
+ * hooks for the underlying DLM. It has a maximum version number.
+ * The version number allows interoperability with systems running at
+ * the same major number and an equal or smaller minor number.
+ *
+ * Whenever the filesystem does new things with locks (adds or removes a
+ * lock, orders them differently, does different things underneath a lock),
+ * the version must be changed. The protocol is negotiated when joining
+ * the dlm domain. A node may join the domain if its major version is
+ * identical to all other nodes and its minor version is greater than
+ * or equal to all other nodes. When its minor version is greater than
+ * the other nodes, it will run at the minor version specified by the
+ * other nodes.
+ *
+ * If a locking change is made that will not be compatible with older
+ * versions, the major number must be increased and the minor version set
+ * to zero. If a change merely adds a behavior that can be disabled when
+ * speaking to older versions, the minor version must be increased. If a
+ * change adds a fully backwards compatible change (eg, LVB changes that
+ * are just ignored by older versions), the version does not need to be
+ * updated.
+ */
+static struct ocfs2_locking_protocol lproto = {
+ .lp_max_version = {
+ .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
+ .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
+ },
+ .lp_lock_ast = ocfs2_locking_ast,
+ .lp_blocking_ast = ocfs2_blocking_ast,
+ .lp_unlock_ast = ocfs2_unlock_ast,
+};
+
+void ocfs2_set_locking_protocol(void)
+{
+ ocfs2_stack_glue_set_max_proto_version(&lproto.lp_max_version);
+}
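+
+/*
+ * A worked example of the rules above: a node whose maximum version is
+ * 1.2 may join a domain running 1.0 or 1.1 and will then run at that
+ * lower minor; a node at 1.0 may not join a domain already running
+ * 1.1; and a 2.x node can never join a 1.x domain because the major
+ * numbers differ.
+ */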
+
+static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
+ int convert)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+ lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+ if (convert)
+ lockres->l_action = OCFS2_AST_INVALID;
+ else
+ lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ wake_up(&lockres->l_event);
+}
+
+/* Note: If we detect another process working on the lock (i.e.,
+ * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
+ * to do the right thing in that case.
+ */
+static int ocfs2_lock_create(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int level,
+ u32 dlm_flags)
+{
+ int ret = 0;
+ unsigned long flags;
+ unsigned int gen;
+
+ mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level,
+ dlm_flags);
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
+ (lockres->l_flags & OCFS2_LOCK_BUSY)) {
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ goto bail;
+ }
+
+ lockres->l_action = OCFS2_AST_ATTACH;
+ lockres->l_requested = level;
+ lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
+ gen = lockres_set_pending(lockres);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ ret = ocfs2_dlm_lock(osb->cconn,
+ level,
+ &lockres->l_lksb,
+ dlm_flags,
+ lockres->l_name,
+ OCFS2_LOCK_ID_MAX_LEN - 1);
+ lockres_clear_pending(lockres, gen, osb);
+ if (ret) {
+ ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
+ ocfs2_recover_from_dlm_error(lockres, 1);
+ }
+
+ mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name);
+
+bail:
+ return ret;
+}
+
+static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
+ int flag)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ ret = lockres->l_flags & flag;
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ return ret;
+}
+
+static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
+{
+ wait_event(lockres->l_event,
+ !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
+}
+
+static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
+{
+ wait_event(lockres->l_event,
+ !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
+}
+
+/* predict what lock level we'll be dropping down to on behalf
+ * of another node, and return true if the currently wanted
+ * level will be compatible with it. */
+static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
+ int wanted)
+{
+ BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
+
+ return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
+}
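+
+/*
+ * For example: if another node has blocked us at DLM_LOCK_PR, the
+ * eventual downconvert target is DLM_LOCK_PR (its highest compatible
+ * level), so a wanted PR may continue while a wanted EX must wait.
+ * If the blocker wants DLM_LOCK_EX the target is DLM_LOCK_NL and
+ * neither PR nor EX may continue.
+ */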
+
+static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
+{
+ INIT_LIST_HEAD(&mw->mw_item);
+ init_completion(&mw->mw_complete);
+ ocfs2_init_start_time(mw);
+}
+
+static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
+{
+ wait_for_completion(&mw->mw_complete);
+ /* Re-arm the completion in case we want to wait on it again */
+ reinit_completion(&mw->mw_complete);
+ return mw->mw_status;
+}
+
+static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
+ struct ocfs2_mask_waiter *mw,
+ unsigned long mask,
+ unsigned long goal)
+{
+ BUG_ON(!list_empty(&mw->mw_item));
+
+ assert_spin_locked(&lockres->l_lock);
+
+ list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
+ mw->mw_mask = mask;
+ mw->mw_goal = goal;
+ ocfs2_track_lock_wait(lockres);
+}
+
+/* returns 0 if the mw that was removed was already satisfied, -EBUSY
+ * if the mask still hadn't reached its goal */
+static int __lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
+ struct ocfs2_mask_waiter *mw)
+{
+ int ret = 0;
+
+ assert_spin_locked(&lockres->l_lock);
+ if (!list_empty(&mw->mw_item)) {
+ if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
+ ret = -EBUSY;
+
+ list_del_init(&mw->mw_item);
+ init_completion(&mw->mw_complete);
+ ocfs2_track_lock_wait(lockres);
+ }
+
+ return ret;
+}
+
+static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
+ struct ocfs2_mask_waiter *mw)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ ret = __lockres_remove_mask_waiter(lockres, mw);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ return ret;
+}
+
+static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
+ struct ocfs2_lock_res *lockres)
+{
+ int ret;
+
+ ret = wait_for_completion_interruptible(&mw->mw_complete);
+ if (ret)
+ lockres_remove_mask_waiter(lockres, mw);
+ else
+ ret = mw->mw_status;
+ /* Re-arm the completion in case we want to wait on it again */
+ reinit_completion(&mw->mw_complete);
+ return ret;
+}
+
+static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int level,
+ u32 lkm_flags,
+ int arg_flags,
+ int l_subclass,
+ unsigned long caller_ip)
+{
+ struct ocfs2_mask_waiter mw;
+ int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
+ int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
+ unsigned long flags;
+ unsigned int gen;
+ int noqueue_attempted = 0;
+ int dlm_locked = 0;
+ int kick_dc = 0;
+
+ if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) {
+ mlog_errno(-EINVAL);
+ return -EINVAL;
+ }
+
+ ocfs2_init_mask_waiter(&mw);
+
+ if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
+ lkm_flags |= DLM_LKF_VALBLK;
+
+again:
+ wait = 0;
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+
+ if (catch_signals && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ goto unlock;
+ }
+
+ mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
+ "Cluster lock called on freeing lockres %s! flags "
+ "0x%lx\n", lockres->l_name, lockres->l_flags);
+
+ /* We only compare against the currently granted level
+ * here. If the lock is blocked waiting on a downconvert,
+ * we'll get caught below. */
+ if (lockres->l_flags & OCFS2_LOCK_BUSY &&
+ level > lockres->l_level) {
+ /* is someone sitting in dlm_lock? If so, wait on
+ * them. */
+ lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
+ wait = 1;
+ goto unlock;
+ }
+
+ if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) {
+ /*
+ * We've upconverted. If the lock now has a level we can
+ * work with, we take it. If, however, the lock is not at the
+ * required level, we go thru the full cycle. One way this could
+ * happen is if a process requesting an upconvert to PR is
+ * closely followed by another requesting upconvert to an EX.
+ * If the process requesting EX lands here, we want it to
+ * continue attempting to upconvert and let the process
+ * requesting PR take the lock.
+ * If multiple processes request upconvert to PR, the first one
+ * here will take the lock. The others will have to go thru the
+ * OCFS2_LOCK_BLOCKED check to ensure that there is no pending
+ * downconvert request.
+ */
+ if (level <= lockres->l_level)
+ goto update_holders;
+ }
+
+ if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
+ !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
+ /* is the lock currently blocked on behalf of
+ * another node? */
+ lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
+ wait = 1;
+ goto unlock;
+ }
+
+ if (level > lockres->l_level) {
+ if (noqueue_attempted > 0) {
+ ret = -EAGAIN;
+ goto unlock;
+ }
+ if (lkm_flags & DLM_LKF_NOQUEUE)
+ noqueue_attempted = 1;
+
+ if (lockres->l_action != OCFS2_AST_INVALID)
+ mlog(ML_ERROR, "lockres %s has action %u pending\n",
+ lockres->l_name, lockres->l_action);
+
+ if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
+ lockres->l_action = OCFS2_AST_ATTACH;
+ lkm_flags &= ~DLM_LKF_CONVERT;
+ } else {
+ lockres->l_action = OCFS2_AST_CONVERT;
+ lkm_flags |= DLM_LKF_CONVERT;
+ }
+
+ lockres->l_requested = level;
+ lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
+ gen = lockres_set_pending(lockres);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ BUG_ON(level == DLM_LOCK_IV);
+ BUG_ON(level == DLM_LOCK_NL);
+
+ mlog(ML_BASTS, "lockres %s, convert from %d to %d\n",
+ lockres->l_name, lockres->l_level, level);
+
+ /* call dlm_lock to upgrade lock now */
+ ret = ocfs2_dlm_lock(osb->cconn,
+ level,
+ &lockres->l_lksb,
+ lkm_flags,
+ lockres->l_name,
+ OCFS2_LOCK_ID_MAX_LEN - 1);
+ lockres_clear_pending(lockres, gen, osb);
+ if (ret) {
+ if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
+ (ret != -EAGAIN)) {
+ ocfs2_log_dlm_error("ocfs2_dlm_lock",
+ ret, lockres);
+ }
+ ocfs2_recover_from_dlm_error(lockres, 1);
+ goto out;
+ }
+ dlm_locked = 1;
+
+ mlog(0, "lock %s, successful return from ocfs2_dlm_lock\n",
+ lockres->l_name);
+
+ /* At this point we've gone inside the dlm and need to
+ * complete our work regardless. */
+ catch_signals = 0;
+
+ /* wait for busy to clear and carry on */
+ goto again;
+ }
+
+update_holders:
+ /* Ok, if we get here then we're good to go. */
+ ocfs2_inc_holders(lockres, level);
+
+ ret = 0;
+unlock:
+ lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+
+ /* ocfs2_unblock_lock requeues on seeing OCFS2_LOCK_UPCONVERT_FINISHING */
+ kick_dc = (lockres->l_flags & OCFS2_LOCK_BLOCKED);
+
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ if (kick_dc)
+ ocfs2_wake_downconvert_thread(osb);
+out:
+ /*
+ * This is helping work around a lock inversion between the page lock
+ * and dlm locks. One path holds the page lock while calling aops
+ * which block acquiring dlm locks. The voting thread holds dlm
+ * locks while acquiring page locks while down converting data locks.
+ * This block is helping an aop path notice the inversion and back
+ * off to unlock its page lock before trying the dlm lock again.
+ */
+ if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
+ mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
+ wait = 0;
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ if (__lockres_remove_mask_waiter(lockres, &mw)) {
+ if (dlm_locked)
+ lockres_or_flags(lockres,
+ OCFS2_LOCK_NONBLOCK_FINISHED);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ ret = -EAGAIN;
+ } else {
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ goto again;
+ }
+ }
+ if (wait) {
+ ret = ocfs2_wait_for_mask(&mw);
+ if (ret == 0)
+ goto again;
+ mlog_errno(ret);
+ }
+ ocfs2_update_lock_stats(lockres, level, &mw, ret);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ if (!ret && lockres->l_lockdep_map.key != NULL) {
+ if (level == DLM_LOCK_PR)
+ rwsem_acquire_read(&lockres->l_lockdep_map, l_subclass,
+ !!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
+ caller_ip);
+ else
+ rwsem_acquire(&lockres->l_lockdep_map, l_subclass,
+ !!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
+ caller_ip);
+ }
+#endif
+ return ret;
+}
+
+static inline int ocfs2_cluster_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int level,
+ u32 lkm_flags,
+ int arg_flags)
+{
+ return __ocfs2_cluster_lock(osb, lockres, level, lkm_flags, arg_flags,
+ 0, _RET_IP_);
+}
+
+
+static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int level,
+ unsigned long caller_ip)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ ocfs2_dec_holders(lockres, level);
+ ocfs2_downconvert_on_unlock(osb, lockres);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ if (lockres->l_lockdep_map.key != NULL)
+ rwsem_release(&lockres->l_lockdep_map, caller_ip);
+#endif
+}
+
+static int ocfs2_create_new_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int ex,
+ int local)
+{
+ int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
+ unsigned long flags;
+ u32 lkm_flags = local ? DLM_LKF_LOCAL : 0;
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
+ lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ return ocfs2_lock_create(osb, lockres, level, lkm_flags);
+}
+
+/* Grants us an EX lock on the data and metadata resources, skipping
+ * the normal cluster directory lookup. Use this ONLY on newly created
+ * inodes which other nodes can't possibly see, and which haven't been
+ * hashed in the inode hash yet. This can give us a good performance
+ * increase as it'll skip the network broadcast normally associated
+ * with creating a new lock resource. */
+int ocfs2_create_new_inode_locks(struct inode *inode)
+{
+ int ret;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ BUG_ON(!ocfs2_inode_is_new(inode));
+
+ mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
+
+ /* NOTE: That we don't increment any of the holder counts, nor
+ * do we add anything to a journal handle. Since this is
+ * supposed to be a new inode which the cluster doesn't know
+ * about yet, there is no need to. As far as the LVB handling
+ * is concerned, this is basically like acquiring an EX lock
+ * on a resource which has an invalid one -- we'll set it
+ * valid when we release the EX. */
+
+ ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
+ if (ret) {
+ mlog_errno(ret);
+ goto bail;
+ }
+
+ /*
+ * We don't want to use DLM_LKF_LOCAL on a meta data lock as they
+ * don't use a generation in their lock names.
+ */
+ ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
+ if (ret) {
+ mlog_errno(ret);
+ goto bail;
+ }
+
+ ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
+ if (ret)
+ mlog_errno(ret);
+
+bail:
+ return ret;
+}
+
+int ocfs2_rw_lock(struct inode *inode, int write)
+{
+ int status, level;
+ struct ocfs2_lock_res *lockres;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ mlog(0, "inode %llu take %s RW lock\n",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ write ? "EXMODE" : "PRMODE");
+
+ if (ocfs2_mount_local(osb))
+ return 0;
+
+ lockres = &OCFS2_I(inode)->ip_rw_lockres;
+
+ level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
+
+ status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
+ if (status < 0)
+ mlog_errno(status);
+
+ return status;
+}
+
+int ocfs2_try_rw_lock(struct inode *inode, int write)
+{
+ int status, level;
+ struct ocfs2_lock_res *lockres;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ mlog(0, "inode %llu try to take %s RW lock\n",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ write ? "EXMODE" : "PRMODE");
+
+ if (ocfs2_mount_local(osb))
+ return 0;
+
+ lockres = &OCFS2_I(inode)->ip_rw_lockres;
+
+ level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
+
+ status = ocfs2_cluster_lock(osb, lockres, level, DLM_LKF_NOQUEUE, 0);
+ return status;
+}
+
+void ocfs2_rw_unlock(struct inode *inode, int write)
+{
+ int level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
+ struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ mlog(0, "inode %llu drop %s RW lock\n",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ write ? "EXMODE" : "PRMODE");
+
+ if (!ocfs2_mount_local(osb))
+ ocfs2_cluster_unlock(osb, lockres, level);
+}
+
+/*
+ * ocfs2_open_lock always get PR mode lock.
+ */
+int ocfs2_open_lock(struct inode *inode)
+{
+ int status = 0;
+ struct ocfs2_lock_res *lockres;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ mlog(0, "inode %llu take PRMODE open lock\n",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
+
+ if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
+ goto out;
+
+ lockres = &OCFS2_I(inode)->ip_open_lockres;
+
+ status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_PR, 0, 0);
+ if (status < 0)
+ mlog_errno(status);
+
+out:
+ return status;
+}
+
+int ocfs2_try_open_lock(struct inode *inode, int write)
+{
+ int status = 0, level;
+ struct ocfs2_lock_res *lockres;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ mlog(0, "inode %llu try to take %s open lock\n",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ write ? "EXMODE" : "PRMODE");
+
+ if (ocfs2_is_hard_readonly(osb)) {
+ if (write)
+ status = -EROFS;
+ goto out;
+ }
+
+ if (ocfs2_mount_local(osb))
+ goto out;
+
+ lockres = &OCFS2_I(inode)->ip_open_lockres;
+
+ level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
+
+ /*
+ * The file system may already be holding a PRMODE/EXMODE open lock.
+ * Since we pass DLM_LKF_NOQUEUE, the request won't block waiting on
+ * other nodes and the -EAGAIN will indicate to the caller that
+ * this inode is still in use.
+ */
+ status = ocfs2_cluster_lock(osb, lockres, level, DLM_LKF_NOQUEUE, 0);
+
+out:
+ return status;
+}
+
+/*
+ * ocfs2_open_unlock unlocks PR and EX mode open locks.
+ */
+void ocfs2_open_unlock(struct inode *inode)
+{
+ struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ mlog(0, "inode %llu drop open lock\n",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
+
+ if (ocfs2_mount_local(osb))
+ goto out;
+
+ if(lockres->l_ro_holders)
+ ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_PR);
+ if(lockres->l_ex_holders)
+ ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
+
+out:
+ return;
+}
+
+static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
+ int level)
+{
+ int ret;
+ struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
+ unsigned long flags;
+ struct ocfs2_mask_waiter mw;
+
+ ocfs2_init_mask_waiter(&mw);
+
+retry_cancel:
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ if (lockres->l_flags & OCFS2_LOCK_BUSY) {
+ ret = ocfs2_prepare_cancel_convert(osb, lockres);
+ if (ret) {
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ ret = ocfs2_cancel_convert(osb, lockres);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+ goto retry_cancel;
+ }
+ lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ ocfs2_wait_for_mask(&mw);
+ goto retry_cancel;
+ }
+
+ ret = -ERESTARTSYS;
+ /*
+ * We may still have gotten the lock, in which case there's no
+ * point in restarting the syscall.
+ */
+ if (lockres->l_level == level)
+ ret = 0;
+
+ mlog(0, "Cancel returning %d. flags: 0x%lx, level: %d, act: %d\n", ret,
+ lockres->l_flags, lockres->l_level, lockres->l_action);
+
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+out:
+ return ret;
+}
+
+/*
+ * ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
+ * flock() calls. The locking approach this requires is sufficiently
+ * different from all other cluster lock types that we implement a
+ * separate path to the "low-level" dlm calls. In particular:
+ *
+ * - No optimization of lock levels is done - we take exactly
+ * what's been requested.
+ *
+ * - No lock caching is employed. We immediately downconvert to
+ * no-lock at unlock time. (This also means flock locks never go on
+ * the blocking list.)
+ *
+ * - Since userspace can trivially deadlock itself with flock, we make
+ * sure to allow cancellation of a misbehaving application's flock()
+ * request.
+ *
+ * - Access to any flock lockres doesn't require concurrency, so we
+ * can simplify the code by requiring the caller to guarantee
+ * serialization of dlmglue flock calls.
+ */
+int ocfs2_file_lock(struct file *file, int ex, int trylock)
+{
+ int ret, level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
+ unsigned int lkm_flags = trylock ? DLM_LKF_NOQUEUE : 0;
+ unsigned long flags;
+ struct ocfs2_file_private *fp = file->private_data;
+ struct ocfs2_lock_res *lockres = &fp->fp_flock;
+ struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
+ struct ocfs2_mask_waiter mw;
+
+ ocfs2_init_mask_waiter(&mw);
+
+ if ((lockres->l_flags & OCFS2_LOCK_BUSY) ||
+ (lockres->l_level > DLM_LOCK_NL)) {
+ mlog(ML_ERROR,
+ "File lock \"%s\" has busy or locked state: flags: 0x%lx, "
+ "level: %u\n", lockres->l_name, lockres->l_flags,
+ lockres->l_level);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
+ lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ /*
+ * Get the lock at NLMODE to start - that way we
+ * can cancel the upconvert request if need be.
+ */
+ ret = ocfs2_lock_create(osb, lockres, DLM_LOCK_NL, 0);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_wait_for_mask(&mw);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ }
+
+ lockres->l_action = OCFS2_AST_CONVERT;
+ lkm_flags |= DLM_LKF_CONVERT;
+ lockres->l_requested = level;
+ lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
+
+ lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
+ lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1);
+ if (ret) {
+ if (!trylock || (ret != -EAGAIN)) {
+ ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
+ ret = -EINVAL;
+ }
+
+ ocfs2_recover_from_dlm_error(lockres, 1);
+ lockres_remove_mask_waiter(lockres, &mw);
+ goto out;
+ }
+
+ ret = ocfs2_wait_for_mask_interruptible(&mw, lockres);
+ if (ret == -ERESTARTSYS) {
+ /*
+ * Userspace can cause deadlock itself with
+ * flock(). Current behavior locally is to allow the
+ * deadlock, but abort the system call if a signal is
+ * received. We follow this example, otherwise a
+ * poorly written program could sit in kernel until
+ * reboot.
+ *
+ * Handling this is a bit more complicated for Ocfs2
+ * though. We can't exit this function with an
+ * outstanding lock request, so a cancel convert is
+ * required. We intentionally overwrite 'ret' - if the
+ * cancel fails and the lock was granted, it's easier
+ * to just bubble success back up to the user.
+ */
+ ret = ocfs2_flock_handle_signal(lockres, level);
+ } else if (!ret && (level > lockres->l_level)) {
+ /* Trylock failed asynchronously */
+ BUG_ON(!trylock);
+ ret = -EAGAIN;
+ }
+
+out:
+
+ mlog(0, "Lock: \"%s\" ex: %d, trylock: %d, returns: %d\n",
+ lockres->l_name, ex, trylock, ret);
+ return ret;
+}
+
+void ocfs2_file_unlock(struct file *file)
+{
+ int ret;
+ unsigned int gen;
+ unsigned long flags;
+ struct ocfs2_file_private *fp = file->private_data;
+ struct ocfs2_lock_res *lockres = &fp->fp_flock;
+ struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
+ struct ocfs2_mask_waiter mw;
+
+ ocfs2_init_mask_waiter(&mw);
+
+ if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED))
+ return;
+
+ if (lockres->l_level == DLM_LOCK_NL)
+ return;
+
+ mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n",
+ lockres->l_name, lockres->l_flags, lockres->l_level,
+ lockres->l_action);
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ /*
+ * Fake a blocking ast for the downconvert code.
+ */
+ lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
+ lockres->l_blocking = DLM_LOCK_EX;
+
+ gen = ocfs2_prepare_downconvert(lockres, DLM_LOCK_NL);
+ lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ ret = ocfs2_downconvert_lock(osb, lockres, DLM_LOCK_NL, 0, gen);
+ if (ret) {
+ mlog_errno(ret);
+ return;
+ }
+
+ ret = ocfs2_wait_for_mask(&mw);
+ if (ret)
+ mlog_errno(ret);
+}
+
+static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres)
+{
+ int kick = 0;
+
+ /* If we know that another node is waiting on our lock, kick
+ * the downconvert thread pre-emptively when we reach a release
+ * condition. */
+ if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
+ switch(lockres->l_blocking) {
+ case DLM_LOCK_EX:
+ if (!lockres->l_ex_holders && !lockres->l_ro_holders)
+ kick = 1;
+ break;
+ case DLM_LOCK_PR:
+ if (!lockres->l_ex_holders)
+ kick = 1;
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ if (kick)
+ ocfs2_wake_downconvert_thread(osb);
+}
+
+#define OCFS2_SEC_BITS 34
+#define OCFS2_SEC_SHIFT (64 - OCFS2_SEC_BITS)
+#define OCFS2_NSEC_MASK ((1ULL << OCFS2_SEC_SHIFT) - 1)
+
+/* LVB only has room for 64 bits of time here so we pack it for
+ * now. */
+static u64 ocfs2_pack_timespec(struct timespec64 *spec)
+{
+ u64 res;
+ u64 sec = clamp_t(time64_t, spec->tv_sec, 0, 0x3ffffffffull);
+ u32 nsec = spec->tv_nsec;
+
+ res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);
+
+ return res;
+}
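+
+/*
+ * A quick worked example of the packing: with OCFS2_SEC_BITS = 34 the
+ * shift is 30, so tv_sec = 1, tv_nsec = 5 packs to
+ *
+ * (1ULL << 30) | 5 == 0x40000005
+ *
+ * i.e. seconds in the top 34 bits and nanoseconds in the low 30 bits.
+ * ocfs2_unpack_timespec() below reverses this.
+ */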
+
+/* Call this with the lockres locked. I am reasonably sure we don't
+ * need ip_lock in this function as anyone who would be changing those
+ * values is supposed to be blocked in ocfs2_inode_lock right now. */
+static void __ocfs2_stuff_meta_lvb(struct inode *inode)
+{
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
+ struct ocfs2_meta_lvb *lvb;
+ struct timespec64 ctime = inode_get_ctime(inode);
+
+ lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
+
+ /*
+ * Invalidate the LVB of a deleted inode - this way other
+ * nodes are forced to go to disk and discover the new inode
+ * status.
+ */
+ if (oi->ip_flags & OCFS2_INODE_DELETED) {
+ lvb->lvb_version = 0;
+ goto out;
+ }
+
+ lvb->lvb_version = OCFS2_LVB_VERSION;
+ lvb->lvb_isize = cpu_to_be64(i_size_read(inode));
+ lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
+ lvb->lvb_iuid = cpu_to_be32(i_uid_read(inode));
+ lvb->lvb_igid = cpu_to_be32(i_gid_read(inode));
+ lvb->lvb_imode = cpu_to_be16(inode->i_mode);
+ lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
+ lvb->lvb_iatime_packed =
+ cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
+ lvb->lvb_ictime_packed =
+ cpu_to_be64(ocfs2_pack_timespec(&ctime));
+ lvb->lvb_imtime_packed =
+ cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
+ lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
+ lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
+ lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
+
+out:
+ mlog_meta_lvb(0, lockres);
+}
+
+static void ocfs2_unpack_timespec(struct timespec64 *spec,
+ u64 packed_time)
+{
+ spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
+ spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
+}
+
+static int ocfs2_refresh_inode_from_lvb(struct inode *inode)
+{
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
+ struct ocfs2_meta_lvb *lvb;
+ struct timespec64 ctime;
+
+ mlog_meta_lvb(0, lockres);
+
+ lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
+ if (inode_wrong_type(inode, be16_to_cpu(lvb->lvb_imode)))
+ return -ESTALE;
+
+ /* We're safe here without the lockres lock... */
+ spin_lock(&oi->ip_lock);
+ oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
+ i_size_write(inode, be64_to_cpu(lvb->lvb_isize));
+
+ oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
+ oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
+ ocfs2_set_inode_flags(inode);
+
+ /* fast-symlinks are a special case */
+ if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
+ inode->i_blocks = 0;
+ else
+ inode->i_blocks = ocfs2_inode_sector_count(inode);
+
+ i_uid_write(inode, be32_to_cpu(lvb->lvb_iuid));
+ i_gid_write(inode, be32_to_cpu(lvb->lvb_igid));
+ inode->i_mode = be16_to_cpu(lvb->lvb_imode);
+ set_nlink(inode, be16_to_cpu(lvb->lvb_inlink));
+ ocfs2_unpack_timespec(&inode->i_atime,
+ be64_to_cpu(lvb->lvb_iatime_packed));
+ ocfs2_unpack_timespec(&inode->i_mtime,
+ be64_to_cpu(lvb->lvb_imtime_packed));
+ ocfs2_unpack_timespec(&ctime,
+ be64_to_cpu(lvb->lvb_ictime_packed));
+ inode_set_ctime_to_ts(inode, ctime);
+ spin_unlock(&oi->ip_lock);
+ return 0;
+}
+
+static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
+ struct ocfs2_lock_res *lockres)
+{
+ struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
+
+ if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)
+ && lvb->lvb_version == OCFS2_LVB_VERSION
+ && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
+ return 1;
+ return 0;
+}
+
+/* Determine whether a lock resource needs to be refreshed, and
+ * arbitrate who gets to refresh it.
+ *
+ * 0 means no refresh needed.
+ *
+ * > 0 means you need to refresh this and you MUST call
+ * ocfs2_complete_lock_res_refresh afterwards. */
+static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
+{
+ unsigned long flags;
+ int status = 0;
+
+refresh_check:
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ goto bail;
+ }
+
+ if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ ocfs2_wait_on_refreshing_lock(lockres);
+ goto refresh_check;
+ }
+
+ /* Ok, I'll be the one to refresh this lock. */
+ lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ status = 1;
+bail:
+ mlog(0, "status %d\n", status);
+ return status;
+}
+
+/* If status is non-zero, I'll mark it as not being in refresh
+ * anymore, but I won't clear the needs-refresh flag. */
+static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
+ int status)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
+ if (!status)
+ lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ wake_up(&lockres->l_event);
+}
+
+/* may or may not return a bh if it went to disk. */
+static int ocfs2_inode_lock_update(struct inode *inode,
+ struct buffer_head **bh)
+{
+ int status = 0;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
+ struct ocfs2_dinode *fe;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ if (ocfs2_mount_local(osb))
+ goto bail;
+
+ spin_lock(&oi->ip_lock);
+ if (oi->ip_flags & OCFS2_INODE_DELETED) {
+ mlog(0, "Orphaned inode %llu was deleted while we "
+ "were waiting on a lock. ip_flags = 0x%x\n",
+ (unsigned long long)oi->ip_blkno, oi->ip_flags);
+ spin_unlock(&oi->ip_lock);
+ status = -ENOENT;
+ goto bail;
+ }
+ spin_unlock(&oi->ip_lock);
+
+ if (!ocfs2_should_refresh_lock_res(lockres))
+ goto bail;
+
+ /* This will discard any caching information we might have had
+ * for the inode metadata. */
+ ocfs2_metadata_cache_purge(INODE_CACHE(inode));
+
+ ocfs2_extent_map_trunc(inode, 0);
+
+ if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
+ mlog(0, "Trusting LVB on inode %llu\n",
+ (unsigned long long)oi->ip_blkno);
+ status = ocfs2_refresh_inode_from_lvb(inode);
+ goto bail_refresh;
+ } else {
+ /* Boo, we have to go to disk. */
+ /* read bh, cast, ocfs2_refresh_inode */
+ status = ocfs2_read_inode_block(inode, bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail_refresh;
+ }
+ fe = (struct ocfs2_dinode *) (*bh)->b_data;
+ if (inode_wrong_type(inode, le16_to_cpu(fe->i_mode))) {
+ status = -ESTALE;
+ goto bail_refresh;
+ }
+
+ /* This is a good chance to make sure we're not
+ * locking an invalid object. ocfs2_read_inode_block()
+ * already checked that the inode block is sane.
+ *
+ * We bug on a stale inode here because we checked
+ * above whether it was wiped from disk. The wiping
+ * node provides a guarantee that we receive that
+ * message and can mark the inode before dropping any
+ * locks associated with it. */
+ mlog_bug_on_msg(inode->i_generation !=
+ le32_to_cpu(fe->i_generation),
+ "Invalid dinode %llu disk generation: %u "
+ "inode->i_generation: %u\n",
+ (unsigned long long)oi->ip_blkno,
+ le32_to_cpu(fe->i_generation),
+ inode->i_generation);
+ mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
+ !(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
+ "Stale dinode %llu dtime: %llu flags: 0x%x\n",
+ (unsigned long long)oi->ip_blkno,
+ (unsigned long long)le64_to_cpu(fe->i_dtime),
+ le32_to_cpu(fe->i_flags));
+
+ ocfs2_refresh_inode(inode, fe);
+ ocfs2_track_lock_refresh(lockres);
+ }
+
+ status = 0;
+bail_refresh:
+ ocfs2_complete_lock_res_refresh(lockres, status);
+bail:
+ return status;
+}
+
+static int ocfs2_assign_bh(struct inode *inode,
+ struct buffer_head **ret_bh,
+ struct buffer_head *passed_bh)
+{
+ int status;
+
+ if (passed_bh) {
+ /* Ok, the update went to disk for us, use the
+ * returned bh. */
+ *ret_bh = passed_bh;
+ get_bh(*ret_bh);
+
+ return 0;
+ }
+
+ status = ocfs2_read_inode_block(inode, ret_bh);
+ if (status < 0)
+ mlog_errno(status);
+
+ return status;
+}
+
+/*
+ * returns < 0 error if the callback will never be called, otherwise
+ * the result of the lock will be communicated via the callback.
+ */
+int ocfs2_inode_lock_full_nested(struct inode *inode,
+ struct buffer_head **ret_bh,
+ int ex,
+ int arg_flags,
+ int subclass)
+{
+ int status, level, acquired;
+ u32 dlm_flags;
+ struct ocfs2_lock_res *lockres = NULL;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct buffer_head *local_bh = NULL;
+
+ mlog(0, "inode %llu, take %s META lock\n",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ ex ? "EXMODE" : "PRMODE");
+
+ status = 0;
+ acquired = 0;
+	/* We'll allow faking a readonly metadata lock for
+	 * read-only devices. */
+ if (ocfs2_is_hard_readonly(osb)) {
+ if (ex)
+ status = -EROFS;
+ goto getbh;
+ }
+
+ if ((arg_flags & OCFS2_META_LOCK_GETBH) ||
+ ocfs2_mount_local(osb))
+ goto update;
+
+ if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
+ ocfs2_wait_for_recovery(osb);
+
+ lockres = &OCFS2_I(inode)->ip_inode_lockres;
+ level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
+ dlm_flags = 0;
+ if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
+ dlm_flags |= DLM_LKF_NOQUEUE;
+
+ status = __ocfs2_cluster_lock(osb, lockres, level, dlm_flags,
+ arg_flags, subclass, _RET_IP_);
+ if (status < 0) {
+ if (status != -EAGAIN)
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* Notify the error cleanup path to drop the cluster lock. */
+ acquired = 1;
+
+ /* We wait twice because a node may have died while we were in
+ * the lower dlm layers. The second time though, we've
+ * committed to owning this lock so we don't allow signals to
+ * abort the operation. */
+ if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
+ ocfs2_wait_for_recovery(osb);
+
+update:
+ /*
+ * We only see this flag if we're being called from
+ * ocfs2_read_locked_inode(). It means we're locking an inode
+ * which hasn't been populated yet, so clear the refresh flag
+ * and let the caller handle it.
+ */
+ if (inode->i_state & I_NEW) {
+ status = 0;
+ if (lockres)
+ ocfs2_complete_lock_res_refresh(lockres, 0);
+ goto bail;
+ }
+
+ /* This is fun. The caller may want a bh back, or it may
+ * not. ocfs2_inode_lock_update definitely wants one in, but
+ * may or may not read one, depending on what's in the
+ * LVB. The result of all of this is that we've *only* gone to
+ * disk if we have to, so the complexity is worthwhile. */
+ status = ocfs2_inode_lock_update(inode, &local_bh);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ goto bail;
+ }
+getbh:
+ if (ret_bh) {
+ status = ocfs2_assign_bh(inode, ret_bh, local_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
+bail:
+ if (status < 0) {
+ if (ret_bh && (*ret_bh)) {
+ brelse(*ret_bh);
+ *ret_bh = NULL;
+ }
+ if (acquired)
+ ocfs2_inode_unlock(inode, ex);
+ }
+
+ brelse(local_bh);
+ return status;
+}
+
+/*
+ * This is working around a lock inversion between tasks acquiring DLM
+ * locks while holding a page lock and the downconvert thread which
+ * blocks dlm lock acquiry while acquiring page locks.
+ *
+ * ** These _with_page variants are only intended to be called from aop
+ * methods that hold page locks and return a very specific *positive* error
+ * code that aop methods pass up to the VFS -- test for errors with != 0. **
+ *
+ * The DLM is called such that it returns -EAGAIN if it would have
+ * blocked waiting for the downconvert thread. In that case we unlock
+ * our page so the downconvert thread can make progress. Once we've
+ * done this we have to return AOP_TRUNCATED_PAGE so the aop method
+ * that called us can bubble that back up into the VFS who will then
+ * immediately retry the aop call.
+ */
+int ocfs2_inode_lock_with_page(struct inode *inode,
+ struct buffer_head **ret_bh,
+ int ex,
+ struct page *page)
+{
+ int ret;
+
+ ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
+ if (ret == -EAGAIN) {
+ unlock_page(page);
+ /*
+ * If we can't get inode lock immediately, we should not return
+ * directly here, since this will lead to a softlockup problem.
+ * The method is to get a blocking lock and immediately unlock
+ * before returning, this can avoid CPU resource waste due to
+ * lots of retries, and benefits fairness in getting lock.
+ */
+ if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
+ ocfs2_inode_unlock(inode, ex);
+ ret = AOP_TRUNCATED_PAGE;
+ }
+
+ return ret;
+}
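+
+/*
+ * A rough sketch of how an aop method is expected to use the _with_page
+ * variant (illustrative only; error handling elided, and any non-zero
+ * return value is passed straight back up to the VFS):
+ *
+ *	ret = ocfs2_inode_lock_with_page(inode, &di_bh, 0, page);
+ *	if (ret != 0)
+ *		return ret;	(may be AOP_TRUNCATED_PAGE)
+ *	...do work with the page locked and the cluster lock held...
+ *	ocfs2_inode_unlock(inode, 0);
+ */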
+
+int ocfs2_inode_lock_atime(struct inode *inode,
+ struct vfsmount *vfsmnt,
+ int *level, int wait)
+{
+ int ret;
+
+ if (wait)
+ ret = ocfs2_inode_lock(inode, NULL, 0);
+ else
+ ret = ocfs2_try_inode_lock(inode, NULL, 0);
+
+ if (ret < 0) {
+ if (ret != -EAGAIN)
+ mlog_errno(ret);
+ return ret;
+ }
+
+ /*
+ * If we should update atime, we will get EX lock,
+ * otherwise we just get PR lock.
+ */
+ if (ocfs2_should_update_atime(inode, vfsmnt)) {
+ struct buffer_head *bh = NULL;
+
+ ocfs2_inode_unlock(inode, 0);
+ if (wait)
+ ret = ocfs2_inode_lock(inode, &bh, 1);
+ else
+ ret = ocfs2_try_inode_lock(inode, &bh, 1);
+
+ if (ret < 0) {
+ if (ret != -EAGAIN)
+ mlog_errno(ret);
+ return ret;
+ }
+ *level = 1;
+ if (ocfs2_should_update_atime(inode, vfsmnt))
+ ocfs2_update_inode_atime(inode, bh);
+ brelse(bh);
+ } else
+ *level = 0;
+
+ return ret;
+}
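+
+/*
+ * Sketch of the expected caller pattern for the atime helper (the call
+ * site shown is illustrative; error handling elided):
+ *
+ *	int lock_level = 0;
+ *
+ *	ret = ocfs2_inode_lock_atime(inode, file->f_path.mnt, &lock_level, 1);
+ *	if (ret < 0)
+ *		return ret;
+ *	...read under the cluster lock...
+ *	ocfs2_inode_unlock(inode, lock_level);
+ */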
+
+void ocfs2_inode_unlock(struct inode *inode,
+ int ex)
+{
+ int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
+ struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ mlog(0, "inode %llu drop %s META lock\n",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno,
+ ex ? "EXMODE" : "PRMODE");
+
+ if (!ocfs2_is_hard_readonly(osb) &&
+ !ocfs2_mount_local(osb))
+ ocfs2_cluster_unlock(osb, lockres, level);
+}
+
+/*
+ * These _tracker variants are introduced to deal with the recursive cluster
+ * locking issue. The idea is to keep track of a lock holder on the stack of
+ * the current process. If there's a lock holder on the stack, we know the
+ * task context is already protected by cluster locking. Currently, they're
+ * used in some VFS entry routines.
+ *
+ * return < 0 on error, return == 0 if there's no lock holder on the stack
+ * before this call, return == 1 if this call would be a recursive locking.
+ * return -EINVAL if this lock attempt would cause an upgrade, which is forbidden.
+ *
+ * When taking lock levels into account, we face several different situations.
+ *
+ * 1. no lock is held
+ * In this case, just lock the inode as requested and return 0
+ *
+ * 2. We are holding a lock
+ * For this situation, things diverge into several cases
+ *
+ * wanted holding what to do
+ * ex ex see 2.1 below
+ * ex pr see 2.2 below
+ * pr ex see 2.1 below
+ * pr pr see 2.1 below
+ *
+ * 2.1 The lock level already being held is compatible
+ * with the wanted level, so no lock action will be taken.
+ *
+ * 2.2 Otherwise, an upgrade is needed, but it is forbidden.
+ *
+ * The reason an upgrade within a process is forbidden is that
+ * a lock upgrade may cause deadlock. The following illustrates
+ * how it happens.
+ *
+ * thread on node1 thread on node2
+ * ocfs2_inode_lock_tracker(ex=0)
+ *
+ * <====== ocfs2_inode_lock_tracker(ex=1)
+ *
+ * ocfs2_inode_lock_tracker(ex=1)
+ */
+int ocfs2_inode_lock_tracker(struct inode *inode,
+ struct buffer_head **ret_bh,
+ int ex,
+ struct ocfs2_lock_holder *oh)
+{
+ int status = 0;
+ struct ocfs2_lock_res *lockres;
+ struct ocfs2_lock_holder *tmp_oh;
+ struct pid *pid = task_pid(current);
+
+
+ lockres = &OCFS2_I(inode)->ip_inode_lockres;
+ tmp_oh = ocfs2_pid_holder(lockres, pid);
+
+ if (!tmp_oh) {
+ /*
+ * This corresponds to the case 1.
+ * We haven't got any lock before.
+ */
+ status = ocfs2_inode_lock_full(inode, ret_bh, ex, 0);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ return status;
+ }
+
+ oh->oh_ex = ex;
+ ocfs2_add_holder(lockres, oh);
+ return 0;
+ }
+
+ if (unlikely(ex && !tmp_oh->oh_ex)) {
+ /*
+ * case 2.2 upgrade may cause dead lock, forbid it.
+ */
+ mlog(ML_ERROR, "Recursive locking is not permitted to "
+ "upgrade to EX level from PR level.\n");
+ dump_stack();
+ return -EINVAL;
+ }
+
+ /*
+ * case 2.1: the OCFS2_META_LOCK_GETBH flag makes ocfs2_inode_lock_full()
+ * ignore the lock level and just hand back the buffer head.
+ */
+ if (ret_bh) {
+ status = ocfs2_inode_lock_full(inode, ret_bh, ex,
+ OCFS2_META_LOCK_GETBH);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ return status;
+ }
+ }
+ return 1;
+}
+
+void ocfs2_inode_unlock_tracker(struct inode *inode,
+ int ex,
+ struct ocfs2_lock_holder *oh,
+ int had_lock)
+{
+ struct ocfs2_lock_res *lockres;
+
+ lockres = &OCFS2_I(inode)->ip_inode_lockres;
+	/* had_lock means that the current process already took the cluster
+	 * lock previously.
+ * If had_lock is 1, we have nothing to do here.
+ * If had_lock is 0, we will release the lock.
+ */
+ if (!had_lock) {
+ ocfs2_inode_unlock(inode, oh->oh_ex);
+ ocfs2_remove_holder(lockres, oh);
+ }
+}
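+
+/*
+ * Sketch of how a VFS entry point is expected to pair the tracker calls
+ * (the holder must live on the caller's stack; error handling elided):
+ *
+ *	struct ocfs2_lock_holder oh;
+ *	int had_lock;
+ *
+ *	had_lock = ocfs2_inode_lock_tracker(inode, &bh, ex, &oh);
+ *	if (had_lock < 0)
+ *		return had_lock;
+ *	...work protected by the inode cluster lock...
+ *	ocfs2_inode_unlock_tracker(inode, ex, &oh, had_lock);
+ */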
+
+int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
+{
+ struct ocfs2_lock_res *lockres;
+ struct ocfs2_orphan_scan_lvb *lvb;
+ int status = 0;
+
+ if (ocfs2_is_hard_readonly(osb))
+ return -EROFS;
+
+ if (ocfs2_mount_local(osb))
+ return 0;
+
+ lockres = &osb->osb_orphan_scan.os_lockres;
+ status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
+ if (status < 0)
+ return status;
+
+ lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
+ if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
+ lvb->lvb_version == OCFS2_ORPHAN_LVB_VERSION)
+ *seqno = be32_to_cpu(lvb->lvb_os_seqno);
+ else
+ *seqno = osb->osb_orphan_scan.os_seqno + 1;
+
+ return status;
+}
+
+void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno)
+{
+ struct ocfs2_lock_res *lockres;
+ struct ocfs2_orphan_scan_lvb *lvb;
+
+ if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) {
+ lockres = &osb->osb_orphan_scan.os_lockres;
+ lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
+ lvb->lvb_version = OCFS2_ORPHAN_LVB_VERSION;
+ lvb->lvb_os_seqno = cpu_to_be32(seqno);
+ ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
+ }
+}
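+
+/*
+ * Rough sketch of how the orphan scan code is expected to use this pair:
+ * take the lock to read the cluster-wide sequence number from the LVB,
+ * only scan if no other node bumped it, then publish the new value on
+ * unlock (illustrative only; error handling elided):
+ *
+ *	status = ocfs2_orphan_scan_lock(osb, &seqno);
+ *	if (status < 0)
+ *		return;
+ *	if (seqno == osb->osb_orphan_scan.os_seqno) {
+ *		...run the scan...
+ *		seqno++;
+ *	}
+ *	ocfs2_orphan_scan_unlock(osb, seqno);
+ */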
+
+int ocfs2_super_lock(struct ocfs2_super *osb,
+ int ex)
+{
+ int status = 0;
+ int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
+ struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
+
+ if (ocfs2_is_hard_readonly(osb))
+ return -EROFS;
+
+ if (ocfs2_mount_local(osb))
+ goto bail;
+
+ status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* The super block lock path is really in the best position to
+ * know when resources covered by the lock need to be
+ * refreshed, so we do it here. Of course, making sense of
+ * everything is up to the caller :) */
+ status = ocfs2_should_refresh_lock_res(lockres);
+ if (status) {
+ status = ocfs2_refresh_slot_info(osb);
+
+ ocfs2_complete_lock_res_refresh(lockres, status);
+
+ if (status < 0) {
+ ocfs2_cluster_unlock(osb, lockres, level);
+ mlog_errno(status);
+ }
+ ocfs2_track_lock_refresh(lockres);
+ }
+bail:
+ return status;
+}
+
+void ocfs2_super_unlock(struct ocfs2_super *osb,
+ int ex)
+{
+ int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
+ struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
+
+ if (!ocfs2_mount_local(osb))
+ ocfs2_cluster_unlock(osb, lockres, level);
+}
+
+int ocfs2_rename_lock(struct ocfs2_super *osb)
+{
+ int status;
+ struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
+
+ if (ocfs2_is_hard_readonly(osb))
+ return -EROFS;
+
+ if (ocfs2_mount_local(osb))
+ return 0;
+
+ status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
+ if (status < 0)
+ mlog_errno(status);
+
+ return status;
+}
+
+void ocfs2_rename_unlock(struct ocfs2_super *osb)
+{
+ struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
+
+ if (!ocfs2_mount_local(osb))
+ ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
+}
+
+int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
+{
+ int status;
+ struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
+
+ if (ocfs2_is_hard_readonly(osb))
+ return -EROFS;
+
+ if (ex)
+ down_write(&osb->nfs_sync_rwlock);
+ else
+ down_read(&osb->nfs_sync_rwlock);
+
+ if (ocfs2_mount_local(osb))
+ return 0;
+
+ status = ocfs2_cluster_lock(osb, lockres, ex ? LKM_EXMODE : LKM_PRMODE,
+ 0, 0);
+ if (status < 0) {
+ mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status);
+
+ if (ex)
+ up_write(&osb->nfs_sync_rwlock);
+ else
+ up_read(&osb->nfs_sync_rwlock);
+ }
+
+ return status;
+}
+
+void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
+{
+ struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
+
+ if (!ocfs2_mount_local(osb))
+ ocfs2_cluster_unlock(osb, lockres,
+ ex ? LKM_EXMODE : LKM_PRMODE);
+ if (ex)
+ up_write(&osb->nfs_sync_rwlock);
+ else
+ up_read(&osb->nfs_sync_rwlock);
+}
+
+int ocfs2_trim_fs_lock(struct ocfs2_super *osb,
+ struct ocfs2_trim_fs_info *info, int trylock)
+{
+ int status;
+ struct ocfs2_trim_fs_lvb *lvb;
+ struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
+
+ if (info)
+ info->tf_valid = 0;
+
+ if (ocfs2_is_hard_readonly(osb))
+ return -EROFS;
+
+ if (ocfs2_mount_local(osb))
+ return 0;
+
+ status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX,
+ trylock ? DLM_LKF_NOQUEUE : 0, 0);
+ if (status < 0) {
+ if (status != -EAGAIN)
+ mlog_errno(status);
+ return status;
+ }
+
+ if (info) {
+ lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
+ if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
+ lvb->lvb_version == OCFS2_TRIMFS_LVB_VERSION) {
+ info->tf_valid = 1;
+ info->tf_success = lvb->lvb_success;
+ info->tf_nodenum = be32_to_cpu(lvb->lvb_nodenum);
+ info->tf_start = be64_to_cpu(lvb->lvb_start);
+ info->tf_len = be64_to_cpu(lvb->lvb_len);
+ info->tf_minlen = be64_to_cpu(lvb->lvb_minlen);
+ info->tf_trimlen = be64_to_cpu(lvb->lvb_trimlen);
+ }
+ }
+
+ return status;
+}
+
+void ocfs2_trim_fs_unlock(struct ocfs2_super *osb,
+ struct ocfs2_trim_fs_info *info)
+{
+ struct ocfs2_trim_fs_lvb *lvb;
+ struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
+
+ if (ocfs2_mount_local(osb))
+ return;
+
+ if (info) {
+ lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
+ lvb->lvb_version = OCFS2_TRIMFS_LVB_VERSION;
+ lvb->lvb_success = info->tf_success;
+ lvb->lvb_nodenum = cpu_to_be32(info->tf_nodenum);
+ lvb->lvb_start = cpu_to_be64(info->tf_start);
+ lvb->lvb_len = cpu_to_be64(info->tf_len);
+ lvb->lvb_minlen = cpu_to_be64(info->tf_minlen);
+ lvb->lvb_trimlen = cpu_to_be64(info->tf_trimlen);
+ }
+
+ ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
+}
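+
+/*
+ * Sketch of the intended trim lock protocol (illustrative only): try the
+ * lock without queueing first so another node's result can be reused from
+ * the LVB, fall back to a blocking lock, and publish our own result on
+ * unlock:
+ *
+ *	ret = ocfs2_trim_fs_lock(osb, &info, 1);
+ *	if (ret == -EAGAIN)
+ *		ret = ocfs2_trim_fs_lock(osb, &info, 0);
+ *	if (ret < 0)
+ *		return ret;
+ *	if (!(info.tf_valid && info.tf_success))
+ *		...do the actual trim and fill in info...
+ *	ocfs2_trim_fs_unlock(osb, &info);
+ */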
+
+int ocfs2_dentry_lock(struct dentry *dentry, int ex)
+{
+ int ret;
+ int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
+ struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
+ struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
+
+ BUG_ON(!dl);
+
+ if (ocfs2_is_hard_readonly(osb)) {
+ if (ex)
+ return -EROFS;
+ return 0;
+ }
+
+ if (ocfs2_mount_local(osb))
+ return 0;
+
+ ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
+ if (ret < 0)
+ mlog_errno(ret);
+
+ return ret;
+}
+
+void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
+{
+ int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
+ struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
+ struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
+
+ if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
+ ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
+}
+
+/* Reference counting of the dlm debug structure. We want this because
+ * open references on the debug inodes can live on after a mount, so
+ * we can't rely on the ocfs2_super to always exist. */
+static void ocfs2_dlm_debug_free(struct kref *kref)
+{
+ struct ocfs2_dlm_debug *dlm_debug;
+
+ dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);
+
+ kfree(dlm_debug);
+}
+
+void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
+{
+ if (dlm_debug)
+ kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
+}
+
+static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
+{
+ kref_get(&debug->d_refcnt);
+}
+
+struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
+{
+ struct ocfs2_dlm_debug *dlm_debug;
+
+ dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
+ if (!dlm_debug) {
+ mlog_errno(-ENOMEM);
+ goto out;
+ }
+
+ kref_init(&dlm_debug->d_refcnt);
+ INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
+ dlm_debug->d_filter_secs = 0;
+out:
+ return dlm_debug;
+}
+
+/* Access to this is arbitrated for us via seq_file->sem. */
+struct ocfs2_dlm_seq_priv {
+ struct ocfs2_dlm_debug *p_dlm_debug;
+ struct ocfs2_lock_res p_iter_res;
+ struct ocfs2_lock_res p_tmp_res;
+};
+
+static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
+ struct ocfs2_dlm_seq_priv *priv)
+{
+ struct ocfs2_lock_res *iter, *ret = NULL;
+ struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;
+
+ assert_spin_locked(&ocfs2_dlm_tracking_lock);
+
+ list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
+ /* discover the head of the list */
+ if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
+ mlog(0, "End of list found, %p\n", ret);
+ break;
+ }
+
+ /* We track our "dummy" iteration lockres' by a NULL
+ * l_ops field. */
+ if (iter->l_ops != NULL) {
+ ret = iter;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
+{
+ struct ocfs2_dlm_seq_priv *priv = m->private;
+ struct ocfs2_lock_res *iter;
+
+ spin_lock(&ocfs2_dlm_tracking_lock);
+ iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
+ if (iter) {
+ /* Since lockres' have the lifetime of their container
+ * (which can be inodes, ocfs2_supers, etc) we want to
+ * copy this out to a temporary lockres while still
+ * under the spinlock. Obviously after this we can't
+ * trust any pointers on the copy returned, but that's
+ * ok as the information we want isn't typically held
+ * in them. */
+ priv->p_tmp_res = *iter;
+ iter = &priv->p_tmp_res;
+ }
+ spin_unlock(&ocfs2_dlm_tracking_lock);
+
+ return iter;
+}
+
+static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
+{
+}
+
+static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct ocfs2_dlm_seq_priv *priv = m->private;
+ struct ocfs2_lock_res *iter = v;
+ struct ocfs2_lock_res *dummy = &priv->p_iter_res;
+
+ spin_lock(&ocfs2_dlm_tracking_lock);
+ iter = ocfs2_dlm_next_res(iter, priv);
+ list_del_init(&dummy->l_debug_list);
+ if (iter) {
+ list_add(&dummy->l_debug_list, &iter->l_debug_list);
+ priv->p_tmp_res = *iter;
+ iter = &priv->p_tmp_res;
+ }
+ spin_unlock(&ocfs2_dlm_tracking_lock);
+
+ return iter;
+}
+
+/*
+ * Version is used by debugfs.ocfs2 to determine the format being used
+ *
+ * New in version 2
+ * - Lock stats printed
+ * New in version 3
+ * - Max time in lock stats is in usecs (instead of nsecs)
+ * New in version 4
+ * - Add last pr/ex unlock times and first lock wait time in usecs
+ */
+#define OCFS2_DLM_DEBUG_STR_VERSION 4
+static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
+{
+ int i;
+ char *lvb;
+ struct ocfs2_lock_res *lockres = v;
+#ifdef CONFIG_OCFS2_FS_STATS
+ u64 now, last;
+ struct ocfs2_dlm_debug *dlm_debug =
+ ((struct ocfs2_dlm_seq_priv *)m->private)->p_dlm_debug;
+#endif
+
+ if (!lockres)
+ return -EINVAL;
+
+#ifdef CONFIG_OCFS2_FS_STATS
+ if (!lockres->l_lock_wait && dlm_debug->d_filter_secs) {
+ now = ktime_to_us(ktime_get_real());
+ if (lockres->l_lock_prmode.ls_last >
+ lockres->l_lock_exmode.ls_last)
+ last = lockres->l_lock_prmode.ls_last;
+ else
+ last = lockres->l_lock_exmode.ls_last;
+ /*
+ * Use d_filter_secs field to filter lock resources dump,
+ * the default d_filter_secs(0) value filters nothing,
+ * otherwise, only dump the last N seconds active lock
+ * resources.
+ */
+ if (div_u64(now - last, 1000000) > dlm_debug->d_filter_secs)
+ return 0;
+ }
+#endif
+
+ seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);
+
+ if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
+ seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1,
+ lockres->l_name,
+ (unsigned int)ocfs2_get_dentry_lock_ino(lockres));
+ else
+ seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);
+
+ seq_printf(m, "%d\t"
+ "0x%lx\t"
+ "0x%x\t"
+ "0x%x\t"
+ "%u\t"
+ "%u\t"
+ "%d\t"
+ "%d\t",
+ lockres->l_level,
+ lockres->l_flags,
+ lockres->l_action,
+ lockres->l_unlock_action,
+ lockres->l_ro_holders,
+ lockres->l_ex_holders,
+ lockres->l_requested,
+ lockres->l_blocking);
+
+ /* Dump the raw LVB */
+ lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
+ for(i = 0; i < DLM_LVB_LEN; i++)
+ seq_printf(m, "0x%x\t", lvb[i]);
+
+#ifdef CONFIG_OCFS2_FS_STATS
+# define lock_num_prmode(_l) ((_l)->l_lock_prmode.ls_gets)
+# define lock_num_exmode(_l) ((_l)->l_lock_exmode.ls_gets)
+# define lock_num_prmode_failed(_l) ((_l)->l_lock_prmode.ls_fail)
+# define lock_num_exmode_failed(_l) ((_l)->l_lock_exmode.ls_fail)
+# define lock_total_prmode(_l) ((_l)->l_lock_prmode.ls_total)
+# define lock_total_exmode(_l) ((_l)->l_lock_exmode.ls_total)
+# define lock_max_prmode(_l) ((_l)->l_lock_prmode.ls_max)
+# define lock_max_exmode(_l) ((_l)->l_lock_exmode.ls_max)
+# define lock_refresh(_l) ((_l)->l_lock_refresh)
+# define lock_last_prmode(_l) ((_l)->l_lock_prmode.ls_last)
+# define lock_last_exmode(_l) ((_l)->l_lock_exmode.ls_last)
+# define lock_wait(_l) ((_l)->l_lock_wait)
+#else
+# define lock_num_prmode(_l) (0)
+# define lock_num_exmode(_l) (0)
+# define lock_num_prmode_failed(_l) (0)
+# define lock_num_exmode_failed(_l) (0)
+# define lock_total_prmode(_l) (0ULL)
+# define lock_total_exmode(_l) (0ULL)
+# define lock_max_prmode(_l) (0)
+# define lock_max_exmode(_l) (0)
+# define lock_refresh(_l) (0)
+# define lock_last_prmode(_l) (0ULL)
+# define lock_last_exmode(_l) (0ULL)
+# define lock_wait(_l) (0ULL)
+#endif
+ /* The following seq_print was added in version 2 of this output */
+ seq_printf(m, "%u\t"
+ "%u\t"
+ "%u\t"
+ "%u\t"
+ "%llu\t"
+ "%llu\t"
+ "%u\t"
+ "%u\t"
+ "%u\t"
+ "%llu\t"
+ "%llu\t"
+ "%llu\t",
+ lock_num_prmode(lockres),
+ lock_num_exmode(lockres),
+ lock_num_prmode_failed(lockres),
+ lock_num_exmode_failed(lockres),
+ lock_total_prmode(lockres),
+ lock_total_exmode(lockres),
+ lock_max_prmode(lockres),
+ lock_max_exmode(lockres),
+ lock_refresh(lockres),
+ lock_last_prmode(lockres),
+ lock_last_exmode(lockres),
+ lock_wait(lockres));
+
+ /* End the line */
+ seq_printf(m, "\n");
+ return 0;
+}
+
+static const struct seq_operations ocfs2_dlm_seq_ops = {
+ .start = ocfs2_dlm_seq_start,
+ .stop = ocfs2_dlm_seq_stop,
+ .next = ocfs2_dlm_seq_next,
+ .show = ocfs2_dlm_seq_show,
+};
+
+static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct ocfs2_dlm_seq_priv *priv = seq->private;
+ struct ocfs2_lock_res *res = &priv->p_iter_res;
+
+ ocfs2_remove_lockres_tracking(res);
+ ocfs2_put_dlm_debug(priv->p_dlm_debug);
+ return seq_release_private(inode, file);
+}
+
+static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
+{
+ struct ocfs2_dlm_seq_priv *priv;
+ struct ocfs2_super *osb;
+
+ priv = __seq_open_private(file, &ocfs2_dlm_seq_ops, sizeof(*priv));
+ if (!priv) {
+ mlog_errno(-ENOMEM);
+ return -ENOMEM;
+ }
+
+ osb = inode->i_private;
+ ocfs2_get_dlm_debug(osb->osb_dlm_debug);
+ priv->p_dlm_debug = osb->osb_dlm_debug;
+ INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
+
+ ocfs2_add_lockres_tracking(&priv->p_iter_res,
+ priv->p_dlm_debug);
+
+ return 0;
+}
+
+static const struct file_operations ocfs2_dlm_debug_fops = {
+ .open = ocfs2_dlm_debug_open,
+ .release = ocfs2_dlm_debug_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static void ocfs2_dlm_init_debug(struct ocfs2_super *osb)
+{
+ struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
+
+ debugfs_create_file("locking_state", S_IFREG|S_IRUSR,
+ osb->osb_debug_root, osb, &ocfs2_dlm_debug_fops);
+
+ debugfs_create_u32("locking_filter", 0600, osb->osb_debug_root,
+ &dlm_debug->d_filter_secs);
+ ocfs2_get_dlm_debug(dlm_debug);
+}
+
+static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
+{
+ struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
+
+ if (dlm_debug)
+ ocfs2_put_dlm_debug(dlm_debug);
+}
+
+int ocfs2_dlm_init(struct ocfs2_super *osb)
+{
+ int status = 0;
+ struct ocfs2_cluster_connection *conn = NULL;
+
+ if (ocfs2_mount_local(osb)) {
+ osb->node_num = 0;
+ goto local;
+ }
+
+ ocfs2_dlm_init_debug(osb);
+
+ /* launch downconvert thread */
+ osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc-%s",
+ osb->uuid_str);
+ if (IS_ERR(osb->dc_task)) {
+ status = PTR_ERR(osb->dc_task);
+ osb->dc_task = NULL;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* for now, uuid == domain */
+ status = ocfs2_cluster_connect(osb->osb_cluster_stack,
+ osb->osb_cluster_name,
+ strlen(osb->osb_cluster_name),
+ osb->uuid_str,
+ strlen(osb->uuid_str),
+ &lproto, ocfs2_do_node_down, osb,
+ &conn);
+ if (status) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_cluster_this_node(conn, &osb->node_num);
+ if (status < 0) {
+ mlog_errno(status);
+ mlog(ML_ERROR,
+ "could not find this host's node number\n");
+ ocfs2_cluster_disconnect(conn, 0);
+ goto bail;
+ }
+
+local:
+ ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
+ ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
+ ocfs2_nfs_sync_lock_init(osb);
+ ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
+
+ osb->cconn = conn;
+bail:
+ if (status < 0) {
+ ocfs2_dlm_shutdown_debug(osb);
+ if (osb->dc_task)
+ kthread_stop(osb->dc_task);
+ }
+
+ return status;
+}
+
+void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
+ int hangup_pending)
+{
+ ocfs2_drop_osb_locks(osb);
+
+ /*
+ * Now that we have dropped all locks and ocfs2_dismount_volume()
+ * has disabled recovery, the DLM won't be talking to us. It's
+ * safe to tear things down before disconnecting the cluster.
+ */
+
+ if (osb->dc_task) {
+ kthread_stop(osb->dc_task);
+ osb->dc_task = NULL;
+ }
+
+ ocfs2_lock_res_free(&osb->osb_super_lockres);
+ ocfs2_lock_res_free(&osb->osb_rename_lockres);
+ ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
+ ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);
+
+ if (osb->cconn) {
+ ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
+ osb->cconn = NULL;
+
+ ocfs2_dlm_shutdown_debug(osb);
+ }
+}
+
+static int ocfs2_drop_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres)
+{
+ int ret;
+ unsigned long flags;
+ u32 lkm_flags = 0;
+
+ /* We didn't get anywhere near actually using this lockres. */
+ if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
+ goto out;
+
+ if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
+ lkm_flags |= DLM_LKF_VALBLK;
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+
+ mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
+ "lockres %s, flags 0x%lx\n",
+ lockres->l_name, lockres->l_flags);
+
+ while (lockres->l_flags & OCFS2_LOCK_BUSY) {
+ mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
+ "%u, unlock_action = %u\n",
+ lockres->l_name, lockres->l_flags, lockres->l_action,
+ lockres->l_unlock_action);
+
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ /* XXX: Today we just wait on any busy
+ * locks... Perhaps we need to cancel converts in the
+ * future? */
+ ocfs2_wait_on_busy_lock(lockres);
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ }
+
+ if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
+ if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
+ lockres->l_level == DLM_LOCK_EX &&
+ !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
+ lockres->l_ops->set_lvb(lockres);
+ }
+
+ if (lockres->l_flags & OCFS2_LOCK_BUSY)
+ mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
+ lockres->l_name);
+ if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
+ mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
+
+ if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ goto out;
+ }
+
+ lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
+
+ /* make sure we never get here while waiting for an ast to
+ * fire. */
+ BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
+
+ /* is this necessary? */
+ lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
+ lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ mlog(0, "lock %s\n", lockres->l_name);
+
+ ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags);
+ if (ret) {
+ ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
+ mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
+ ocfs2_dlm_dump_lksb(&lockres->l_lksb);
+ BUG();
+ }
+ mlog(0, "lock %s, successful return from ocfs2_dlm_unlock\n",
+ lockres->l_name);
+
+ ocfs2_wait_on_busy_lock(lockres);
+out:
+ return 0;
+}
+
+static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres);
+
+/* Mark the lockres as being dropped. It will no longer be
+ * queued if blocking, but we still may have to wait on it
+ * being dequeued from the downconvert thread before we can consider
+ * it safe to drop.
+ *
+ * You can *not* attempt to call cluster_lock on this lockres anymore. */
+void ocfs2_mark_lockres_freeing(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres)
+{
+ int status;
+ struct ocfs2_mask_waiter mw;
+ unsigned long flags, flags2;
+
+ ocfs2_init_mask_waiter(&mw);
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ lockres->l_flags |= OCFS2_LOCK_FREEING;
+ if (lockres->l_flags & OCFS2_LOCK_QUEUED && current == osb->dc_task) {
+ /*
+ * We know the downconvert is queued but not in progress
+ * because we are the downconvert thread and processing
+ * different lock. So we can just remove the lock from the
+ * queue. This is not only an optimization but also a way
+ * to avoid the following deadlock:
+ * ocfs2_dentry_post_unlock()
+ * ocfs2_dentry_lock_put()
+ * ocfs2_drop_dentry_lock()
+ * iput()
+ * ocfs2_evict_inode()
+ * ocfs2_clear_inode()
+ * ocfs2_mark_lockres_freeing()
+ * ... blocks waiting for OCFS2_LOCK_QUEUED
+ * since we are the downconvert thread which
+ * should clear the flag.
+ */
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ spin_lock_irqsave(&osb->dc_task_lock, flags2);
+ list_del_init(&lockres->l_blocked_list);
+ osb->blocked_lock_count--;
+ spin_unlock_irqrestore(&osb->dc_task_lock, flags2);
+ /*
+ * Warn if we recurse into another post_unlock call. Strictly
+ * speaking it isn't a problem but we need to be careful if
+ * that happens (stack overflow, deadlocks, ...) so warn if
+ * ocfs2 grows a path for which this can happen.
+ */
+ WARN_ON_ONCE(lockres->l_ops->post_unlock);
+ /* Since the lock is freeing we don't do much in the fn below */
+ ocfs2_process_blocked_lock(osb, lockres);
+ return;
+ }
+ while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
+ lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ mlog(0, "Waiting on lockres %s\n", lockres->l_name);
+
+ status = ocfs2_wait_for_mask(&mw);
+ if (status)
+ mlog_errno(status);
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ }
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+}
+
+void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres)
+{
+ int ret;
+
+ ocfs2_mark_lockres_freeing(osb, lockres);
+ ret = ocfs2_drop_lock(osb, lockres);
+ if (ret)
+ mlog_errno(ret);
+}
+
+static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
+{
+ ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
+ ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
+ ocfs2_simple_drop_lockres(osb, &osb->osb_nfs_sync_lockres);
+ ocfs2_simple_drop_lockres(osb, &osb->osb_orphan_scan.os_lockres);
+}
+
+int ocfs2_drop_inode_locks(struct inode *inode)
+{
+ int status, err;
+
+ /* No need to call ocfs2_mark_lockres_freeing here -
+ * ocfs2_clear_inode has done it for us. */
+
+ err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
+ &OCFS2_I(inode)->ip_open_lockres);
+ if (err < 0)
+ mlog_errno(err);
+
+ status = err;
+
+ err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
+ &OCFS2_I(inode)->ip_inode_lockres);
+ if (err < 0)
+ mlog_errno(err);
+ if (err < 0 && !status)
+ status = err;
+
+ err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
+ &OCFS2_I(inode)->ip_rw_lockres);
+ if (err < 0)
+ mlog_errno(err);
+ if (err < 0 && !status)
+ status = err;
+
+ return status;
+}
+
+static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
+ int new_level)
+{
+ assert_spin_locked(&lockres->l_lock);
+
+ BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
+
+ if (lockres->l_level <= new_level) {
+ mlog(ML_ERROR, "lockres %s, lvl %d <= %d, blcklst %d, mask %d, "
+ "type %d, flags 0x%lx, hold %d %d, act %d %d, req %d, "
+ "block %d, pgen %d\n", lockres->l_name, lockres->l_level,
+ new_level, list_empty(&lockres->l_blocked_list),
+ list_empty(&lockres->l_mask_waiters), lockres->l_type,
+ lockres->l_flags, lockres->l_ro_holders,
+ lockres->l_ex_holders, lockres->l_action,
+ lockres->l_unlock_action, lockres->l_requested,
+ lockres->l_blocking, lockres->l_pending_gen);
+ BUG();
+ }
+
+ mlog(ML_BASTS, "lockres %s, level %d => %d, blocking %d\n",
+ lockres->l_name, lockres->l_level, new_level, lockres->l_blocking);
+
+ lockres->l_action = OCFS2_AST_DOWNCONVERT;
+ lockres->l_requested = new_level;
+ lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
+ return lockres_set_pending(lockres);
+}
+
+static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int new_level,
+ int lvb,
+ unsigned int generation)
+{
+ int ret;
+ u32 dlm_flags = DLM_LKF_CONVERT;
+
+ mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
+ lockres->l_level, new_level);
+
+ /*
+	 * With DLM_LKF_VALBLK, fsdlm behaves differently from o2cb. It always
+	 * expects DLM_LKF_VALBLK to be set if the LKB has an LVB, so that we
+	 * can recover correctly from node failure. Otherwise, we may get an
+	 * invalid LVB in the LKB without DLM_SBF_VALNOTVALID being set.
+ */
+ if (ocfs2_userspace_stack(osb) &&
+ lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
+ lvb = 1;
+
+ if (lvb)
+ dlm_flags |= DLM_LKF_VALBLK;
+
+ ret = ocfs2_dlm_lock(osb->cconn,
+ new_level,
+ &lockres->l_lksb,
+ dlm_flags,
+ lockres->l_name,
+ OCFS2_LOCK_ID_MAX_LEN - 1);
+ lockres_clear_pending(lockres, generation, osb);
+ if (ret) {
+ ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
+ ocfs2_recover_from_dlm_error(lockres, 1);
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ return ret;
+}
+
+/* returns 1 when the caller should unlock and call ocfs2_dlm_unlock */
+static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres)
+{
+ assert_spin_locked(&lockres->l_lock);
+
+ if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
+ /* If we're already trying to cancel a lock conversion
+ * then just drop the spinlock and allow the caller to
+ * requeue this lock. */
+ mlog(ML_BASTS, "lockres %s, skip convert\n", lockres->l_name);
+ return 0;
+ }
+
+ /* were we in a convert when we got the bast fire? */
+ BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
+ lockres->l_action != OCFS2_AST_DOWNCONVERT);
+ /* set things up for the unlockast to know to just
+ * clear out the ast_action and unset busy, etc. */
+ lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;
+
+ mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
+ "lock %s, invalid flags: 0x%lx\n",
+ lockres->l_name, lockres->l_flags);
+
+ mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
+
+ return 1;
+}
+
+static int ocfs2_cancel_convert(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres)
+{
+ int ret;
+
+ ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
+ DLM_LKF_CANCEL);
+ if (ret) {
+ ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
+ ocfs2_recover_from_dlm_error(lockres, 0);
+ }
+
+ mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
+
+ return ret;
+}
+
+static int ocfs2_unblock_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ struct ocfs2_unblock_ctl *ctl)
+{
+ unsigned long flags;
+ int blocking;
+ int new_level;
+ int level;
+ int ret = 0;
+ int set_lvb = 0;
+ unsigned int gen;
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+
+recheck:
+ /*
+ * Is it still blocking? If not, we have no more work to do.
+ */
+ if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
+ BUG_ON(lockres->l_blocking != DLM_LOCK_NL);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ ret = 0;
+ goto leave;
+ }
+
+ if (lockres->l_flags & OCFS2_LOCK_BUSY) {
+ /* XXX
+ * This is a *big* race. The OCFS2_LOCK_PENDING flag
+ * exists entirely for one reason - another thread has set
+ * OCFS2_LOCK_BUSY, but has *NOT* yet called dlm_lock().
+ *
+ * If we do ocfs2_cancel_convert() before the other thread
+ * calls dlm_lock(), our cancel will do nothing. We will
+ * get no ast, and we will have no way of knowing the
+ * cancel failed. Meanwhile, the other thread will call
+ * into dlm_lock() and wait...forever.
+ *
+ * Why forever? Because another node has asked for the
+ * lock first; that's why we're here in unblock_lock().
+ *
+ * The solution is OCFS2_LOCK_PENDING. When PENDING is
+ * set, we just requeue the unblock. Only when the other
+ * thread has called dlm_lock() and cleared PENDING will
+ * we then cancel their request.
+ *
+		 * All callers of dlm_lock() must set OCFS2_LOCK_PENDING
+		 * at the same time they set OCFS2_LOCK_BUSY. They must
+		 * clear OCFS2_LOCK_PENDING after dlm_lock() returns.
+ */
+ if (lockres->l_flags & OCFS2_LOCK_PENDING) {
+ mlog(ML_BASTS, "lockres %s, ReQ: Pending\n",
+ lockres->l_name);
+ goto leave_requeue;
+ }
+
+ ctl->requeue = 1;
+ ret = ocfs2_prepare_cancel_convert(osb, lockres);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ if (ret) {
+ ret = ocfs2_cancel_convert(osb, lockres);
+ if (ret < 0)
+ mlog_errno(ret);
+ }
+ goto leave;
+ }
+
+ /*
+ * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is
+ * set when the ast is received for an upconvert just before the
+ * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
+ * on the heels of the ast, we want to delay the downconvert just
+ * enough to allow the up requestor to do its task. Because this
+ * lock is in the blocked queue, the lock will be downconverted
+ * as soon as the requestor is done with the lock.
+ */
+ if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
+ goto leave_requeue;
+
+ /*
+ * How can we block and yet be at NL? We were trying to upconvert
+ * from NL and got canceled. The code comes back here, and now
+ * we notice and clear BLOCKING.
+ */
+ if (lockres->l_level == DLM_LOCK_NL) {
+ BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
+ mlog(ML_BASTS, "lockres %s, Aborting dc\n", lockres->l_name);
+ lockres->l_blocking = DLM_LOCK_NL;
+ lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ goto leave;
+ }
+
+ /* if we're blocking an exclusive and we have *any* holders,
+ * then requeue. */
+ if ((lockres->l_blocking == DLM_LOCK_EX)
+ && (lockres->l_ex_holders || lockres->l_ro_holders)) {
+ mlog(ML_BASTS, "lockres %s, ReQ: EX/PR Holders %u,%u\n",
+ lockres->l_name, lockres->l_ex_holders,
+ lockres->l_ro_holders);
+ goto leave_requeue;
+ }
+
+ /* If it's a PR we're blocking, then only
+ * requeue if we've got any EX holders */
+ if (lockres->l_blocking == DLM_LOCK_PR &&
+ lockres->l_ex_holders) {
+ mlog(ML_BASTS, "lockres %s, ReQ: EX Holders %u\n",
+ lockres->l_name, lockres->l_ex_holders);
+ goto leave_requeue;
+ }
+
+ /*
+ * Can we get a lock in this state if the holder counts are
+ * zero? The meta data unblock code used to check this.
+ */
+ if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
+ && (lockres->l_flags & OCFS2_LOCK_REFRESHING)) {
+ mlog(ML_BASTS, "lockres %s, ReQ: Lock Refreshing\n",
+ lockres->l_name);
+ goto leave_requeue;
+ }
+
+ new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
+
+ if (lockres->l_ops->check_downconvert
+ && !lockres->l_ops->check_downconvert(lockres, new_level)) {
+ mlog(ML_BASTS, "lockres %s, ReQ: Checkpointing\n",
+ lockres->l_name);
+ goto leave_requeue;
+ }
+
+ /* If we get here, then we know that there are no more
+ * incompatible holders (and anyone asking for an incompatible
+ * lock is blocked). We can now downconvert the lock */
+ if (!lockres->l_ops->downconvert_worker)
+ goto downconvert;
+
+ /* Some lockres types want to do a bit of work before
+ * downconverting a lock. Allow that here. The worker function
+ * may sleep, so we save off a copy of what we're blocking as
+ * it may change while we're not holding the spin lock. */
+ blocking = lockres->l_blocking;
+ level = lockres->l_level;
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
+
+ if (ctl->unblock_action == UNBLOCK_STOP_POST) {
+ mlog(ML_BASTS, "lockres %s, UNBLOCK_STOP_POST\n",
+ lockres->l_name);
+ goto leave;
+ }
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
+ /* If this changed underneath us, then we can't drop
+ * it just yet. */
+ mlog(ML_BASTS, "lockres %s, block=%d:%d, level=%d:%d, "
+ "Recheck\n", lockres->l_name, blocking,
+ lockres->l_blocking, level, lockres->l_level);
+ goto recheck;
+ }
+
+downconvert:
+ ctl->requeue = 0;
+
+ if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
+ if (lockres->l_level == DLM_LOCK_EX)
+ set_lvb = 1;
+
+ /*
+ * We only set the lvb if the lock has been fully
+ * refreshed - otherwise we risk setting stale
+		 * data. If we don't set it, there's no need to actually clear
+		 * out the lvb here as its value is still valid.
+ */
+ if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
+ lockres->l_ops->set_lvb(lockres);
+ }
+
+ gen = ocfs2_prepare_downconvert(lockres, new_level);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb,
+ gen);
+ /* The dlm lock convert is being cancelled in background,
+ * ocfs2_cancel_convert() is asynchronous in fs/dlm,
+ * requeue it, try again later.
+ */
+ if (ret == -EBUSY) {
+ ctl->requeue = 1;
+ mlog(ML_BASTS, "lockres %s, ReQ: Downconvert busy\n",
+ lockres->l_name);
+ ret = 0;
+ msleep(20);
+ }
+
+leave:
+ if (ret)
+ mlog_errno(ret);
+ return ret;
+
+leave_requeue:
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ ctl->requeue = 1;
+
+ return 0;
+}
+
+static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
+ int blocking)
+{
+ struct inode *inode;
+ struct address_space *mapping;
+ struct ocfs2_inode_info *oi;
+
+ inode = ocfs2_lock_res_inode(lockres);
+ mapping = inode->i_mapping;
+
+ if (S_ISDIR(inode->i_mode)) {
+ oi = OCFS2_I(inode);
+ oi->ip_dir_lock_gen++;
+ mlog(0, "generation: %u\n", oi->ip_dir_lock_gen);
+ goto out_forget;
+ }
+
+ if (!S_ISREG(inode->i_mode))
+ goto out;
+
+ /*
+ * We need this before the filemap_fdatawrite() so that it can
+ * transfer the dirty bit from the PTE to the
+ * page. Unfortunately this means that even for EX->PR
+ * downconverts, we'll lose our mappings and have to build
+ * them up again.
+ */
+ unmap_mapping_range(mapping, 0, 0, 0);
+
+ if (filemap_fdatawrite(mapping)) {
+ mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ }
+ sync_mapping_buffers(mapping);
+ if (blocking == DLM_LOCK_EX) {
+ truncate_inode_pages(mapping, 0);
+ } else {
+ /* We only need to wait on the I/O if we're not also
+ * truncating pages because truncate_inode_pages waits
+ * for us above. We don't truncate pages if we're
+ * blocking anything < EXMODE because we want to keep
+ * them around in that case. */
+ filemap_fdatawait(mapping);
+ }
+
+out_forget:
+ forget_all_cached_acls(inode);
+
+out:
+ return UNBLOCK_CONTINUE;
+}
+
+static int ocfs2_ci_checkpointed(struct ocfs2_caching_info *ci,
+ struct ocfs2_lock_res *lockres,
+ int new_level)
+{
+ int checkpointed = ocfs2_ci_fully_checkpointed(ci);
+
+ BUG_ON(new_level != DLM_LOCK_NL && new_level != DLM_LOCK_PR);
+ BUG_ON(lockres->l_level != DLM_LOCK_EX && !checkpointed);
+
+ if (checkpointed)
+ return 1;
+
+ ocfs2_start_checkpoint(OCFS2_SB(ocfs2_metadata_cache_get_super(ci)));
+ return 0;
+}
+
+static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
+ int new_level)
+{
+ struct inode *inode = ocfs2_lock_res_inode(lockres);
+
+ return ocfs2_ci_checkpointed(INODE_CACHE(inode), lockres, new_level);
+}
+
+static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
+{
+ struct inode *inode = ocfs2_lock_res_inode(lockres);
+
+ __ocfs2_stuff_meta_lvb(inode);
+}
+
+/*
+ * Does the final reference drop on our dentry lock. Right now this
+ * happens in the downconvert thread, but we could choose to simplify the
+ * dlmglue API and push these off to the ocfs2_wq in the future.
+ */
+static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres)
+{
+ struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
+ ocfs2_dentry_lock_put(osb, dl);
+}
+
+/*
+ * d_delete() matching dentries before the lock downconvert.
+ *
+ * At this point, any process waiting to destroy the
+ * dentry_lock due to last ref count is stopped by the
+ * OCFS2_LOCK_QUEUED flag.
+ *
+ * We have two potential problems
+ *
+ * 1) If we do the last reference drop on our dentry_lock (via dput)
+ * we'll wind up in ocfs2_release_dentry_lock(), waiting on
+ * the downconvert to finish. Instead we take an elevated
+ * reference and push the drop until after we've completed our
+ * unblock processing.
+ *
+ * 2) There might be another process with a final reference,
+ * waiting on us to finish processing. If this is the case, we
+ * detect it and exit out - there are no more dentries anyway.
+ */
+static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
+ int blocking)
+{
+ struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
+ struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
+ struct dentry *dentry;
+ unsigned long flags;
+ int extra_ref = 0;
+
+ /*
+ * This node is blocking another node from getting a read
+ * lock. This happens when we've renamed within a
+ * directory. We've forced the other nodes to d_delete(), but
+ * we never actually dropped our lock because it's still
+ * valid. The downconvert code will retain a PR for this node,
+ * so there's no further work to do.
+ */
+ if (blocking == DLM_LOCK_PR)
+ return UNBLOCK_CONTINUE;
+
+ /*
+ * Mark this inode as potentially orphaned. The code in
+ * ocfs2_delete_inode() will figure out whether it actually
+ * needs to be freed or not.
+ */
+ spin_lock(&oi->ip_lock);
+ oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
+ spin_unlock(&oi->ip_lock);
+
+ /*
+ * Yuck. We need to make sure however that the check of
+ * OCFS2_LOCK_FREEING and the extra reference are atomic with
+ * respect to a reference decrement or the setting of that
+ * flag.
+ */
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ spin_lock(&dentry_attach_lock);
+ if (!(lockres->l_flags & OCFS2_LOCK_FREEING)
+ && dl->dl_count) {
+ dl->dl_count++;
+ extra_ref = 1;
+ }
+ spin_unlock(&dentry_attach_lock);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ mlog(0, "extra_ref = %d\n", extra_ref);
+
+ /*
+ * We have a process waiting on us in ocfs2_dentry_iput(),
+ * which means we can't have any more outstanding
+ * aliases. There's no need to do any more work.
+ */
+ if (!extra_ref)
+ return UNBLOCK_CONTINUE;
+
+ spin_lock(&dentry_attach_lock);
+ while (1) {
+ dentry = ocfs2_find_local_alias(dl->dl_inode,
+ dl->dl_parent_blkno, 1);
+ if (!dentry)
+ break;
+ spin_unlock(&dentry_attach_lock);
+
+ if (S_ISDIR(dl->dl_inode->i_mode))
+ shrink_dcache_parent(dentry);
+
+ mlog(0, "d_delete(%pd);\n", dentry);
+
+ /*
+ * The following dcache calls may do an
+ * iput(). Normally we don't want that from the
+ * downconverting thread, but in this case it's ok
+ * because the requesting node already has an
+ * exclusive lock on the inode, so it can't be queued
+ * for a downconvert.
+ */
+ d_delete(dentry);
+ dput(dentry);
+
+ spin_lock(&dentry_attach_lock);
+ }
+ spin_unlock(&dentry_attach_lock);
+
+ /*
+ * If we are the last holder of this dentry lock, there is no
+ * reason to downconvert so skip straight to the unlock.
+ */
+ if (dl->dl_count == 1)
+ return UNBLOCK_STOP_POST;
+
+ return UNBLOCK_CONTINUE_POST;
+}
+
+static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
+ int new_level)
+{
+ struct ocfs2_refcount_tree *tree =
+ ocfs2_lock_res_refcount_tree(lockres);
+
+ return ocfs2_ci_checkpointed(&tree->rf_ci, lockres, new_level);
+}
+
+static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
+ int blocking)
+{
+ struct ocfs2_refcount_tree *tree =
+ ocfs2_lock_res_refcount_tree(lockres);
+
+ ocfs2_metadata_cache_purge(&tree->rf_ci);
+
+ return UNBLOCK_CONTINUE;
+}
+
+static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
+{
+ struct ocfs2_qinfo_lvb *lvb;
+ struct ocfs2_mem_dqinfo *oinfo = ocfs2_lock_res_qinfo(lockres);
+ struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
+ oinfo->dqi_gi.dqi_type);
+
+ lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
+ lvb->lvb_version = OCFS2_QINFO_LVB_VERSION;
+ lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace);
+ lvb->lvb_igrace = cpu_to_be32(info->dqi_igrace);
+ lvb->lvb_syncms = cpu_to_be32(oinfo->dqi_syncms);
+ lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks);
+ lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk);
+ lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry);
+}
+
+void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
+{
+ struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
+ struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
+ int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
+
+ if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
+ ocfs2_cluster_unlock(osb, lockres, level);
+}
+
+static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
+{
+ struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
+ oinfo->dqi_gi.dqi_type);
+ struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
+ struct ocfs2_qinfo_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
+ struct buffer_head *bh = NULL;
+ struct ocfs2_global_disk_dqinfo *gdinfo;
+ int status = 0;
+
+ if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
+ lvb->lvb_version == OCFS2_QINFO_LVB_VERSION) {
+ info->dqi_bgrace = be32_to_cpu(lvb->lvb_bgrace);
+ info->dqi_igrace = be32_to_cpu(lvb->lvb_igrace);
+ oinfo->dqi_syncms = be32_to_cpu(lvb->lvb_syncms);
+ oinfo->dqi_gi.dqi_blocks = be32_to_cpu(lvb->lvb_blocks);
+ oinfo->dqi_gi.dqi_free_blk = be32_to_cpu(lvb->lvb_free_blk);
+ oinfo->dqi_gi.dqi_free_entry =
+ be32_to_cpu(lvb->lvb_free_entry);
+ } else {
+ status = ocfs2_read_quota_phys_block(oinfo->dqi_gqinode,
+ oinfo->dqi_giblk, &bh);
+ if (status) {
+ mlog_errno(status);
+ goto bail;
+ }
+ gdinfo = (struct ocfs2_global_disk_dqinfo *)
+ (bh->b_data + OCFS2_GLOBAL_INFO_OFF);
+ info->dqi_bgrace = le32_to_cpu(gdinfo->dqi_bgrace);
+ info->dqi_igrace = le32_to_cpu(gdinfo->dqi_igrace);
+ oinfo->dqi_syncms = le32_to_cpu(gdinfo->dqi_syncms);
+ oinfo->dqi_gi.dqi_blocks = le32_to_cpu(gdinfo->dqi_blocks);
+ oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(gdinfo->dqi_free_blk);
+ oinfo->dqi_gi.dqi_free_entry =
+ le32_to_cpu(gdinfo->dqi_free_entry);
+ brelse(bh);
+ ocfs2_track_lock_refresh(lockres);
+ }
+
+bail:
+ return status;
+}
+
+/* Lock quota info; this function expects at least a shared lock on the quota
+ * file so that we can safely refresh quota info from disk. */
+int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
+{
+ struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
+ struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
+ int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
+ int status = 0;
+
+ /* On RO devices, locking really isn't needed... */
+ if (ocfs2_is_hard_readonly(osb)) {
+ if (ex)
+ status = -EROFS;
+ goto bail;
+ }
+ if (ocfs2_mount_local(osb))
+ goto bail;
+
+ status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ if (!ocfs2_should_refresh_lock_res(lockres))
+ goto bail;
+ /* OK, we have the lock but we need to refresh the quota info */
+ status = ocfs2_refresh_qinfo(oinfo);
+ if (status)
+ ocfs2_qinfo_unlock(oinfo, ex);
+ ocfs2_complete_lock_res_refresh(lockres, status);
+bail:
+ return status;
+}
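+
+/*
+ * Sketch of the expected qinfo lock usage (the caller already holds at
+ * least a shared cluster lock on the global quota file; error handling
+ * elided):
+ *
+ *	status = ocfs2_qinfo_lock(oinfo, ex);
+ *	if (status < 0)
+ *		return status;
+ *	...read or update the global quota info...
+ *	ocfs2_qinfo_unlock(oinfo, ex);
+ */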
+
+int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex)
+{
+ int status;
+ int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
+ struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
+ struct ocfs2_super *osb = lockres->l_priv;
+
+
+ if (ocfs2_is_hard_readonly(osb))
+ return -EROFS;
+
+ if (ocfs2_mount_local(osb))
+ return 0;
+
+ status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
+ if (status < 0)
+ mlog_errno(status);
+
+ return status;
+}
+
+void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex)
+{
+ int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
+ struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
+ struct ocfs2_super *osb = lockres->l_priv;
+
+ if (!ocfs2_mount_local(osb))
+ ocfs2_cluster_unlock(osb, lockres, level);
+}
+
+static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres)
+{
+ int status;
+ struct ocfs2_unblock_ctl ctl = {0, 0,};
+ unsigned long flags;
+
+ /* Our reference to the lockres in this function can be
+ * considered valid until we remove the OCFS2_LOCK_QUEUED
+ * flag. */
+
+ BUG_ON(!lockres);
+ BUG_ON(!lockres->l_ops);
+
+ mlog(ML_BASTS, "lockres %s blocked\n", lockres->l_name);
+
+ /* Detect whether a lock has been marked as going away while
+ * the downconvert thread was processing other things. A lock can
+ * still be marked with OCFS2_LOCK_FREEING after this check,
+ * but short circuiting here will still save us some
+ * performance. */
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ if (lockres->l_flags & OCFS2_LOCK_FREEING)
+ goto unqueue;
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ status = ocfs2_unblock_lock(osb, lockres, &ctl);
+ if (status < 0)
+ mlog_errno(status);
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+unqueue:
+ if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue) {
+ lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
+ } else
+ ocfs2_schedule_blocked_lock(osb, lockres);
+
+ mlog(ML_BASTS, "lockres %s, requeue = %s.\n", lockres->l_name,
+ ctl.requeue ? "yes" : "no");
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ if (ctl.unblock_action != UNBLOCK_CONTINUE
+ && lockres->l_ops->post_unlock)
+ lockres->l_ops->post_unlock(osb, lockres);
+}
+
+static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres)
+{
+ unsigned long flags;
+
+ assert_spin_locked(&lockres->l_lock);
+
+ if (lockres->l_flags & OCFS2_LOCK_FREEING) {
+ /* Do not schedule a lock for downconvert when it's on
+ * the way to destruction - any nodes wanting access
+ * to the resource will get it soon. */
+ mlog(ML_BASTS, "lockres %s won't be scheduled: flags 0x%lx\n",
+ lockres->l_name, lockres->l_flags);
+ return;
+ }
+
+ lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
+
+ spin_lock_irqsave(&osb->dc_task_lock, flags);
+ if (list_empty(&lockres->l_blocked_list)) {
+ list_add_tail(&lockres->l_blocked_list,
+ &osb->blocked_lock_list);
+ osb->blocked_lock_count++;
+ }
+ spin_unlock_irqrestore(&osb->dc_task_lock, flags);
+}
+
+static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
+{
+ unsigned long processed;
+ unsigned long flags;
+ struct ocfs2_lock_res *lockres;
+
+ spin_lock_irqsave(&osb->dc_task_lock, flags);
+ /* grab this early so we know to try again if a state change and
+ * wake happens part-way through our work */
+ osb->dc_work_sequence = osb->dc_wake_sequence;
+
+ processed = osb->blocked_lock_count;
+ /*
+ * blocked lock processing in this loop might call iput which can
+ * remove items off osb->blocked_lock_list. Downconvert up to
+ * 'processed' number of locks, but stop short if we had some
+ * removed in ocfs2_mark_lockres_freeing when downconverting.
+ */
+ while (processed && !list_empty(&osb->blocked_lock_list)) {
+ lockres = list_entry(osb->blocked_lock_list.next,
+ struct ocfs2_lock_res, l_blocked_list);
+ list_del_init(&lockres->l_blocked_list);
+ osb->blocked_lock_count--;
+ spin_unlock_irqrestore(&osb->dc_task_lock, flags);
+
+ BUG_ON(!processed);
+ processed--;
+
+ ocfs2_process_blocked_lock(osb, lockres);
+
+ spin_lock_irqsave(&osb->dc_task_lock, flags);
+ }
+ spin_unlock_irqrestore(&osb->dc_task_lock, flags);
+}
+
+static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
+{
+ int empty = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&osb->dc_task_lock, flags);
+ if (list_empty(&osb->blocked_lock_list))
+ empty = 1;
+
+ spin_unlock_irqrestore(&osb->dc_task_lock, flags);
+ return empty;
+}
+
+static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
+{
+ int should_wake = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&osb->dc_task_lock, flags);
+ if (osb->dc_work_sequence != osb->dc_wake_sequence)
+ should_wake = 1;
+ spin_unlock_irqrestore(&osb->dc_task_lock, flags);
+
+ return should_wake;
+}
+
+static int ocfs2_downconvert_thread(void *arg)
+{
+ struct ocfs2_super *osb = arg;
+
+ /* only quit once we've been asked to stop and there is no more
+ * work available */
+ while (!(kthread_should_stop() &&
+ ocfs2_downconvert_thread_lists_empty(osb))) {
+
+ wait_event_interruptible(osb->dc_event,
+ ocfs2_downconvert_thread_should_wake(osb) ||
+ kthread_should_stop());
+
+ mlog(0, "downconvert_thread: awoken\n");
+
+ ocfs2_downconvert_thread_do_work(osb);
+ }
+
+ osb->dc_task = NULL;
+ return 0;
+}
+
+void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&osb->dc_task_lock, flags);
+ /* make sure the downconvert thread gets a swipe at whatever changes
+ * the caller may have made to the lock state */
+ osb->dc_wake_sequence++;
+ spin_unlock_irqrestore(&osb->dc_task_lock, flags);
+ wake_up(&osb->dc_event);
+}
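The two sequence counters above (dc_wake_sequence bumped by wakers, dc_work_sequence recorded by the worker before each pass) ensure a wake-up that races with an in-progress pass is not lost: the thread re-checks the counters before sleeping again. A rough sketch of how such a thread is typically started and stopped with the standard kthread helpers; the demo_* helpers and thread name are illustrative, and the real setup lives elsewhere in dlmglue.c:

    #include <linux/err.h>
    #include <linux/kthread.h>

    /* Assumes the osb->dc_* fields and ocfs2_downconvert_thread() above. */
    static int demo_start_downconvert_thread(struct ocfs2_super *osb)
    {
            struct task_struct *task;

            task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc-demo");
            if (IS_ERR(task))
                    return PTR_ERR(task);
            osb->dc_task = task;
            return 0;
    }

    static void demo_stop_downconvert_thread(struct ocfs2_super *osb)
    {
            /* kthread_stop() sets the stop flag and wakes the thread, which
             * then drains any remaining work before exiting (see the loop
             * condition in ocfs2_downconvert_thread()). */
            if (osb->dc_task)
                    kthread_stop(osb->dc_task);
    }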
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
new file mode 100644
index 0000000000..e5da5809ed
--- /dev/null
+++ b/fs/ocfs2/dlmglue.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * dlmglue.h
+ *
+ * Declarations for the OCFS2 cluster locking glue (dlmglue) interface.
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ */
+
+
+#ifndef DLMGLUE_H
+#define DLMGLUE_H
+
+#include "dcache.h"
+
+#define OCFS2_LVB_VERSION 5
+
+struct ocfs2_meta_lvb {
+ __u8 lvb_version;
+ __u8 lvb_reserved0;
+ __be16 lvb_idynfeatures;
+ __be32 lvb_iclusters;
+ __be32 lvb_iuid;
+ __be32 lvb_igid;
+ __be64 lvb_iatime_packed;
+ __be64 lvb_ictime_packed;
+ __be64 lvb_imtime_packed;
+ __be64 lvb_isize;
+ __be16 lvb_imode;
+ __be16 lvb_inlink;
+ __be32 lvb_iattr;
+ __be32 lvb_igeneration;
+ __be32 lvb_reserved2;
+};
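All multi-byte fields in the LVB are fixed big-endian because the lock value block is carried between nodes that may differ in byte order. A sketch of how such an LVB might be populated from an in-core inode; this is illustrative only, the demo_* helper is not the routine ocfs2 uses, and the packed timestamp fields are omitted:

    #include <linux/fs.h>

    static void demo_fill_meta_lvb(struct ocfs2_meta_lvb *lvb,
                                   struct inode *inode)
    {
            lvb->lvb_version     = OCFS2_LVB_VERSION;
            lvb->lvb_isize       = cpu_to_be64(i_size_read(inode));
            lvb->lvb_iuid        = cpu_to_be32(i_uid_read(inode));
            lvb->lvb_igid        = cpu_to_be32(i_gid_read(inode));
            lvb->lvb_imode       = cpu_to_be16(inode->i_mode);
            lvb->lvb_inlink      = cpu_to_be16(inode->i_nlink);
            lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
    }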
+
+#define OCFS2_QINFO_LVB_VERSION 1
+
+struct ocfs2_qinfo_lvb {
+ __u8 lvb_version;
+ __u8 lvb_reserved[3];
+ __be32 lvb_bgrace;
+ __be32 lvb_igrace;
+ __be32 lvb_syncms;
+ __be32 lvb_blocks;
+ __be32 lvb_free_blk;
+ __be32 lvb_free_entry;
+};
+
+#define OCFS2_ORPHAN_LVB_VERSION 1
+
+struct ocfs2_orphan_scan_lvb {
+ __u8 lvb_version;
+ __u8 lvb_reserved[3];
+ __be32 lvb_os_seqno;
+};
+
+#define OCFS2_TRIMFS_LVB_VERSION 1
+
+struct ocfs2_trim_fs_lvb {
+ __u8 lvb_version;
+ __u8 lvb_success;
+ __u8 lvb_reserved[2];
+ __be32 lvb_nodenum;
+ __be64 lvb_start;
+ __be64 lvb_len;
+ __be64 lvb_minlen;
+ __be64 lvb_trimlen;
+};
+
+struct ocfs2_trim_fs_info {
+ u8 tf_valid; /* non-zero if the lvb contents are valid */
+ u8 tf_success; /* non-zero if the trim succeeded */
+ u32 tf_nodenum; /* osb node number */
+ u64 tf_start; /* trim start offset in clusters */
+ u64 tf_len; /* trim length in clusters */
+ u64 tf_minlen; /* trim minimum contiguous free clusters */
+ u64 tf_trimlen; /* trimmed length in bytes */
+};
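struct ocfs2_trim_fs_info is the in-core, CPU-endian mirror of the big-endian struct ocfs2_trim_fs_lvb above. A sketch of the unpacking direction; illustrative only, not the code inside ocfs2_trim_fs_lock():

    static void demo_trim_lvb_to_info(const struct ocfs2_trim_fs_lvb *lvb,
                                      struct ocfs2_trim_fs_info *info)
    {
            info->tf_valid   = (lvb->lvb_version == OCFS2_TRIMFS_LVB_VERSION);
            info->tf_success = lvb->lvb_success;
            info->tf_nodenum = be32_to_cpu(lvb->lvb_nodenum);
            info->tf_start   = be64_to_cpu(lvb->lvb_start);
            info->tf_len     = be64_to_cpu(lvb->lvb_len);
            info->tf_minlen  = be64_to_cpu(lvb->lvb_minlen);
            info->tf_trimlen = be64_to_cpu(lvb->lvb_trimlen);
    }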
+
+struct ocfs2_lock_holder {
+ struct list_head oh_list;
+ struct pid *oh_owner_pid;
+ int oh_ex;
+};
+
+/* ocfs2_inode_lock_full() 'arg_flags' flags */
+/* don't wait on recovery. */
+#define OCFS2_META_LOCK_RECOVERY (0x01)
+/* Instruct the dlm not to queue ourselves on the other node. */
+#define OCFS2_META_LOCK_NOQUEUE (0x02)
+/* don't block waiting for the downconvert thread; instead return -EAGAIN */
+#define OCFS2_LOCK_NONBLOCK (0x04)
+/* just return the on-disk inode bh if we already hold the cluster lock. */
+#define OCFS2_META_LOCK_GETBH (0x08)
+
+/* Locking subclasses of inode cluster lock */
+enum {
+ OI_LS_NORMAL = 0,
+ OI_LS_PARENT,
+ OI_LS_RENAME1,
+ OI_LS_RENAME2,
+ OI_LS_REFLINK_TARGET,
+};
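These subclasses exist so that lockdep can distinguish legitimately nested acquisitions of the inode cluster lock, e.g. a parent directory locked before a child inode, or the two inodes of a rename. A sketch of the parent-then-child case using the ocfs2_inode_lock_nested() macro declared further down; the demo_* helper and its abbreviated error handling are illustrative:

    static int demo_lock_parent_then_child(struct inode *dir,
                                           struct inode *inode)
    {
            int status;

            /* The parent gets its own lockdep subclass... */
            status = ocfs2_inode_lock_nested(dir, NULL, 0, OI_LS_PARENT);
            if (status < 0)
                    return status;

            /* ...so taking the child with OI_LS_NORMAL nests cleanly. */
            status = ocfs2_inode_lock_nested(inode, NULL, 0, OI_LS_NORMAL);
            if (status < 0) {
                    ocfs2_inode_unlock(dir, 0);
                    return status;
            }

            /* ... work on both inodes ... */

            ocfs2_inode_unlock(inode, 0);
            ocfs2_inode_unlock(dir, 0);
            return 0;
    }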
+
+int ocfs2_dlm_init(struct ocfs2_super *osb);
+void ocfs2_dlm_shutdown(struct ocfs2_super *osb, int hangup_pending);
+void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res);
+void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
+ enum ocfs2_lock_type type,
+ unsigned int generation,
+ struct inode *inode);
+void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
+ u64 parent, struct inode *inode);
+struct ocfs2_file_private;
+void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
+ struct ocfs2_file_private *fp);
+struct ocfs2_mem_dqinfo;
+void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres,
+ struct ocfs2_mem_dqinfo *info);
+void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres,
+ struct ocfs2_super *osb, u64 ref_blkno,
+ unsigned int generation);
+void ocfs2_lock_res_free(struct ocfs2_lock_res *res);
+int ocfs2_create_new_inode_locks(struct inode *inode);
+int ocfs2_drop_inode_locks(struct inode *inode);
+int ocfs2_rw_lock(struct inode *inode, int write);
+int ocfs2_try_rw_lock(struct inode *inode, int write);
+void ocfs2_rw_unlock(struct inode *inode, int write);
+int ocfs2_open_lock(struct inode *inode);
+int ocfs2_try_open_lock(struct inode *inode, int write);
+void ocfs2_open_unlock(struct inode *inode);
+int ocfs2_inode_lock_atime(struct inode *inode,
+ struct vfsmount *vfsmnt,
+ int *level, int wait);
+int ocfs2_inode_lock_full_nested(struct inode *inode,
+ struct buffer_head **ret_bh,
+ int ex,
+ int arg_flags,
+ int subclass);
+int ocfs2_inode_lock_with_page(struct inode *inode,
+ struct buffer_head **ret_bh,
+ int ex,
+ struct page *page);
+/* Variants without special locking class or flags */
+#define ocfs2_inode_lock_full(i, r, e, f)\
+ ocfs2_inode_lock_full_nested(i, r, e, f, OI_LS_NORMAL)
+#define ocfs2_inode_lock_nested(i, b, e, s)\
+ ocfs2_inode_lock_full_nested(i, b, e, 0, s)
+/* 99% of the time we don't want to supply any additional flags --
+ * those are for very specific cases only. */
+#define ocfs2_inode_lock(i, b, e) ocfs2_inode_lock_full_nested(i, b, e, 0, OI_LS_NORMAL)
+#define ocfs2_try_inode_lock(i, b, e)\
+ ocfs2_inode_lock_full_nested(i, b, e, OCFS2_META_LOCK_NOQUEUE,\
+ OI_LS_NORMAL)
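A sketch of the common caller pattern for these macros: take the inode cluster lock (shared here, ex = 0), use the returned on-disk inode buffer head while the lock is held, then unlock and release the bh. The demo_* helper is illustrative only:

    #include <linux/buffer_head.h>

    static int demo_read_under_inode_lock(struct inode *inode)
    {
            struct buffer_head *di_bh = NULL;
            int status;

            status = ocfs2_inode_lock(inode, &di_bh, 0);
            if (status < 0)
                    return status;

            /* ... consult the on-disk inode via di_bh while held ... */

            ocfs2_inode_unlock(inode, 0);
            brelse(di_bh);
            return 0;
    }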
+void ocfs2_inode_unlock(struct inode *inode,
+ int ex);
+int ocfs2_super_lock(struct ocfs2_super *osb,
+ int ex);
+void ocfs2_super_unlock(struct ocfs2_super *osb,
+ int ex);
+int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno);
+void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno);
+
+int ocfs2_rename_lock(struct ocfs2_super *osb);
+void ocfs2_rename_unlock(struct ocfs2_super *osb);
+int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex);
+void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex);
+void ocfs2_trim_fs_lock_res_init(struct ocfs2_super *osb);
+void ocfs2_trim_fs_lock_res_uninit(struct ocfs2_super *osb);
+int ocfs2_trim_fs_lock(struct ocfs2_super *osb,
+ struct ocfs2_trim_fs_info *info, int trylock);
+void ocfs2_trim_fs_unlock(struct ocfs2_super *osb,
+ struct ocfs2_trim_fs_info *info);
+int ocfs2_dentry_lock(struct dentry *dentry, int ex);
+void ocfs2_dentry_unlock(struct dentry *dentry, int ex);
+int ocfs2_file_lock(struct file *file, int ex, int trylock);
+void ocfs2_file_unlock(struct file *file);
+int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex);
+void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex);
+struct ocfs2_refcount_tree;
+int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex);
+void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex);
+
+
+void ocfs2_mark_lockres_freeing(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres);
+void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres);
+
+/* for the downconvert thread */
+void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb);
+
+struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void);
+void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug);
+
+/* To set the locking protocol on module initialization */
+void ocfs2_set_locking_protocol(void);
+
+/* The _tracker pair is used to avoid recursive cluster locking by the same process */
+int ocfs2_inode_lock_tracker(struct inode *inode,
+ struct buffer_head **ret_bh,
+ int ex,
+ struct ocfs2_lock_holder *oh);
+void ocfs2_inode_unlock_tracker(struct inode *inode,
+ int ex,
+ struct ocfs2_lock_holder *oh,
+ int had_lock);
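A sketch of the _tracker pattern: the return value of ocfs2_inode_lock_tracker() is negative on error and non-zero when the current process already held the lock (in which case no new cluster lock is taken), and that value must be passed back to ocfs2_inode_unlock_tracker(). The demo_* helper is illustrative only:

    static int demo_tracker_usage(struct inode *inode)
    {
            struct ocfs2_lock_holder oh;
            struct buffer_head *di_bh = NULL;
            int had_lock;

            had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 1, &oh);
            if (had_lock < 0)
                    return had_lock;

            /* ... operate on the inode under the exclusive lock ... */

            ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
            brelse(di_bh);
            return 0;
    }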
+
+#endif /* DLMGLUE_H */