Diffstat (limited to 'include/linux/lockref.h')
 include/linux/lockref.h | 52 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 52 insertions(+), 0 deletions(-)
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
new file mode 100644
index 000000000..c3a1f78bc
--- /dev/null
+++ b/include/linux/lockref.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_LOCKREF_H
+#define __LINUX_LOCKREF_H
+
+/*
+ * Locked reference counts.
+ *
+ * These are different from just plain atomic refcounts in that they
+ * are atomic with respect to the spinlock that goes with them. In
+ * particular, there can be implementations that don't actually get
+ * the spinlock for the common decrement/increment operations, but they
+ * still have to check that the operation is done semantically as if
+ * the spinlock had been taken (using a cmpxchg operation that covers
+ * both the lock and the count word, or using memory transactions, for
+ * example).
+ */
+
+#include <linux/spinlock.h>
+#include <generated/bounds.h>
+
+#define USE_CMPXCHG_LOCKREF \
+	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
+	 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)
+
+struct lockref {
+	union {
+#if USE_CMPXCHG_LOCKREF
+		aligned_u64 lock_count;
+#endif
+		struct {
+			spinlock_t lock;
+			int count;
+		};
+	};
+};
+
+extern void lockref_get(struct lockref *);
+extern int lockref_put_return(struct lockref *);
+extern int lockref_get_not_zero(struct lockref *);
+extern int lockref_put_not_zero(struct lockref *);
+extern int lockref_put_or_lock(struct lockref *);
+
+extern void lockref_mark_dead(struct lockref *);
+extern int lockref_get_not_dead(struct lockref *);
+
+/* Must be called under spinlock for reliable results */
+static inline bool __lockref_is_dead(const struct lockref *l)
+{
+	return ((int)l->count < 0);
+}
+
+#endif /* __LINUX_LOCKREF_H */
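
The declarations above form the whole lockref API, so a usage sketch may help make the header comment concrete. The following is a minimal, hypothetical example: struct my_obj and the my_obj_*() helpers are invented for illustration and are not part of this patch. The embedded lockref replaces the usual spinlock-plus-refcount pair; on USE_CMPXCHG_LOCKREF configurations, lockref_get() and the lockref_put_or_lock() fast path update the count without ever taking ref.lock.

#include <linux/lockref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical object whose lifetime is managed by a lockref. */
struct my_obj {
	struct lockref ref;	/* ref.lock also guards the payload */
	/* ... payload fields ... */
};

static struct my_obj *my_obj_alloc(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	spin_lock_init(&obj->ref.lock);
	obj->ref.count = 1;	/* caller holds the initial reference */
	return obj;
}

static void my_obj_get(struct my_obj *obj)
{
	/* Lockless on cmpxchg-capable configs, spinlock fallback otherwise. */
	lockref_get(&obj->ref);
}

static void my_obj_put(struct my_obj *obj)
{
	/* Fast path: count was > 1 and has been decremented locklessly. */
	if (lockref_put_or_lock(&obj->ref))
		return;
	/*
	 * Slow path: count was <= 1, it has NOT been decremented, and
	 * ref.lock is now held. Drop the last reference under the lock
	 * and free the object.
	 */
	obj->ref.count--;
	spin_unlock(&obj->ref.lock);
	kfree(obj);
}

Touching ref.count directly is legitimate in the slow path because a holder of ref.lock owns the count by the contract described in the header comment.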
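lockref_mark_dead() and lockref_get_not_dead() support the teardown pattern used notably by the dcache: once the count has been poisoned negative under the lock, lockless lookups can no longer resurrect the object. Again a hedged sketch, reusing the hypothetical struct my_obj from above:

/*
 * Teardown side: with ref.lock held, poison the count so that
 * concurrent lockless lookups can no longer take a reference.
 */
static void my_obj_kill(struct my_obj *obj)
{
	spin_lock(&obj->ref.lock);
	/* ... remove obj from whatever lookup structure finds it ... */
	lockref_mark_dead(&obj->ref);	/* count becomes negative */
	spin_unlock(&obj->ref.lock);
}

/* Lookup side (e.g. under RCU): succeeds only while obj is still live. */
static struct my_obj *my_obj_tryget(struct my_obj *obj)
{
	if (!lockref_get_not_dead(&obj->ref))
		return NULL;	/* already dead; caller must not use obj */
	return obj;
}

Slow paths that already hold ref.lock can instead test the state directly with __lockref_is_dead(), per the "must be called under spinlock" note above.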