path: root/libmount/src/lock.c
author		Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-14 19:33:32 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-14 19:33:32 +0000
commit		8bb05ac73a5b448b339ce0bc8d396c82c459b47f (patch)
tree		1fdda006866bca20d41cb206767ea5241e36852f /libmount/src/lock.c
parent		Adding debian version 2.39.3-11. (diff)
download	util-linux-8bb05ac73a5b448b339ce0bc8d396c82c459b47f.tar.xz
		util-linux-8bb05ac73a5b448b339ce0bc8d396c82c459b47f.zip
Merging upstream version 2.40.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'libmount/src/lock.c')
-rw-r--r--	libmount/src/lock.c	| 57
1 file changed, 49 insertions(+), 8 deletions(-)
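The diff below adds reference counting to struct libmnt_lock: mnt_new_lock() now returns a lock with a counter of 1, mnt_ref_lock() increments it, and mnt_unref_lock() decrements it and frees the lock via mnt_free_lock() once it reaches zero. As a rough usage sketch (not part of this commit; the include path and the lock-file path are illustrative assumptions), a caller built against util-linux 2.40 might do:

/* Sketch: hold the lock while updating a shared data file, then drop
 * the last reference instead of calling mnt_free_lock() directly. */
#include <libmount/libmount.h>	/* assumed install location of the header */

int main(void)
{
	struct libmnt_lock *lock;
	int rc;

	lock = mnt_new_lock("/run/mount/utab", 0);	/* refcount starts at 1 */
	if (!lock)
		return 1;

	rc = mnt_lock_file(lock);	/* create and lock the lock file */
	if (rc == 0) {
		/* ... modify the data file while the lock is held ... */
		mnt_unlock_file(lock);
	}

	mnt_unref_lock(lock);	/* counter reaches zero, the lock is freed */
	return rc ? 1 : 0;
}

Something like "cc demo.c $(pkg-config --cflags --libs mount)" should build it, assuming the libmount development files are installed.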
diff --git a/libmount/src/lock.c b/libmount/src/lock.c
index 4835406..8aca8a7 100644
--- a/libmount/src/lock.c
+++ b/libmount/src/lock.c
@@ -36,6 +36,7 @@
* lock handler
*/
struct libmnt_lock {
+ int refcount; /* reference counter */
char *lockfile; /* path to lock file (e.g. /etc/mtab~) */
int lockfile_fd; /* lock file descriptor */
@@ -73,6 +74,7 @@ struct libmnt_lock *mnt_new_lock(const char *datafile, pid_t id __attribute__((_
if (!ml)
goto err;
+ ml->refcount = 1;
ml->lockfile_fd = -1;
ml->lockfile = lo;
@@ -89,18 +91,57 @@ err:
* mnt_free_lock:
* @ml: struct libmnt_lock handler
*
- * Deallocates mnt_lock.
+ * Deallocates libmnt_lock. This function does not care about reference count. Don't
+ * use this function directly -- it's better to use mnt_unref_lock().
+ *
+ * The reference counting is supported since util-linux v2.40.
*/
void mnt_free_lock(struct libmnt_lock *ml)
{
if (!ml)
return;
- DBG(LOCKS, ul_debugobj(ml, "free%s", ml->locked ? " !!! LOCKED !!!" : ""));
+
+ DBG(LOCKS, ul_debugobj(ml, "free%s [refcount=%d]",
+ ml->locked ? " !!! LOCKED !!!" : "",
+ ml->refcount));
free(ml->lockfile);
free(ml);
}
/**
+ * mnt_ref_lock:
+ * @ml: lock pointer
+ *
+ * Increments reference counter.
+ *
+ * Since: 2.40
+ */
+void mnt_ref_lock(struct libmnt_lock *ml)
+{
+ if (ml) {
+ ml->refcount++;
+	/*DBG(LOCKS, ul_debugobj(ml, "ref=%d", ml->refcount));*/
+ }
+}
+
+/**
+ * mnt_unref_lock:
+ * @ml: lock pointer
+ *
+ * Decrements the reference counter; on zero the @ml is automatically
+ * deallocated by mnt_free_lock().
+ */
+void mnt_unref_lock(struct libmnt_lock *ml)
+{
+ if (ml) {
+ ml->refcount--;
+	/*DBG(LOCKS, ul_debugobj(ml, "unref=%d", ml->refcount));*/
+ if (ml->refcount <= 0)
+ mnt_free_lock(ml);
+ }
+}
+
+/**
* mnt_lock_block_signals:
* @ml: struct libmnt_lock handler
* @enable: TRUE/FALSE
@@ -146,7 +187,7 @@ static int lock_simplelock(struct libmnt_lock *ml)
const char *lfile;
int rc;
struct stat sb;
- const mode_t lock_mask = S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH;
+ const mode_t lock_mask = S_IRUSR|S_IWUSR;
assert(ml);
@@ -161,8 +202,7 @@ static int lock_simplelock(struct libmnt_lock *ml)
sigprocmask(SIG_BLOCK, &sigs, &ml->oldsigmask);
}
- ml->lockfile_fd = open(lfile, O_RDONLY|O_CREAT|O_CLOEXEC,
- S_IWUSR|S_IRUSR|S_IRGRP|S_IROTH);
+ ml->lockfile_fd = open(lfile, O_RDONLY|O_CREAT|O_CLOEXEC, lock_mask);
if (ml->lockfile_fd < 0) {
rc = -errno;
goto err;
@@ -287,7 +327,7 @@ static void clean_lock(void)
if (!lock)
return;
mnt_unlock_file(lock);
- mnt_free_lock(lock);
+ mnt_unref_lock(lock);
}
static void __attribute__((__noreturn__)) sig_handler(int sig)
@@ -295,7 +335,8 @@ static void __attribute__((__noreturn__)) sig_handler(int sig)
errx(EXIT_FAILURE, "\n%d: catch signal: %s\n", getpid(), strsignal(sig));
}
-static int test_lock(struct libmnt_test *ts, int argc, char *argv[])
+static int test_lock(struct libmnt_test *ts __attribute__((unused)),
+ int argc, char *argv[])
{
time_t synctime = 0;
unsigned int usecs;
@@ -367,7 +408,7 @@ static int test_lock(struct libmnt_test *ts, int argc, char *argv[])
increment_data(datafile, verbose, l);
mnt_unlock_file(lock);
- mnt_free_lock(lock);
+ mnt_unref_lock(lock);
lock = NULL;
/* The mount command usually finishes after a mtab update. We