Diffstat (limited to 'kernel')
-rw-r--r--  kernel/dma/map_benchmark.c     3
-rw-r--r--  kernel/exit.c                  2
-rw-r--r--  kernel/locking/mutex-debug.c  12
-rw-r--r--  kernel/power/swap.c            2
-rw-r--r--  kernel/vhost_task.c           53
5 files changed, 52 insertions, 20 deletions
diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
index f7f3d14fa6..4950e0b622 100644
--- a/kernel/dma/map_benchmark.c
+++ b/kernel/dma/map_benchmark.c
@@ -256,6 +256,9 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
* dma_mask changed by benchmark
*/
dma_set_mask(map->dev, old_dma_mask);
+
+ if (ret)
+ return ret;
break;
default:
return -EINVAL;
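The fix keeps the mask restoration unconditional and only then propagates the benchmark's error. A minimal hedged sketch of that restore-then-propagate shape (names assumed from this file, not a verbatim copy):

/*
 * Sketch only: restore the saved DMA mask no matter what, and only
 * afterwards report the failure, so an error exit can never leak the
 * benchmark's temporary mask to later users of the device.
 */
static long benchmark_with_mask(struct map_benchmark_data *map, u64 mask)
{
	u64 old_dma_mask = dma_get_mask(map->dev);
	long ret;

	dma_set_mask(map->dev, mask);
	ret = do_map_benchmark(map);		/* may fail */
	dma_set_mask(map->dev, old_dma_mask);	/* always undo first */
	return ret;				/* then report the result */
}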
diff --git a/kernel/exit.c b/kernel/exit.c
index 41a12630cb..2b9ef8abff 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -488,6 +488,8 @@ retry:
* Search through everything else, we should not get here often.
*/
for_each_process(g) {
+ if (atomic_read(&mm->mm_users) <= 1)
+ break;
if (g->flags & PF_KTHREAD)
continue;
for_each_thread(g, c) {
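The guard is sound because mm_users counts the tasks (and other transient users) holding a reference to mm; once it falls to one, only the exiting task remains and the walk cannot find a new owner. A hedged reconstruction of the surrounding loop, with the assign_new_owner label assumed:

/*
 * Illustrative, not verbatim kernel code: the fallback scan over every
 * process is only worth continuing while some other task could still
 * hold mm. mm_users <= 1 means nobody else does.
 */
for_each_process(g) {
	if (atomic_read(&mm->mm_users) <= 1)
		break;			/* no candidate owner can exist */
	if (g->flags & PF_KTHREAD)
		continue;		/* kernel threads never own a user mm */
	for_each_thread(g, c) {
		if (c->mm == mm)
			goto assign_new_owner;	/* assumed label */
		if (c->mm)
			break;		/* threads share one mm; skip the rest */
	}
}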
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index bc8abb8549..6e6f6071cf 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -12,6 +12,7 @@
*/
#include <linux/mutex.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/export.h>
#include <linux/poison.h>
#include <linux/sched.h>
@@ -89,6 +90,17 @@ void debug_mutex_init(struct mutex *lock, const char *name,
lock->magic = lock;
}
+static void devm_mutex_release(void *res)
+{
+ mutex_destroy(res);
+}
+
+int __devm_mutex_init(struct device *dev, struct mutex *lock)
+{
+ return devm_add_action_or_reset(dev, devm_mutex_release, lock);
+}
+EXPORT_SYMBOL_GPL(__devm_mutex_init);
+
/***
* mutex_destroy - mark a mutex unusable
* @lock: the mutex to be destroyed
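Drivers are expected to reach this helper through the devm_mutex_init() wrapper declared in <linux/mutex.h>, which registers mutex_destroy() as a devres action. A hedged probe-time usage sketch (my_driver_probe and struct my_priv are illustrative):

#include <linux/device.h>
#include <linux/mutex.h>

struct my_priv {
	struct mutex lock;
};

static int my_driver_probe(struct device *dev)
{
	struct my_priv *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* mutex_destroy() now runs automatically on driver detach */
	ret = devm_mutex_init(dev, &priv->lock);
	if (ret)
		return ret;

	dev_set_drvdata(dev, priv);
	return 0;
}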
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 5bc04bfe2d..c6f24d1786 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -1600,7 +1600,7 @@ int swsusp_check(bool exclusive)
put:
if (error)
- fput(hib_resume_bdev_file);
+ bdev_fput(hib_resume_bdev_file);
else
pr_debug("Image signature found, resuming\n");
} else {
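The handle here comes from bdev_file_open_by_dev(), and files wrapping a block device must be released with bdev_fput(), which drops the exclusive claim synchronously, rather than plain fput(), which defers release to task work. A hedged sketch of the pairing, not swsusp_check() itself (holder arguments elided):

/*
 * Sketch only: open the resume device through the file-based block API
 * and release it with bdev_fput(), so the claim is gone by the time
 * the function returns.
 */
struct file *bdev_file;

bdev_file = bdev_file_open_by_dev(swsusp_resume_device, BLK_OPEN_READ,
				  NULL, NULL);
if (IS_ERR(bdev_file))
	return PTR_ERR(bdev_file);

/* ... read and check the image signature ... */

bdev_fput(bdev_file);	/* not fput(): drop the bdev claim right away */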
diff --git a/kernel/vhost_task.c b/kernel/vhost_task.c
index da35e5b7f0..8800f5acc0 100644
--- a/kernel/vhost_task.c
+++ b/kernel/vhost_task.c
@@ -10,38 +10,32 @@
enum vhost_task_flags {
VHOST_TASK_FLAGS_STOP,
+ VHOST_TASK_FLAGS_KILLED,
};
struct vhost_task {
bool (*fn)(void *data);
+ void (*handle_sigkill)(void *data);
void *data;
struct completion exited;
unsigned long flags;
struct task_struct *task;
+ /* serialize SIGKILL and vhost_task_stop calls */
+ struct mutex exit_mutex;
};
static int vhost_task_fn(void *data)
{
struct vhost_task *vtsk = data;
- bool dead = false;
for (;;) {
bool did_work;
- if (!dead && signal_pending(current)) {
+ if (signal_pending(current)) {
struct ksignal ksig;
- /*
- * Calling get_signal will block in SIGSTOP,
- * or clear fatal_signal_pending, but remember
- * what was set.
- *
- * This thread won't actually exit until all
- * of the file descriptors are closed, and
- * the release function is called.
- */
- dead = get_signal(&ksig);
- if (dead)
- clear_thread_flag(TIF_SIGPENDING);
+
+ if (get_signal(&ksig))
+ break;
}
/* mb paired w/ vhost_task_stop */
@@ -57,7 +51,19 @@ static int vhost_task_fn(void *data)
schedule();
}
+ mutex_lock(&vtsk->exit_mutex);
+ /*
+ * If a vhost_task_stop and SIGKILL race, we can ignore the SIGKILL.
+ * When the vhost layer has called vhost_task_stop it's already stopped
+ * new work and flushed.
+ */
+ if (!test_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags)) {
+ set_bit(VHOST_TASK_FLAGS_KILLED, &vtsk->flags);
+ vtsk->handle_sigkill(vtsk->data);
+ }
+ mutex_unlock(&vtsk->exit_mutex);
complete(&vtsk->exited);
+
do_exit(0);
}
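/*
 * Hedged race summary for the exit path above (editorial note, not
 * part of the patch):
 *
 *   SIGKILL wins:  vhost_task_fn() takes exit_mutex first, sets
 *                  VHOST_TASK_FLAGS_KILLED and runs handle_sigkill();
 *                  a later vhost_task_stop() sees KILLED, skips the
 *                  wake-up, and just waits for ->exited.
 *
 *   stop wins:     vhost_task_stop() sets VHOST_TASK_FLAGS_STOP under
 *                  exit_mutex; the exiting task sees STOP and skips
 *                  handle_sigkill(), since the vhost layer has already
 *                  stopped new work and flushed.
 *
 * Either way handle_sigkill() runs at most once, and complete() is
 * reached on every path.
 */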
@@ -78,12 +84,17 @@ EXPORT_SYMBOL_GPL(vhost_task_wake);
* @vtsk: vhost_task to stop
*
* vhost_task_fn ensures the worker thread exits after
- * VHOST_TASK_FLAGS_SOP becomes true.
+ * VHOST_TASK_FLAGS_STOP becomes true.
*/
void vhost_task_stop(struct vhost_task *vtsk)
{
- set_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags);
- vhost_task_wake(vtsk);
+ mutex_lock(&vtsk->exit_mutex);
+ if (!test_bit(VHOST_TASK_FLAGS_KILLED, &vtsk->flags)) {
+ set_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags);
+ vhost_task_wake(vtsk);
+ }
+ mutex_unlock(&vtsk->exit_mutex);
+
/*
* Make sure vhost_task_fn is no longer accessing the vhost_task before
* freeing it below.
@@ -96,14 +107,16 @@ EXPORT_SYMBOL_GPL(vhost_task_stop);
/**
* vhost_task_create - create a copy of a task to be used by the kernel
* @fn: vhost worker function
- * @arg: data to be passed to fn
+ * @handle_sigkill: vhost function to handle when we are killed
+ * @arg: data to be passed to fn and handle_sigkill
* @name: the thread's name
*
* This returns a specialized task for use by the vhost layer or NULL on
* failure. The returned task is inactive, and the caller must fire it up
* through vhost_task_start().
*/
-struct vhost_task *vhost_task_create(bool (*fn)(void *), void *arg,
+struct vhost_task *vhost_task_create(bool (*fn)(void *),
+ void (*handle_sigkill)(void *), void *arg,
const char *name)
{
struct kernel_clone_args args = {
@@ -122,8 +135,10 @@ struct vhost_task *vhost_task_create(bool (*fn)(void *), void *arg,
if (!vtsk)
return NULL;
init_completion(&vtsk->exited);
+ mutex_init(&vtsk->exit_mutex);
vtsk->data = arg;
vtsk->fn = fn;
+ vtsk->handle_sigkill = handle_sigkill;
args.fn_arg = vtsk;
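With the extra callback in the signature, a vhost-side caller now looks roughly like the hedged sketch below (my_worker_fn, my_kill_cleanup, and struct my_dev are illustrative, not the vhost driver's real names):

#include <linux/sched/vhost_task.h>

struct my_dev {
	struct vhost_task *vtsk;
};

static bool my_worker_fn(void *data)
{
	/* process queued work; return true if more may be pending */
	return false;
}

static void my_kill_cleanup(void *data)
{
	/* SIGKILL path: stop queueing new work and flush what's in flight */
}

static int my_open(struct my_dev *d)
{
	struct vhost_task *vtsk;

	vtsk = vhost_task_create(my_worker_fn, my_kill_cleanup, d,
				 "vhost-worker");
	if (!vtsk)
		return -ENOMEM;

	vhost_task_start(vtsk);
	d->vtsk = vtsk;
	return 0;
}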