Diffstat
-rw-r--r--  debian/patches-rt/0088-radix-tree-use-local-locks.patch | 176
1 file changed, 176 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0088-radix-tree-use-local-locks.patch b/debian/patches-rt/0088-radix-tree-use-local-locks.patch
new file mode 100644
index 000000000..97af42757
--- /dev/null
+++ b/debian/patches-rt/0088-radix-tree-use-local-locks.patch
@@ -0,0 +1,176 @@
+From af6ad9480a842859f957315fdf189b6ad041a4be Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 25 Jan 2017 16:34:27 +0100
+Subject: [PATCH 088/347] radix-tree: use local locks
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+The preload functionality uses per-CPU variables and preempt-disable to
+ensure that it does not switch CPUs during its usage. This patch adds
+local_locks() instead preempt_disable() for the same purpose and to
+remain preemptible on -RT.
+
+Cc: stable-rt@vger.kernel.org
+Reported-and-debugged-by: Mike Galbraith <efault@gmx.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/idr.h        |  5 +----
+ include/linux/radix-tree.h |  7 ++-----
+ lib/radix-tree.c           | 32 +++++++++++++++++++++++---------
+ 3 files changed, 26 insertions(+), 18 deletions(-)
+
+diff --git a/include/linux/idr.h b/include/linux/idr.h
+index b6c6151c7446..81c9df5c04fa 100644
+--- a/include/linux/idr.h
++++ b/include/linux/idr.h
+@@ -169,10 +169,7 @@ static inline bool idr_is_empty(const struct idr *idr)
+  * Each idr_preload() should be matched with an invocation of this
+  * function. See idr_preload() for details.
+  */
+-static inline void idr_preload_end(void)
+-{
+-	preempt_enable();
+-}
++void idr_preload_end(void);
+ 
+ /**
+  * idr_for_each_entry() - Iterate over an IDR's elements of a given type.
+diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
+index 34149e8b5f73..affb0fc4c5b6 100644
+--- a/include/linux/radix-tree.h
++++ b/include/linux/radix-tree.h
+@@ -330,6 +330,8 @@ unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *,
+ int radix_tree_preload(gfp_t gfp_mask);
+ int radix_tree_maybe_preload(gfp_t gfp_mask);
+ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
++void radix_tree_preload_end(void);
++
+ void radix_tree_init(void);
+ void *radix_tree_tag_set(struct radix_tree_root *,
+ 			unsigned long index, unsigned int tag);
+@@ -349,11 +351,6 @@ unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *,
+ 			unsigned int max_items, unsigned int tag);
+ int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);
+ 
+-static inline void radix_tree_preload_end(void)
+-{
+-	preempt_enable();
+-}
+-
+ int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t);
+ int radix_tree_split(struct radix_tree_root *, unsigned long index,
+ 			unsigned new_order);
+diff --git a/lib/radix-tree.c b/lib/radix-tree.c
+index e5cab5c4e383..9309e813bc1f 100644
+--- a/lib/radix-tree.c
++++ b/lib/radix-tree.c
+@@ -38,7 +38,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/slab.h>
+ #include <linux/string.h>
+-
++#include <linux/locallock.h>
+ 
+ /* Number of nodes in fully populated tree of given height */
+ static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
+@@ -87,6 +87,7 @@ struct radix_tree_preload {
+ 	struct radix_tree_node *nodes;
+ };
+ static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
++static DEFINE_LOCAL_IRQ_LOCK(radix_tree_preloads_lock);
+ 
+ static inline struct radix_tree_node *entry_to_node(void *ptr)
+ {
+@@ -405,12 +406,13 @@ radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
+ 		 * succeed in getting a node here (and never reach
+ 		 * kmem_cache_alloc)
+ 		 */
+-		rtp = this_cpu_ptr(&radix_tree_preloads);
++		rtp = &get_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
+ 		if (rtp->nr) {
+ 			ret = rtp->nodes;
+ 			rtp->nodes = ret->parent;
+ 			rtp->nr--;
+ 		}
++		put_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
+ 		/*
+ 		 * Update the allocation stack trace as this is more useful
+ 		 * for debugging.
+@@ -476,14 +478,14 @@ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
+ 	 */
+ 	gfp_mask &= ~__GFP_ACCOUNT;
+ 
+-	preempt_disable();
++	local_lock(radix_tree_preloads_lock);
+ 	rtp = this_cpu_ptr(&radix_tree_preloads);
+ 	while (rtp->nr < nr) {
+-		preempt_enable();
++		local_unlock(radix_tree_preloads_lock);
+ 		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
+ 		if (node == NULL)
+ 			goto out;
+-		preempt_disable();
++		local_lock(radix_tree_preloads_lock);
+ 		rtp = this_cpu_ptr(&radix_tree_preloads);
+ 		if (rtp->nr < nr) {
+ 			node->parent = rtp->nodes;
+@@ -525,7 +527,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
+ 	if (gfpflags_allow_blocking(gfp_mask))
+ 		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
+ 	/* Preloading doesn't help anything with this gfp mask, skip it */
+-	preempt_disable();
++	local_lock(radix_tree_preloads_lock);
+ 	return 0;
+ }
+ EXPORT_SYMBOL(radix_tree_maybe_preload);
+@@ -563,7 +565,7 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
+ 
+ 	/* Preloading doesn't help anything with this gfp mask, skip it */
+ 	if (!gfpflags_allow_blocking(gfp_mask)) {
+-		preempt_disable();
++		local_lock(radix_tree_preloads_lock);
+ 		return 0;
+ 	}
+ 
+@@ -597,6 +599,12 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
+ 	return __radix_tree_preload(gfp_mask, nr_nodes);
+ }
+ 
++void radix_tree_preload_end(void)
++{
++	local_unlock(radix_tree_preloads_lock);
++}
++EXPORT_SYMBOL(radix_tree_preload_end);
++
+ static unsigned radix_tree_load_root(const struct radix_tree_root *root,
+ 		struct radix_tree_node **nodep, unsigned long *maxindex)
+ {
+@@ -2102,10 +2110,16 @@ EXPORT_SYMBOL(radix_tree_tagged);
+ void idr_preload(gfp_t gfp_mask)
+ {
+ 	if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
+-		preempt_disable();
++		local_lock(radix_tree_preloads_lock);
+ }
+ EXPORT_SYMBOL(idr_preload);
+ 
++void idr_preload_end(void)
++{
++	local_unlock(radix_tree_preloads_lock);
++}
++EXPORT_SYMBOL(idr_preload_end);
++
+ int ida_pre_get(struct ida *ida, gfp_t gfp)
+ {
+ 	/*
+@@ -2114,7 +2128,7 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
+ 	 * to return to the ida_pre_get() step.
+ 	 */
+ 	if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE))
+-		preempt_enable();
++		local_unlock(radix_tree_preloads_lock);
+ 
+ 	if (!this_cpu_read(ida_bitmap)) {
+ 		struct ida_bitmap *bitmap = kzalloc(sizeof(*bitmap), gfp);
+-- 
+2.36.1
+
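
Note on the change: callers of the preload API are unaffected; only the implementation now takes radix_tree_preloads_lock instead of disabling preemption, which is also why idr_preload_end() and radix_tree_preload_end() become out-of-line functions (they must reference the lock variable that is file-local to lib/radix-tree.c). The snippet below is a minimal kernel-style usage sketch for context, not part of the patch: radix_tree_preload(), radix_tree_insert() and radix_tree_preload_end() are the existing kernel APIs touched here, while my_tree, my_lock and my_insert() are hypothetical caller-side names.

/*
 * Hypothetical caller (illustration only): the section between
 * radix_tree_preload() and radix_tree_preload_end() used to run with
 * preemption disabled; with this patch it holds
 * radix_tree_preloads_lock instead, so it stays preemptible on
 * PREEMPT_RT while still pinning the per-CPU preload node pool.
 */
#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static RADIX_TREE(my_tree, GFP_ATOMIC);	/* hypothetical tree */
static DEFINE_SPINLOCK(my_lock);		/* protects my_tree */

static int my_insert(unsigned long index, void *item)
{
	int err;

	/* May sleep to fill the per-CPU preload pool. */
	err = radix_tree_preload(GFP_KERNEL);
	if (err)
		return err;

	spin_lock(&my_lock);
	err = radix_tree_insert(&my_tree, index, item);
	spin_unlock(&my_lock);

	/* Formerly preempt_enable(); now drops radix_tree_preloads_lock. */
	radix_tree_preload_end();
	return err;
}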