Diffstat (limited to 'debian/patches-rt/0177-mm-vmalloc-Another-preempt-disable-region-which-suck.patch')
-rw-r--r--  debian/patches-rt/0177-mm-vmalloc-Another-preempt-disable-region-which-suck.patch  73
1 file changed, 73 insertions, 0 deletions
diff --git a/debian/patches-rt/0177-mm-vmalloc-Another-preempt-disable-region-which-suck.patch b/debian/patches-rt/0177-mm-vmalloc-Another-preempt-disable-region-which-suck.patch
new file mode 100644
index 000000000..54171e08f
--- /dev/null
+++ b/debian/patches-rt/0177-mm-vmalloc-Another-preempt-disable-region-which-suck.patch
@@ -0,0 +1,73 @@
+From 1bb6422feb2c5d6c151b91f71dac97b33685af69 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 12 Jul 2011 11:39:36 +0200
+Subject: [PATCH 177/347] mm/vmalloc: Another preempt disable region which
+ sucks
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.246-rt110.tar.xz
+
+Avoid the preempt disable version of get_cpu_var(). The inner-lock should
+provide enough serialisation.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ mm/vmalloc.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 1817871b0239..aa06badc76f4 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -853,7 +853,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
+ struct vmap_block *vb;
+ struct vmap_area *va;
+ unsigned long vb_idx;
+- int node, err;
++ int node, err, cpu;
+ void *vaddr;
+
+ node = numa_node_id();
+@@ -896,11 +896,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
+ BUG_ON(err);
+ radix_tree_preload_end();
+
+- vbq = &get_cpu_var(vmap_block_queue);
++ cpu = get_cpu_light();
++ vbq = this_cpu_ptr(&vmap_block_queue);
+ spin_lock(&vbq->lock);
+ list_add_tail_rcu(&vb->free_list, &vbq->free);
+ spin_unlock(&vbq->lock);
+- put_cpu_var(vmap_block_queue);
++ put_cpu_light();
+
+ return vaddr;
+ }
+@@ -969,6 +970,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
+ struct vmap_block *vb;
+ void *vaddr = NULL;
+ unsigned int order;
++ int cpu;
+
+ BUG_ON(offset_in_page(size));
+ BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
+@@ -983,7 +985,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
+ order = get_order(size);
+
+ rcu_read_lock();
+- vbq = &get_cpu_var(vmap_block_queue);
++ cpu = get_cpu_light();
++ vbq = this_cpu_ptr(&vmap_block_queue);
+ list_for_each_entry_rcu(vb, &vbq->free, free_list) {
+ unsigned long pages_off;
+
+@@ -1006,7 +1009,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
+ break;
+ }
+
+- put_cpu_var(vmap_block_queue);
++ put_cpu_light();
+ rcu_read_unlock();
+
+ /* Allocate new block if nothing was found */
+--
+2.36.1
+
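
For context only (this note is not part of the patch): the change above replaces get_cpu_var()/put_cpu_var(), which implicitly disable and re-enable preemption, with get_cpu_light() plus this_cpu_ptr(), relying on the vbq->lock spinlock alone for serialisation. A minimal user-space sketch of that idea follows; every name in it (block_queue, pick_queue, add_block) is hypothetical, and a racy sched_getcpu() hint stands in for this_cpu_ptr(), since which queue is picked only affects locality, never correctness.

/*
 * Sketch of "serialise with the inner lock instead of disabling preemption".
 * Not kernel code; a user-space analogue under the assumptions stated above.
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NR_QUEUES 4

struct block_queue {
	pthread_spinlock_t lock;
	int nr_blocks;			/* stand-in for the vbq->free list length */
};

static struct block_queue queues[NR_QUEUES];

/* Stand-in for this_cpu_ptr(&vmap_block_queue): the CPU number is only a
 * placement hint, so no preempt_disable()/migration barrier is required. */
static struct block_queue *pick_queue(void)
{
	int cpu = sched_getcpu();

	return &queues[(cpu < 0 ? 0 : cpu) % NR_QUEUES];
}

static void add_block(void)
{
	struct block_queue *q = pick_queue();

	pthread_spin_lock(&q->lock);	/* the "inner lock" does all the serialising */
	q->nr_blocks++;
	pthread_spin_unlock(&q->lock);
}

int main(void)
{
	int i;

	for (i = 0; i < NR_QUEUES; i++)
		pthread_spin_init(&queues[i].lock, PTHREAD_PROCESS_PRIVATE);

	add_block();

	for (i = 0; i < NR_QUEUES; i++)
		printf("queue %d: %d block(s)\n", i, queues[i].nr_blocks);

	return 0;
}

Build with "gcc -pthread". The point mirrored from the patch is that correctness never depends on staying on the CPU that was sampled; the lock provides the exclusion, which is why the preempt-disabling get_cpu_var() can be dropped on PREEMPT_RT.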