From b15a952c52a6825376d3e7f6c1bf5c886c6d8b74 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sat, 27 Apr 2024 12:06:00 +0200
Subject: Adding debian version 5.10.209-2.

Signed-off-by: Daniel Baumann
---
 ...Another-preempt-disable-region-which-suck.patch | 73 ++++++++++++++++++++++
 1 file changed, 73 insertions(+)
 create mode 100644 debian/patches-rt/0229-mm-vmalloc-Another-preempt-disable-region-which-suck.patch

diff --git a/debian/patches-rt/0229-mm-vmalloc-Another-preempt-disable-region-which-suck.patch b/debian/patches-rt/0229-mm-vmalloc-Another-preempt-disable-region-which-suck.patch
new file mode 100644
index 000000000..c699a1ab7
--- /dev/null
+++ b/debian/patches-rt/0229-mm-vmalloc-Another-preempt-disable-region-which-suck.patch
@@ -0,0 +1,73 @@
+From 61aced1d729a22bbaf66e1f68806e04058bafb3f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Tue, 12 Jul 2011 11:39:36 +0200
+Subject: [PATCH 229/323] mm/vmalloc: Another preempt disable region which
+ sucks
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+Avoid the preempt disable version of get_cpu_var(). The inner-lock should
+provide enough serialisation.
+
+Signed-off-by: Thomas Gleixner
+---
+ mm/vmalloc.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index d6a4794fa8ca..8113e4f0d2f2 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -1542,7 +1542,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
+ 	struct vmap_block *vb;
+ 	struct vmap_area *va;
+ 	unsigned long vb_idx;
+-	int node, err;
++	int node, err, cpu;
+ 	void *vaddr;
+ 
+ 	node = numa_node_id();
+@@ -1579,11 +1579,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
+ 		return ERR_PTR(err);
+ 	}
+ 
+-	vbq = &get_cpu_var(vmap_block_queue);
++	cpu = get_cpu_light();
++	vbq = this_cpu_ptr(&vmap_block_queue);
+ 	spin_lock(&vbq->lock);
+ 	list_add_tail_rcu(&vb->free_list, &vbq->free);
+ 	spin_unlock(&vbq->lock);
+-	put_cpu_var(vmap_block_queue);
++	put_cpu_light();
+ 
+ 	return vaddr;
+ }
+@@ -1648,6 +1649,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
+ 	struct vmap_block *vb;
+ 	void *vaddr = NULL;
+ 	unsigned int order;
++	int cpu;
+ 
+ 	BUG_ON(offset_in_page(size));
+ 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
+@@ -1662,7 +1664,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
+ 	order = get_order(size);
+ 
+ 	rcu_read_lock();
+-	vbq = &get_cpu_var(vmap_block_queue);
++	cpu = get_cpu_light();
++	vbq = this_cpu_ptr(&vmap_block_queue);
+ 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
+ 		unsigned long pages_off;
+ 
+@@ -1685,7 +1688,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
+ 		break;
+ 	}
+ 
+-	put_cpu_var(vmap_block_queue);
++	put_cpu_light();
+ 	rcu_read_unlock();
+ 
+ 	/* Allocate new block if nothing was found */
+-- 
+2.43.0
+
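A note on the two sets of per-CPU helpers used in the patch above: the sketch below shows roughly what they expand to. The get_cpu_var()/put_cpu_var() forms follow mainline include/linux/percpu-defs.h, and the get_cpu_light()/put_cpu_light() forms are added by an earlier patch in this RT series; exact definitions vary between kernel versions, so treat this as an illustrative sketch rather than the authoritative source.

/*
 * Illustrative sketch only -- simplified from mainline
 * include/linux/percpu-defs.h and from the RT series' addition to
 * include/linux/smp.h; check the patched tree for the exact definitions.
 */

/*
 * Mainline helpers: pin the task to its CPU by disabling preemption.
 * On PREEMPT_RT this is a problem, because spin_lock(&vbq->lock) turns
 * into a sleeping rt_mutex-based lock that must not be acquired inside
 * a preempt-disabled region.
 */
#define get_cpu_var(var)					\
(*({								\
	preempt_disable();					\
	this_cpu_ptr(&var);					\
}))

#define put_cpu_var(var)					\
do {								\
	(void)&(var);						\
	preempt_enable();					\
} while (0)

/*
 * RT helpers: only disable migration, so the task keeps using its own
 * CPU's vmap_block_queue but may still be preempted and may sleep while
 * taking vbq->lock.
 */
#define get_cpu_light()		({ migrate_disable(); smp_processor_id(); })
#define put_cpu_light()		migrate_enable()

This is why the patch keeps the per-CPU lookup via this_cpu_ptr(&vmap_block_queue) but drops the preempt-disable bracketing: as the commit message says, the inner vbq->lock already serialises the free-list manipulation, so disabling preemption adds nothing except an illegal lock acquisition on RT.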