author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:36 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:36 +0000
commit     50ba0232fd5312410f1b65247e774244f89a628e (patch)
tree       fd8f2fc78e9e548af0ff9590276602ee6125be00 /mm/mempool.c
parent     Releasing progress-linux version 6.7.12-1~progress7.99u1. (diff)
download   linux-50ba0232fd5312410f1b65247e774244f89a628e.tar.xz
           linux-50ba0232fd5312410f1b65247e774244f89a628e.zip
Merging upstream version 6.8.9.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'mm/mempool.c')
-rw-r--r--  mm/mempool.c  81
1 file changed, 64 insertions(+), 17 deletions(-)
diff --git a/mm/mempool.c b/mm/mempool.c
index 734bcf5afb..dbbf0e9fb4 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -20,7 +20,7 @@
 #include <linux/writeback.h>
 #include "slab.h"
 
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
+#ifdef CONFIG_SLUB_DEBUG_ON
 static void poison_error(mempool_t *pool, void *element, size_t size,
                          size_t byte)
 {
@@ -56,6 +56,10 @@ static void __check_element(mempool_t *pool, void *element, size_t size)
 
 static void check_element(mempool_t *pool, void *element)
 {
+        /* Skip checking: KASAN might save its metadata in the element. */
+        if (kasan_enabled())
+                return;
+
         /* Mempools backed by slab allocator */
         if (pool->free == mempool_kfree) {
                 __check_element(pool, element, (size_t)pool->pool_data);
@@ -64,10 +68,10 @@ static void check_element(mempool_t *pool, void *element)
         } else if (pool->free == mempool_free_pages) {
                 /* Mempools backed by page allocator */
                 int order = (int)(long)pool->pool_data;
-                void *addr = kmap_atomic((struct page *)element);
+                void *addr = kmap_local_page((struct page *)element);
 
                 __check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
-                kunmap_atomic(addr);
+                kunmap_local(addr);
         }
 }
@@ -81,6 +85,10 @@ static void __poison_element(void *element, size_t size)
 
 static void poison_element(mempool_t *pool, void *element)
 {
+        /* Skip poisoning: KASAN might save its metadata in the element. */
+        if (kasan_enabled())
+                return;
+
         /* Mempools backed by slab allocator */
         if (pool->alloc == mempool_kmalloc) {
                 __poison_element(element, (size_t)pool->pool_data);
@@ -89,47 +97,49 @@ static void poison_element(mempool_t *pool, void *element)
         } else if (pool->alloc == mempool_alloc_pages) {
                 /* Mempools backed by page allocator */
                 int order = (int)(long)pool->pool_data;
-                void *addr = kmap_atomic((struct page *)element);
+                void *addr = kmap_local_page((struct page *)element);
 
                 __poison_element(addr, 1UL << (PAGE_SHIFT + order));
-                kunmap_atomic(addr);
+                kunmap_local(addr);
         }
 }
-#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
+#else /* CONFIG_SLUB_DEBUG_ON */
 static inline void check_element(mempool_t *pool, void *element)
 {
 }
 static inline void poison_element(mempool_t *pool, void *element)
 {
 }
-#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
+#endif /* CONFIG_SLUB_DEBUG_ON */
 
-static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
+static __always_inline bool kasan_poison_element(mempool_t *pool, void *element)
 {
         if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-                kasan_slab_free_mempool(element);
+                return kasan_mempool_poison_object(element);
         else if (pool->alloc == mempool_alloc_pages)
-                kasan_poison_pages(element, (unsigned long)pool->pool_data,
-                                   false);
+                return kasan_mempool_poison_pages(element,
+                                                  (unsigned long)pool->pool_data);
+        return true;
 }
 
 static void kasan_unpoison_element(mempool_t *pool, void *element)
 {
         if (pool->alloc == mempool_kmalloc)
-                kasan_unpoison_range(element, (size_t)pool->pool_data);
+                kasan_mempool_unpoison_object(element, (size_t)pool->pool_data);
         else if (pool->alloc == mempool_alloc_slab)
-                kasan_unpoison_range(element, kmem_cache_size(pool->pool_data));
+                kasan_mempool_unpoison_object(element,
+                                              kmem_cache_size(pool->pool_data));
         else if (pool->alloc == mempool_alloc_pages)
-                kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
-                                     false);
+                kasan_mempool_unpoison_pages(element,
+                                             (unsigned long)pool->pool_data);
 }
 
 static __always_inline void add_element(mempool_t *pool, void *element)
 {
         BUG_ON(pool->curr_nr >= pool->min_nr);
         poison_element(pool, element);
-        kasan_poison_element(pool, element);
-        pool->elements[pool->curr_nr++] = element;
+        if (kasan_poison_element(pool, element))
+                pool->elements[pool->curr_nr++] = element;
 }
 
 static void *remove_element(mempool_t *pool)
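
The add_element() change in the hunk above is the behavioral core of the KASAN rework: kasan_mempool_poison_object() and kasan_mempool_poison_pages() now report whether the element survived poisoning, and an element that fails (for instance, when KASAN detects an invalid or double free) is deliberately dropped rather than recycled into the pool. A minimal userspace sketch of that keep-only-if-poisoning-succeeded pattern, assuming nothing beyond the visible semantics of the hunk (the toy_* names are invented for illustration, not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define POOL_MAX 8

struct toy_pool {
        void *elements[POOL_MAX];
        int curr_nr;
};

/* Stand-in for kasan_mempool_poison_object(): returns false when the
 * element must not be handed out again. KASAN does real validation
 * (e.g. double-free detection); this toy just rejects NULL. */
static bool toy_poison(void *element)
{
        return element != NULL;
}

/* Mirrors the new add_element(): keep the element for reuse only when
 * the poisoning step vouches for it; otherwise drop it on purpose. */
static void toy_add_element(struct toy_pool *pool, void *element)
{
        if (toy_poison(element))
                pool->elements[pool->curr_nr++] = element;
}

int main(void)
{
        struct toy_pool pool = { .curr_nr = 0 };
        int x = 42;

        toy_add_element(&pool, &x);     /* kept: poisoning succeeded */
        toy_add_element(&pool, NULL);   /* dropped: poisoning failed */
        printf("pool holds %d element(s)\n", pool.curr_nr);     /* prints 1 */
        return 0;
}
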
@@ -447,6 +457,43 @@ repeat_alloc:
 EXPORT_SYMBOL(mempool_alloc);
 
+/**
+ * mempool_alloc_preallocated - allocate an element from preallocated elements
+ *                              belonging to a specific memory pool
+ * @pool:      pointer to the memory pool which was allocated via
+ *             mempool_create().
+ *
+ * This function is similar to mempool_alloc, but it only attempts allocating
+ * an element from the preallocated elements. It does not sleep and immediately
+ * returns if no preallocated elements are available.
+ *
+ * Return: pointer to the allocated element or %NULL if no elements are
+ * available.
+ */
+void *mempool_alloc_preallocated(mempool_t *pool)
+{
+        void *element;
+        unsigned long flags;
+
+        spin_lock_irqsave(&pool->lock, flags);
+        if (likely(pool->curr_nr)) {
+                element = remove_element(pool);
+                spin_unlock_irqrestore(&pool->lock, flags);
+                /* paired with rmb in mempool_free(), read comment there */
+                smp_wmb();
+                /*
+                 * Update the allocation stack trace as this is more useful
+                 * for debugging.
+                 */
+                kmemleak_update_trace(element);
+                return element;
+        }
+        spin_unlock_irqrestore(&pool->lock, flags);
+
+        return NULL;
+}
+EXPORT_SYMBOL(mempool_alloc_preallocated);
+
 /**
  * mempool_free - return an element to the pool.
  * @element:   pool element pointer.
  * @pool:      pointer to the memory pool which was allocated via
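
For context on the new interface, a caller-side sketch (not part of this commit; the my_ctx, my_try_get, my_get, and my_put names and the GFP_KERNEL fallback are illustrative assumptions): mempool_alloc_preallocated() suits paths that must not sleep, with mempool_alloc() as the sleeping fallback elsewhere.

#include <linux/mempool.h>
#include <linux/gfp.h>

/* Hypothetical driver context owning a pool made with mempool_create(). */
struct my_ctx {
        mempool_t *pool;
};

/*
 * Fast path, e.g. under a spinlock or in atomic context: only dip into
 * the preallocated reserve; returns NULL instead of sleeping.
 */
static void *my_try_get(struct my_ctx *ctx)
{
        return mempool_alloc_preallocated(ctx->pool);
}

/*
 * Slow path, process context: try the reserve first, then fall back to
 * mempool_alloc(), which may sleep until an element is freed back.
 */
static void *my_get(struct my_ctx *ctx)
{
        void *elem = mempool_alloc_preallocated(ctx->pool);

        if (!elem)
                elem = mempool_alloc(ctx->pool, GFP_KERNEL);
        return elem;
}

static void my_put(struct my_ctx *ctx, void *elem)
{
        mempool_free(elem, ctx->pool);
}

Since mempool_alloc() with a sleepable mask such as GFP_KERNEL waits for an element to be returned rather than failing, the slow path above should not return NULL in practice; only the non-sleeping fast path needs a NULL check.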