author      Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit      ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree        b2d64bc10158fdd5497876388cd68142ca374ed3 /include/linux/slab_def.h
parent      Initial commit. (diff)
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'include/linux/slab_def.h')
-rw-r--r--    include/linux/slab_def.h    124
1 file changed, 124 insertions(+), 0 deletions(-)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
new file mode 100644
index 000000000..a61e7d55d
--- /dev/null
+++ b/include/linux/slab_def.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SLAB_DEF_H
+#define _LINUX_SLAB_DEF_H
+
+#include <linux/kfence.h>
+#include <linux/reciprocal_div.h>
+
+/*
+ * Definitions unique to the original Linux SLAB allocator.
+ */
+
+struct kmem_cache {
+ struct array_cache __percpu *cpu_cache;
+
+/* 1) Cache tunables. Protected by slab_mutex */
+ unsigned int batchcount;
+ unsigned int limit;
+ unsigned int shared;
+
+ unsigned int size;
+ struct reciprocal_value reciprocal_buffer_size;
+/* 2) touched by every alloc & free from the backend */
+
+ slab_flags_t flags; /* constant flags */
+ unsigned int num; /* # of objs per slab */
+
+/* 3) cache_grow/shrink */
+ /* order of pgs per slab (2^n) */
+ unsigned int gfporder;
+
+ /* force GFP flags, e.g. GFP_DMA */
+ gfp_t allocflags;
+
+ size_t colour; /* cache colouring range */
+ unsigned int colour_off; /* colour offset */
+ unsigned int freelist_size;
+
+ /* constructor func */
+ void (*ctor)(void *obj);
+
+/* 4) cache creation/removal */
+ const char *name;
+ struct list_head list;
+ int refcount;
+ int object_size;
+ int align;
+
+/* 5) statistics */
+#ifdef CONFIG_DEBUG_SLAB
+ unsigned long num_active;
+ unsigned long num_allocations;
+ unsigned long high_mark;
+ unsigned long grown;
+ unsigned long reaped;
+ unsigned long errors;
+ unsigned long max_freeable;
+ unsigned long node_allocs;
+ unsigned long node_frees;
+ unsigned long node_overflow;
+ atomic_t allochit;
+ atomic_t allocmiss;
+ atomic_t freehit;
+ atomic_t freemiss;
+
+ /*
+ * If debugging is enabled, then the allocator can add additional
+ * fields and/or padding to every object. 'size' contains the total
+ * object size including these internal fields, while 'obj_offset'
+ * and 'object_size' contain the offset to the user object and its
+ * size.
+ */
+ int obj_offset;
+#endif /* CONFIG_DEBUG_SLAB */
+
+#ifdef CONFIG_KASAN_GENERIC
+ struct kasan_cache kasan_info;
+#endif
+
+#ifdef CONFIG_SLAB_FREELIST_RANDOM
+ unsigned int *random_seq;
+#endif
+
+#ifdef CONFIG_HARDENED_USERCOPY
+ unsigned int useroffset; /* Usercopy region offset */
+ unsigned int usersize; /* Usercopy region size */
+#endif
+
+ struct kmem_cache_node *node[MAX_NUMNODES];
+};
+
+static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
+ void *x)
+{
+ void *object = x - (x - slab->s_mem) % cache->size;
+ void *last_object = slab->s_mem + (cache->num - 1) * cache->size;
+
+ if (unlikely(object > last_object))
+ return last_object;
+ else
+ return object;
+}
+
+/*
+ * We want to avoid an expensive divide : (offset / cache->size)
+ * Using the fact that size is a constant for a particular cache,
+ * we can replace (offset / cache->size) by
+ * reciprocal_divide(offset, cache->reciprocal_buffer_size)
+ */
+static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+ const struct slab *slab, void *obj)
+{
+ u32 offset = (obj - slab->s_mem);
+ return reciprocal_divide(offset, cache->reciprocal_buffer_size);
+}
+
+static inline int objs_per_slab(const struct kmem_cache *cache,
+ const struct slab *slab)
+{
+ if (is_kfence_address(slab_address(slab)))
+ return 1;
+ return cache->num;
+}
+
+#endif /* _LINUX_SLAB_DEF_H */
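
A note on the CONFIG_DEBUG_SLAB comment in the struct above: when debugging is
enabled, each object is bracketed by extra debug fields, so 'size' is the full
per-object footprint while 'obj_offset' and 'object_size' locate the
user-visible part inside it. A tiny sketch with made-up values, purely to pin
down how the three numbers relate (nothing here is real kernel layout):

    #include <assert.h>

    /* Illustrative values only: a 40-byte user object with 8 bytes of
     * debug padding (e.g. a red zone) in front of it. */
    struct fake_cache {
            int size;        /* total per-object footprint               */
            int object_size; /* size the user actually asked for         */
            int obj_offset;  /* where the user object starts in the slot */
    };

    int main(void)
    {
            struct fake_cache c = { .size = 56, .object_size = 40, .obj_offset = 8 };

            /* The user object plus its leading padding must fit in the slot. */
            assert(c.obj_offset + c.object_size <= c.size);
            return 0;
    }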
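
nearest_obj() rounds an arbitrary pointer down to the start of the object
containing it, clamping to the last object so a stray pointer past the end of
the slab still maps to a valid slot. A hypothetical user-space analogue of the
same arithmetic (slab_mem, OBJ_SIZE and OBJ_NUM stand in for slab->s_mem,
cache->size and cache->num; none of this is kernel API):

    #include <stdio.h>

    /* Stand-ins for cache->size, cache->num and slab->s_mem. */
    #define OBJ_SIZE 104
    #define OBJ_NUM  5
    static char slab_mem[OBJ_NUM * OBJ_SIZE];

    static void *nearest_obj(void *x)
    {
            char *base = slab_mem;
            /* Round x down to the start of its object, as in slab_def.h. */
            char *object = (char *)x - ((char *)x - base) % OBJ_SIZE;
            char *last_object = base + (OBJ_NUM - 1) * OBJ_SIZE;

            return object > last_object ? last_object : object;
    }

    int main(void)
    {
            /* A pointer into the middle of object 2 maps back to its start. */
            void *p = slab_mem + 2 * OBJ_SIZE + 17;

            printf("object index: %ld\n",
                   (long)(((char *)nearest_obj(p) - slab_mem) / OBJ_SIZE));
            return 0;
    }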
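
The reciprocal trick used by obj_to_index() replaces a per-call division by
cache->size with one multiply and a couple of shifts, which pays off because
the divisor is fixed for the lifetime of the cache. A user-space sketch of the
idea, re-deriving the two helpers along the lines of lib/reciprocal_div.c (the
standalone types and the main() harness are illustrative, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    /* User-space stand-ins for the kernel's struct reciprocal_value and
     * reciprocal_divide(); the algorithm follows lib/reciprocal_div.c. */
    struct reciprocal_value {
            uint32_t m;
            uint8_t sh1, sh2;
    };

    static struct reciprocal_value reciprocal_value(uint32_t d)
    {
            struct reciprocal_value R;
            uint64_t m;
            int l = 31;

            /* l = ceil(log2(d)), i.e. fls(d - 1); d must be >= 1. */
            while (l > 0 && d <= (1u << (l - 1)))
                    l--;

            m = ((1ULL << 32) * ((1ULL << l) - d)) / d + 1;
            R.m = (uint32_t)m;
            R.sh1 = l > 1 ? 1 : l;
            R.sh2 = l > 1 ? l - 1 : 0;
            return R;
    }

    static uint32_t reciprocal_divide(uint32_t a, struct reciprocal_value R)
    {
            uint32_t t = (uint32_t)(((uint64_t)a * R.m) >> 32);
            return (t + ((a - t) >> R.sh1)) >> R.sh2;
    }

    int main(void)
    {
            /* Pretend cache->size is 104 bytes: precompute once, divide often. */
            uint32_t size = 104;
            struct reciprocal_value rv = reciprocal_value(size);
            uint32_t offset;

            for (offset = 0; offset < 10 * size; offset += size)
                    printf("offset %4u -> index %u (expect %u)\n",
                           offset, reciprocal_divide(offset, rv), offset / size);
            return 0;
    }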