author    Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-30 03:35:39 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-30 03:35:39 +0000
commit    ddbb0c19fe8ea90e33ad47299c7edd6305d0eaea (patch)
tree      de36f79327a07773a125f713f423457a88104e56 /mm
parent    Adding debian version 6.8.9-1. (diff)
Merging upstream version 6.8.11.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/readahead.c   4
-rw-r--r--  mm/slub.c       52
2 files changed, 33 insertions(+), 23 deletions(-)
diff --git a/mm/readahead.c b/mm/readahead.c
index 2648ec4f04..89139a8721 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -490,6 +490,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
pgoff_t index = readahead_index(ractl);
pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
pgoff_t mark = index + ra->size - ra->async_size;
+ unsigned int nofs;
int err = 0;
gfp_t gfp = readahead_gfp_mask(mapping);
@@ -506,6 +507,8 @@ void page_cache_ra_order(struct readahead_control *ractl,
new_order--;
}
+ /* See comment in page_cache_ra_unbounded() */
+ nofs = memalloc_nofs_save();
filemap_invalidate_lock_shared(mapping);
while (index <= limit) {
unsigned int order = new_order;
@@ -532,6 +535,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
read_pages(ractl);
filemap_invalidate_unlock_shared(mapping);
+ memalloc_nofs_restore(nofs);
/*
* If there were already pages in the page cache, then we may have
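The readahead hunks above bracket the whole folio-allocation loop with memalloc_nofs_save()/memalloc_nofs_restore(), the scoped-GFP API from <linux/sched/mm.h>: every allocation inside the scope implicitly drops __GFP_FS, so readahead cannot recurse into the filesystem while filemap_invalidate_lock_shared() is held. A minimal sketch of the pattern in kernel context (do_page_allocations() is a hypothetical stand-in for the loop above):

#include <linux/sched/mm.h>

static void scoped_nofs_sketch(void)
{
	unsigned int nofs;

	/* Allocations in this scope behave as if GFP_NOFS were passed. */
	nofs = memalloc_nofs_save();
	do_page_allocations();		/* hypothetical allocating work */
	memalloc_nofs_restore(nofs);	/* restore the saved flag state */
}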
diff --git a/mm/slub.c b/mm/slub.c
index 2ef88bbf56..914c35a7d4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -557,6 +557,26 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
*(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr);
}
+/*
+ * See comment in calculate_sizes().
+ */
+static inline bool freeptr_outside_object(struct kmem_cache *s)
+{
+ return s->offset >= s->inuse;
+}
+
+/*
+ * Return offset of the end of info block which is inuse + free pointer if
+ * not overlapping with object.
+ */
+static inline unsigned int get_info_end(struct kmem_cache *s)
+{
+ if (freeptr_outside_object(s))
+ return s->inuse + sizeof(void *);
+ else
+ return s->inuse;
+}
+
/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
for (__p = fixup_red_left(__s, __addr); \
@@ -845,26 +865,6 @@ static void print_section(char *level, char *text, u8 *addr,
metadata_access_disable();
}
-/*
- * See comment in calculate_sizes().
- */
-static inline bool freeptr_outside_object(struct kmem_cache *s)
-{
- return s->offset >= s->inuse;
-}
-
-/*
- * Return offset of the end of info block which is inuse + free pointer if
- * not overlapping with object.
- */
-static inline unsigned int get_info_end(struct kmem_cache *s)
-{
- if (freeptr_outside_object(s))
- return s->inuse + sizeof(void *);
- else
- return s->inuse;
-}
-
static struct track *get_track(struct kmem_cache *s, void *object,
enum track_item alloc)
{
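The two slub.c hunks above are a pure code move: freeptr_outside_object() and get_info_end() relocate earlier in the file so slab_free_hook() can call them; their bodies are unchanged. For reference, a standalone userspace sketch of the layout computation, where struct fake_cache is a stripped-down stand-in for the handful of struct kmem_cache fields involved:

#include <stdio.h>

/* Stand-in for struct kmem_cache: offset is where the free pointer
 * lives, inuse is how many bytes of the slot the object occupies. */
struct fake_cache {
	unsigned int offset;
	unsigned int inuse;
};

static int freeptr_outside_object(const struct fake_cache *s)
{
	return s->offset >= s->inuse;
}

static unsigned int get_info_end(const struct fake_cache *s)
{
	/* Metadata starts after the free pointer only when the pointer
	 * is stored past the object's used bytes. */
	if (freeptr_outside_object(s))
		return s->inuse + (unsigned int)sizeof(void *);
	return s->inuse;
}

int main(void)
{
	struct fake_cache inside  = { .offset = 0,  .inuse = 64 };
	struct fake_cache outside = { .offset = 64, .inuse = 64 };

	printf("freeptr inside object:  info end = %u\n", get_info_end(&inside));
	printf("freeptr outside object: info end = %u\n", get_info_end(&outside));
	return 0;
}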
@@ -2107,15 +2107,20 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
*
* The initialization memset's clear the object and the metadata,
* but don't touch the SLAB redzone.
+ *
+ * The object's freepointer is also avoided if stored outside the
+ * object.
*/
if (unlikely(init)) {
int rsize;
+ unsigned int inuse;
+ inuse = get_info_end(s);
if (!kasan_has_integrated_init())
memset(kasan_reset_tag(x), 0, s->object_size);
rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
- memset((char *)kasan_reset_tag(x) + s->inuse, 0,
- s->size - s->inuse - rsize);
+ memset((char *)kasan_reset_tag(x) + inuse, 0,
+ s->size - inuse - rsize);
}
/* KASAN might put x into memory quarantine, delaying its reuse. */
return !kasan_slab_free(s, x, init);
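This hunk is the functional fix: the init-on-free memset in slab_free_hook() now starts at get_info_end(s) instead of s->inuse, so a free pointer stored outside the object is no longer zeroed along with the metadata area, and the freelist stays intact. An illustrative calculation of the two memset ranges with made-up numbers (64-byte object, free pointer stored immediately after it, 8-byte pointers, no red zone; all values are assumptions for the example):

#include <stdio.h>

int main(void)
{
	unsigned int inuse = 64;	/* s->inuse: object bytes in use */
	unsigned int size  = 96;	/* s->size: full slot size */
	unsigned int rsize = 0;		/* no SLAB_RED_ZONE padding */
	unsigned int info_end = inuse + (unsigned int)sizeof(void *);

	/* Old code cleared the free pointer at bytes 64..71 too. */
	printf("old memset range: [%u, %u)\n", inuse, size - rsize);
	/* New code starts past the free pointer and leaves it intact. */
	printf("new memset range: [%u, %u)\n", info_end, size - rsize);
	return 0;
}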
@@ -3737,7 +3742,8 @@ static void *__slab_alloc_node(struct kmem_cache *s,
static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
void *obj)
{
- if (unlikely(slab_want_init_on_free(s)) && obj)
+ if (unlikely(slab_want_init_on_free(s)) && obj &&
+ !freeptr_outside_object(s))
memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
0, sizeof(void *));
}
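The allocation-side counterpart follows the same reasoning: with init_on_free the object was already zeroed when freed, so maybe_wipe_obj_freeptr() only has to clear the stale freelist pointer if it was written inside the object; when the pointer lives outside the object, the guard added above skips the memset entirely.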