author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-08-21 05:38:09 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-08-21 05:38:09 +0000
commit    bb26c5b2b89d932f617fa415a715b2bb259a42bc (patch)
tree      58c44da24c7a1fb78dda1c0b698510c6f8640981 /mm
parent    Adding upstream version 6.10.4. (diff)
Adding upstream version 6.10.6.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/list_lru.c   | 28 ++++++++++++++++++++++++------
-rw-r--r--  mm/memcontrol.c | 22 ++++++++++++++++++++--
-rw-r--r--  mm/slub.c       |  3 +++
3 files changed, 45 insertions(+), 8 deletions(-)
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 3fd64736bc..6ac393ea74 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -85,6 +85,7 @@ list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
 }
 #endif /* CONFIG_MEMCG_KMEM */
 
+/* The caller must ensure the memcg lifetime. */
 bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
 		  struct mem_cgroup *memcg)
 {
@@ -109,14 +110,22 @@ EXPORT_SYMBOL_GPL(list_lru_add);
 
 bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
 {
+	bool ret;
 	int nid = page_to_nid(virt_to_page(item));
-	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
-		mem_cgroup_from_slab_obj(item) : NULL;
 
-	return list_lru_add(lru, item, nid, memcg);
+	if (list_lru_memcg_aware(lru)) {
+		rcu_read_lock();
+		ret = list_lru_add(lru, item, nid, mem_cgroup_from_slab_obj(item));
+		rcu_read_unlock();
+	} else {
+		ret = list_lru_add(lru, item, nid, NULL);
+	}
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(list_lru_add_obj);
 
+/* The caller must ensure the memcg lifetime. */
 bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
 		  struct mem_cgroup *memcg)
 {
@@ -139,11 +148,18 @@ EXPORT_SYMBOL_GPL(list_lru_del);
 
 bool list_lru_del_obj(struct list_lru *lru, struct list_head *item)
 {
+	bool ret;
 	int nid = page_to_nid(virt_to_page(item));
-	struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ?
-		mem_cgroup_from_slab_obj(item) : NULL;
 
-	return list_lru_del(lru, item, nid, memcg);
+	if (list_lru_memcg_aware(lru)) {
+		rcu_read_lock();
+		ret = list_lru_del(lru, item, nid, mem_cgroup_from_slab_obj(item));
+		rcu_read_unlock();
+	} else {
+		ret = list_lru_del(lru, item, nid, NULL);
+	}
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(list_lru_del_obj);
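
Note on the RCU sections above: mem_cgroup_from_slab_obj() returns a memcg pointer without taking a reference on it, so once the lookup's RCU read-side critical section ends, a concurrent cgroup removal can free the memcg and the subsequent list_lru_add()/list_lru_del() call becomes a use-after-free. The fix keeps the lookup and the use inside one critical section; the new one-line comments push the same lifetime obligation onto callers that pass a memcg themselves. A minimal sketch of the two shapes (bad_caller()/good_caller() are hypothetical; the APIs are the ones used in the patch):

static bool bad_caller(struct list_lru *lru, struct list_head *item, int nid)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_slab_obj(item);
	rcu_read_unlock();	/* a grace period may elapse; memcg can be freed */

	return list_lru_add(lru, item, nid, memcg);	/* potential UAF */
}

static bool good_caller(struct list_lru *lru, struct list_head *item, int nid)
{
	bool ret;

	rcu_read_lock();	/* pins the memcg across the whole use */
	ret = list_lru_add(lru, item, nid, mem_cgroup_from_slab_obj(item));
	rcu_read_unlock();

	return ret;
}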
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8f2f1bb18c..88c7a08610 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5568,11 +5568,28 @@ static struct cftype mem_cgroup_legacy_files[] = {
 #define MEM_CGROUP_ID_MAX	((1UL << MEM_CGROUP_ID_SHIFT) - 1)
 
 static DEFINE_IDR(mem_cgroup_idr);
+static DEFINE_SPINLOCK(memcg_idr_lock);
+
+static int mem_cgroup_alloc_id(void)
+{
+	int ret;
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&memcg_idr_lock);
+	ret = idr_alloc(&mem_cgroup_idr, NULL, 1, MEM_CGROUP_ID_MAX + 1,
+			GFP_NOWAIT);
+	spin_unlock(&memcg_idr_lock);
+	idr_preload_end();
+	return ret;
+}
 
 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
 {
 	if (memcg->id.id > 0) {
+		spin_lock(&memcg_idr_lock);
 		idr_remove(&mem_cgroup_idr, memcg->id.id);
+		spin_unlock(&memcg_idr_lock);
+
 		memcg->id.id = 0;
 	}
 }
@@ -5706,8 +5723,7 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
 	if (!memcg)
 		return ERR_PTR(error);
 
-	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
-				 1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
+	memcg->id.id = mem_cgroup_alloc_id();
 	if (memcg->id.id < 0) {
 		error = memcg->id.id;
 		goto fail;
@@ -5854,7 +5870,9 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
 	 * publish it here at the end of onlining. This matches the
 	 * regular ID destruction during offlining.
 	 */
+	spin_lock(&memcg_idr_lock);
 	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
+	spin_unlock(&memcg_idr_lock);
 
 	return 0;
 offline_kmem:
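
An IDR does not serialize its own modifications: idr_alloc(), idr_remove(), and idr_replace() all rewrite the underlying radix tree and need a common external lock, which memcg_idr_lock now provides for mem_cgroup_idr. Because idr_alloc() may have to allocate tree nodes, and GFP_KERNEL allocations can sleep, mem_cgroup_alloc_id() uses the standard preload idiom: reserve nodes while still allowed to sleep, then allocate atomically under the spinlock. A sketch of that idiom with hypothetical names (example_idr, example_lock, example_alloc_id):

static DEFINE_IDR(example_idr);			/* hypothetical */
static DEFINE_SPINLOCK(example_lock);		/* hypothetical */

static int example_alloc_id(void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep; caches radix-tree nodes, disables preemption */
	spin_lock(&example_lock);
	/* GFP_NOWAIT never sleeps, so this is safe under the spinlock */
	id = idr_alloc(&example_idr, ptr, 1, 0, GFP_NOWAIT);
	spin_unlock(&example_lock);
	idr_preload_end();		/* re-enables preemption */

	return id;	/* allocated ID >= 1, or -errno on failure */
}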
diff --git a/mm/slub.c b/mm/slub.c
index 4927edec6a..849e8972e2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4655,6 +4655,9 @@ static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 		if (!df.slab)
 			continue;
 
+		if (kfence_free(df.freelist))
+			continue;
+
 		do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt,
 			     _RET_IP_);
 	} while (likely(size));
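
KFENCE transparently services an occasional slab allocation from its own guarded pool, and such objects must be released with kfence_free(), which returns true (having freed the object) only when the address belongs to that pool. The non-bulk free paths perform this check before reaching the raw freeing code, but __kmem_cache_free_bulk() calls do_slab_free() directly and so bypassed it, allowing a KFENCE object to be threaded onto a regular slab freelist. A sketch of the invariant the added check restores (kfence_free() is the real API; example_free() and example_raw_free() are hypothetical):

static void example_free(struct kmem_cache *s, void *object)
{
	/*
	 * Give KFENCE the first chance to claim the object: if it lives
	 * in the KFENCE pool it is freed here and must never be handed
	 * to the regular slab freeing code.
	 */
	if (kfence_free(object))
		return;

	example_raw_free(s, object);	/* hypothetical stand-in for do_slab_free() */
}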