author    Andrew Morton <akpm@linux-foundation.org>    2024-04-25 20:58:55 -0700
committer Andrew Morton <akpm@linux-foundation.org>    2024-04-25 20:58:55 -0700
commit    377ebc796ddb13251ce75a7491487a4966c3908a (patch)
tree      ce1f419e8206ffdc40bb3459b570feee929d9127
parent    528947c2aae7602cc9187aa69c016b0c44f4fe40 (diff)
download  25-new-377ebc796ddb13251ce75a7491487a4966c3908a.tar.gz
foo
-rw-r--r--   patches/mm-slub-avoid-recursive-loop-with-kmemleak.patch   61
1 file changed, 61 insertions(+), 0 deletions(-)
diff --git a/patches/mm-slub-avoid-recursive-loop-with-kmemleak.patch b/patches/mm-slub-avoid-recursive-loop-with-kmemleak.patch
new file mode 100644
index 000000000..18bc0f168
--- /dev/null
+++ b/patches/mm-slub-avoid-recursive-loop-with-kmemleak.patch
@@ -0,0 +1,61 @@
+From: Kees Cook <keescook@chromium.org>
+Subject: mm/slub: avoid recursive loop with kmemleak
+Date: Thu, 25 Apr 2024 13:55:23 -0700
+
+The system will immediately fill up the stack and crash when both
+CONFIG_DEBUG_KMEMLEAK and CONFIG_MEM_ALLOC_PROFILING are enabled.  Avoid
+allocation tagging of kmemleak caches; otherwise recursive allocation
+tracking occurs.
+
+Link: https://lkml.kernel.org/r/20240425205516.work.220-kees@kernel.org
+Fixes: 279bb991b4d9 ("mm/slab: add allocation accounting into slab allocation and free paths")
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Kent Overstreet <kent.overstreet@linux.dev>
+Cc: Pekka Enberg <penberg@kernel.org>
+Cc: Roman Gushchin <roman.gushchin@linux.dev>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+
+ mm/kmemleak.c | 4 ++--
+ mm/slub.c | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/mm/kmemleak.c~mm-slub-avoid-recursive-loop-with-kmemleak
++++ a/mm/kmemleak.c
+@@ -463,7 +463,7 @@ static struct kmemleak_object *mem_pool_
+
+ /* try the slab allocator first */
+ if (object_cache) {
+- object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
++ object = kmem_cache_alloc_noprof(object_cache, gfp_kmemleak_mask(gfp));
+ if (object)
+ return object;
+ }
+@@ -947,7 +947,7 @@ static void add_scan_area(unsigned long
+ untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
+
+ if (scan_area_cache)
+- area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
++ area = kmem_cache_alloc_noprof(scan_area_cache, gfp_kmemleak_mask(gfp));
+
+ raw_spin_lock_irqsave(&object->lock, flags);
+ if (!area) {
+--- a/mm/slub.c~mm-slub-avoid-recursive-loop-with-kmemleak
++++ a/mm/slub.c
+@@ -2018,7 +2018,7 @@ prepare_slab_obj_exts_hook(struct kmem_c
+ if (!p)
+ return NULL;
+
+- if (s->flags & SLAB_NO_OBJ_EXT)
++ if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
+ return NULL;
+
+ if (flags & __GFP_NO_OBJ_EXT)
+_
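
For context, here is a minimal userspace C sketch of the recursion pattern this patch breaks (illustrative only; cache_alloc, track_alloc, and NO_TRACK are invented names for the example, not kernel APIs): an allocation-tracking hook that itself allocates memory re-enters the allocator, so the tracker's own allocations have to be exempted, which is what the SLAB_NOLEAKTRACE check and the kmem_cache_alloc_noprof() calls above accomplish.

#include <stdio.h>
#include <stdlib.h>

#define NO_TRACK 0x1	/* rough analogue of SLAB_NOLEAKTRACE / the _noprof variants */

static void track_alloc(void *p);

/* Allocator front end with a tracking hook, loosely like slab + allocation tagging. */
static void *cache_alloc(size_t size, unsigned int flags)
{
	void *p = malloc(size);

	/* The tracking hook is skipped for the tracker's own allocations. */
	if (p && !(flags & NO_TRACK))
		track_alloc(p);
	return p;
}

/* The tracker needs metadata of its own, so it calls back into the allocator. */
static void track_alloc(void *p)
{
	/* Without NO_TRACK here, this would recurse until the stack overflows. */
	void *meta = cache_alloc(sizeof(void *), NO_TRACK);

	printf("tracked %p with metadata %p\n", p, meta);
	free(meta);
}

int main(void)
{
	void *obj = cache_alloc(64, 0);

	free(obj);
	return 0;
}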