author		Andrew Morton <akpm@linux-foundation.org>	2024-04-25 21:00:26 -0700
committer	Andrew Morton <akpm@linux-foundation.org>	2024-04-25 21:00:26 -0700
commit		08ccdbdda7c248410a07280041f5eadb68d4aa77 (patch)
tree		0042e33d37bc2f3cb8595eab001abf01e042de5b
parent		c2b1a4083caba04c67f945f022026c34ef9ad99c (diff)
download	25-new-08ccdbdda7c248410a07280041f5eadb68d4aa77.tar.gz
foo
-rw-r--r--	patches/mm-slab-move-memcg-charging-to-post-alloc-hook.patch	| 23
1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/patches/mm-slab-move-memcg-charging-to-post-alloc-hook.patch b/patches/mm-slab-move-memcg-charging-to-post-alloc-hook.patch
index 3d616df58..b9fd1b153 100644
--- a/patches/mm-slab-move-memcg-charging-to-post-alloc-hook.patch
+++ b/patches/mm-slab-move-memcg-charging-to-post-alloc-hook.patch
@@ -23,9 +23,14 @@ potentially allows to separate charging from allocation in cases where
it's common that the allocation would be immediately freed, and the memcg
handling overhead could be saved.
+[vbabka@suse.cz: fix call to memcg_alloc_abort_single()]
+ Link: https://lkml.kernel.org/r/4af50be2-4109-45e5-8a36-2136252a635e@suse.cz
+[roman.gushchin@linux.dev: comment fixup]
+ Link: https://lkml.kernel.org/r/Zg2LsNm6twOmG69l@P9FQF9L96D.corp.robot.car
Link: https://lkml.kernel.org/r/20240326-slab-memcg-v3-0-d85d2563287a@suse.cz
Link: https://lkml.kernel.org/r/20240326-slab-memcg-v3-1-d85d2563287a@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/all/CAHk-=whYOOdM7jWy5jdrAm8LxcgCMFyk2bt8fYYvZzM4U-zAQA@mail.gmail.com/
Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
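
[Editorial note: the series this patch amends moves the memcg charge out of the pre-allocation hook, so the object is allocated first and charged afterwards; a failed allocation never pays the accounting cost, and charging can be unwound by simply freeing. A minimal user-space sketch of that ordering follows; charge_sketch() and alloc_then_charge() are hypothetical stand-ins, the kernel's real hooks being memcg_slab_pre_alloc_hook() and memcg_slab_post_alloc_hook() in mm/slub.c.]

	#include <stdio.h>
	#include <stdlib.h>

	/* Pretend memcg charge; returns nonzero on success. Hypothetical. */
	static int charge_sketch(size_t size)
	{
		(void)size;
		return 1;
	}

	/* Allocate first, charge second: the post-alloc-hook ordering. */
	static void *alloc_then_charge(size_t size)
	{
		void *object = malloc(size);

		if (!object)
			return NULL;	/* failed alloc never paid the charge */

		if (!charge_sketch(size)) {
			free(object);	/* abort: drop the object, nothing to uncharge */
			return NULL;
		}
		return object;
	}

	int main(void)
	{
		void *obj = alloc_then_charge(64);

		printf("allocated and charged: %p\n", obj);
		free(obj);
		return 0;
	}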
@@ -50,9 +55,21 @@ Cc: Aishwarya TCV <aishwarya.tcv@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
- mm/slub.c | 180 ++++++++++++++++++++++------------------------------
- 1 file changed, 77 insertions(+), 103 deletions(-)
+ mm/memcontrol.c | 2
+ mm/slub.c | 180 +++++++++++++++++++---------------------------
+ 2 files changed, 78 insertions(+), 104 deletions(-)
+--- a/mm/memcontrol.c~mm-slab-move-memcg-charging-to-post-alloc-hook
++++ a/mm/memcontrol.c
+@@ -350,7 +350,7 @@ static void memcg_reparent_objcgs(struct
+
+ /*
+ * A lot of the calls to the cache allocation functions are expected to be
+- * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
++ * inlined by the compiler. Since the calls to memcg_slab_post_alloc_hook() are
+ * conditional to this static branch, we'll have to allow modules that does
+ * kmem_cache_alloc and the such to see this symbol as well
+ */
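
[Editorial note: the comment fixed above describes the static-branch pattern used for kmem accounting: because the hook call is inlined into every kmem_cache_alloc() caller, including modules, the key itself must be an exported symbol. A hedged kernel-style sketch of that pattern, with an illustrative key name rather than the one mm/memcontrol.c actually defines:]

	#include <linux/jump_label.h>
	#include <linux/export.h>

	/* Illustrative key; mm/memcontrol.c defines its own accounting key. */
	DEFINE_STATIC_KEY_FALSE(example_kmem_key);
	EXPORT_SYMBOL(example_kmem_key);	/* inlined callers in modules need it */

	/* Inlined into callers: the branch is a patched nop while disabled. */
	static __always_inline bool example_accounting_enabled(void)
	{
		return static_branch_likely(&example_kmem_key);
	}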
--- a/mm/slub.c~mm-slab-move-memcg-charging-to-post-alloc-hook
+++ a/mm/slub.c
@@ -2092,23 +2092,36 @@ static inline size_t obj_full_size(struc
@@ -183,7 +200,7 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
- obj_cgroup_uncharge(objcg, obj_full_size(s));
- }
+ if (likely(size == 1)) {
-+ memcg_alloc_abort_single(s, p);
++ memcg_alloc_abort_single(s, *p);
+ *p = NULL;
+ } else {
+ kmem_cache_free_bulk(s, size, p);
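
[Editorial note: the one-line fix in this hunk is a pointer-level correction. In the bulk-allocation failure path, p is the array of allocated objects (a void **), so when size == 1 the single-object abort helper must receive the object itself, *p, not the array pointer p. A reduced sketch with illustrative signatures; the real helpers live in mm/slub.c:]

	#include <linux/slab.h>

	/* Reduced stand-in: uncharge the memcg and free this one object. */
	static void memcg_alloc_abort_single(struct kmem_cache *s, void *object)
	{
	}

	static void abort_bulk_alloc(struct kmem_cache *s, size_t size, void **p)
	{
		if (likely(size == 1)) {
			memcg_alloc_abort_single(s, *p);	/* the object, not the array */
			*p = NULL;				/* report failure to the caller */
		} else {
			kmem_cache_free_bulk(s, size, p);
		}
	}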