Diffstat (limited to 'patches/next/mm-damon-paddr-implement-damon_folio_mkold.patch')
-rw-r--r--  patches/next/mm-damon-paddr-implement-damon_folio_mkold.patch | 82
1 file changed, 82 insertions(+), 0 deletions(-)
diff --git a/patches/next/mm-damon-paddr-implement-damon_folio_mkold.patch b/patches/next/mm-damon-paddr-implement-damon_folio_mkold.patch
new file mode 100644
index 0000000..0512926
--- /dev/null
+++ b/patches/next/mm-damon-paddr-implement-damon_folio_mkold.patch
@@ -0,0 +1,82 @@
+From a687a04aadb8ec0e160da4419ad88071d876f353 Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Fri, 8 Mar 2024 17:54:17 -0800
+Subject: [PATCH] mm/damon/paddr: implement damon_folio_mkold()
+
+damon_pa_mkold() receives a physical address, finds the folio covering
+the address, and marks the folio as old. Split out the internal folio
+marking logic into damon_folio_mkold(), for future reuse of the logic.
+Also, rename the rmap walker from __damon_pa_mkold() to
+damon_folio_mkold_one() for more consistent naming.
+
+Signed-off-by: SeongJae Park <sj@kernel.org>
+---
+ mm/damon/paddr.c | 27 ++++++++++++++++-----------
+ 1 file changed, 16 insertions(+), 11 deletions(-)
+
+diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
+index 25c3ba2a9eaf..310b803c6277 100644
+--- a/mm/damon/paddr.c
++++ b/mm/damon/paddr.c
+@@ -16,8 +16,8 @@
+ #include "../internal.h"
+ #include "ops-common.h"
+
+-static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
+- unsigned long addr, void *arg)
++static bool damon_folio_mkold_one(struct folio *folio,
++ struct vm_area_struct *vma, unsigned long addr, void *arg)
+ {
+ DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
+
+@@ -31,33 +31,38 @@ static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
+ return true;
+ }
+
+-static void damon_pa_mkold(unsigned long paddr)
++static void damon_folio_mkold(struct folio *folio)
+ {
+- struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
+ struct rmap_walk_control rwc = {
+- .rmap_one = __damon_pa_mkold,
++ .rmap_one = damon_folio_mkold_one,
+ .anon_lock = folio_lock_anon_vma_read,
+ };
+ bool need_lock;
+
+- if (!folio)
+- return;
+-
+ if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
+ folio_set_idle(folio);
+- goto out;
++ return;
+ }
+
+ need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
+ if (need_lock && !folio_trylock(folio))
+- goto out;
++ return;
+
+ rmap_walk(folio, &rwc);
+
+ if (need_lock)
+ folio_unlock(folio);
+
+-out:
++}
++
++static void damon_pa_mkold(unsigned long paddr)
++{
++ struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
++
++ if (!folio)
++ return;
++
++ damon_folio_mkold(folio);
+ folio_put(folio);
+ }
+
+--
+2.39.2
+
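
For illustration only, and not part of the patch above: a minimal sketch of the
calling pattern the split enables inside mm/damon/paddr.c. The function
example_mkold_folio() is a hypothetical name invented for this sketch; only
damon_folio_mkold(), damon_pa_mkold(), damon_get_folio(), PHYS_PFN() and
folio_put() come from the patch or existing kernel code. A caller that already
holds a folio reference can age the folio directly, while physical-address
users keep going through the thin damon_pa_mkold() wrapper.

    /* Hypothetical in-file caller; assumes a folio reference is already held. */
    static void example_mkold_folio(struct folio *folio)
    {
    	/* No lookup or refcounting needed here; just age the folio. */
    	damon_folio_mkold(folio);
    }

    /* Physical-address users keep the wrapper added by the patch. */
    static void example_mkold_paddr(unsigned long paddr)
    {
    	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));	/* takes a ref */

    	if (!folio)
    		return;
    	damon_folio_mkold(folio);
    	folio_put(folio);	/* drop the reference from damon_get_folio() */
    }

In the patch as posted, damon_folio_mkold() is still static to paddr.c, so the
folio-based entry point is only reusable within that file; the commit message's
"future reuse" presumably refers to follow-up changes exposing it more widely.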