author    openeuler-ci-bot <george@openeuler.sh>    2024-04-15 01:49:30 +0000
committer Gitee <noreply@gitee.com>                 2024-04-15 01:49:30 +0000
commit    6c747090cba90fad92e4dd12d5686e7eacf0a1e8 (patch)
tree      93c8fb1bef1686ffb58c29c73ee343b790ff059e
parent    a6453bac7707a3bfb1dba29ea9af98b1d08df2cd (diff)
parent    9991b30a69e65d4911268ee0e160d3c31a2a3abc (diff)
!6044 [sync] PR-5995: mm/swap: fix race when skipping swapcache
Merge Pull Request from: @openeuler-sync-bot

Origin pull request:
https://gitee.com/openeuler/kernel/pulls/5995

PR sync from: Jinjiang Tu <tujinjiang@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/OOA6SREAEMWJD733ZA2BL6Z6ZCZRJWXH/
https://gitee.com/src-openeuler/kernel/issues/I9E2M2

Link: https://gitee.com/openeuler/kernel/pulls/6044
Reviewed-by: Jialin Zhang <zhangjialin11@huawei.com>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
-rw-r--r--  include/linux/swap.h    5
-rw-r--r--  mm/memory.c            20
-rw-r--r--  mm/swapfile.c          13
3 files changed, 38 insertions(+), 0 deletions(-)
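Not part of the patch below: as an aid for review, a minimal, self-contained userspace C sketch of the guard the fix introduces. All names in it (entry_prepare, entry_clear, entry_has_cache, fault_handler) are hypothetical stand-ins for swapcache_prepare(), swapcache_clear() and the SWAP_HAS_CACHE pin; the back-off loop plays the role of returning from the fault after schedule_timeout_uninterruptible(1) and letting the fault be retried.

/*
 * Illustrative userspace sketch only -- not kernel code and not part of
 * this patch. It models the rule the fix enforces: a faulting thread must
 * atomically claim a per-entry "cache" flag before a cacheless swapin and
 * release it afterwards; a thread that loses the claim backs off briefly.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool entry_has_cache;	/* stand-in for SWAP_HAS_CACHE */

/* Claim the entry; fails if another thread already owns the swapin. */
static bool entry_prepare(void)
{
	bool expected = false;

	return atomic_compare_exchange_strong(&entry_has_cache, &expected, true);
}

/* Release the claim once the "PTE" has been installed. */
static void entry_clear(void)
{
	atomic_store(&entry_has_cache, false);
}

static void *fault_handler(void *arg)
{
	long id = (long)arg;

	while (!entry_prepare()) {
		/* Lost the race: back off and retry the fault later. */
		usleep(1000);
	}
	printf("thread %ld performs the cacheless swapin\n", id);
	entry_clear();
	return NULL;
}

int main(void)
{
	pthread_t threads[2];

	for (long i = 0; i < 2; i++)
		pthread_create(&threads[i], NULL, fault_handler, (void *)i);
	for (int i = 0; i < 2; i++)
		pthread_join(threads[i], NULL);
	return 0;
}

The point is the same as in the patch: only the thread that wins the claim may do the SWP_SYNCHRONOUS_IO swapin, so a concurrent fault can no longer free the entry and see it reused for a new swapout while pte_same() still reports a match.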
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 7f49964f27d2d7..2c49020d7f115a 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -474,6 +474,7 @@ extern void __delete_from_swap_cache(struct page *page,
extern void delete_from_swap_cache(struct page *);
extern void clear_shadow_from_swap_cache(int type, unsigned long begin,
unsigned long end);
+void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
extern struct page *lookup_swap_cache(swp_entry_t entry,
@@ -625,6 +626,10 @@ static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
return 0;
}
+static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
+{
+}
+
static inline struct page *lookup_swap_cache(swp_entry_t swp,
struct vm_area_struct *vma,
unsigned long addr)
diff --git a/mm/memory.c b/mm/memory.c
index 5893c178251a03..841541835fc330 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3385,6 +3385,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
struct page *page = NULL, *swapcache;
struct swap_info_struct *si = NULL;
+ bool need_clear_cache = false;
swp_entry_t entry;
pte_t pte;
int locked;
@@ -3442,6 +3443,20 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
if (!page) {
if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
__swap_count(entry) == 1) {
+ /*
+ * Prevent parallel swapin from proceeding with
+ * the cache flag. Otherwise, another thread may
+ * finish swapin first, free the entry, and swapout
+ * reusing the same entry. It's undetectable as
+ * pte_same() returns true due to entry reuse.
+ */
+ if (swapcache_prepare(entry)) {
+ /* Relax a bit to prevent rapid repeated page faults */
+ schedule_timeout_uninterruptible(1);
+ goto out;
+ }
+ need_clear_cache = true;
+
/* skip swapcache */
page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
vmf->address);
@@ -3611,6 +3626,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
unlock:
pte_unmap_unlock(vmf->pte, vmf->ptl);
out:
+ /* Clear the swap cache pin for direct swapin after PTL unlock */
+ if (need_clear_cache)
+ swapcache_clear(si, entry);
if (si)
put_swap_device(si);
return ret;
@@ -3624,6 +3642,8 @@ out_release:
unlock_page(swapcache);
put_page(swapcache);
}
+ if (need_clear_cache)
+ swapcache_clear(si, entry);
if (si)
put_swap_device(si);
return ret;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index b0824a6fe21e33..1a2d348ce0572c 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3592,6 +3592,19 @@ int swapcache_prepare(swp_entry_t entry)
return __swap_duplicate(entry, SWAP_HAS_CACHE);
}
+void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
+{
+ struct swap_cluster_info *ci;
+ unsigned long offset = swp_offset(entry);
+ unsigned char usage;
+
+ ci = lock_cluster_or_swap_info(si, offset);
+ usage = __swap_entry_free_locked(si, offset, SWAP_HAS_CACHE);
+ unlock_cluster_or_swap_info(si, ci);
+ if (!usage)
+ free_swap_slot(entry);
+}
+
struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
return swap_type_to_swap_info(swp_type(entry));
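Also not part of the patch: a hedged userspace model of the bookkeeping the new swapcache_clear() performs, namely dropping the cache pin taken by swapcache_prepare() under the entry's lock and freeing the slot only once nothing else references it (the kernel does this via __swap_entry_free_locked() and free_swap_slot()). The struct and function names below are illustrative assumptions, not kernel API.

/*
 * Userspace model of the prepare/clear bookkeeping. slot.count stands in
 * for the swap_map reference count, slot.has_cache for SWAP_HAS_CACHE;
 * everything here is an assumption made for illustration.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct slot {
	pthread_mutex_t lock;
	int count;		/* page-table references to the entry */
	bool has_cache;		/* pinned for a (skipped) swapcache use */
	bool free;
};

/* Pin the slot for a cacheless swapin; fails if already pinned or free. */
static int slot_prepare(struct slot *s)
{
	int ret = 0;

	pthread_mutex_lock(&s->lock);
	if (s->has_cache || s->free)
		ret = -1;
	else
		s->has_cache = true;
	pthread_mutex_unlock(&s->lock);
	return ret;
}

/* Drop the pin; free the slot when nothing references it any more. */
static void slot_clear(struct slot *s)
{
	pthread_mutex_lock(&s->lock);
	s->has_cache = false;
	if (s->count == 0)
		s->free = true;	/* kernel: free_swap_slot(entry) */
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct slot s = { PTHREAD_MUTEX_INITIALIZER, 1, false, false };

	if (slot_prepare(&s) == 0) {
		/* ... cacheless swapin and PTE installation here ... */
		s.count = 0;	/* old swap reference dropped, cf. swap_free() */
		slot_clear(&s);
	}
	printf("slot free: %d\n", s.free);
	return 0;
}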