author     Xiaowei Xue <xuexiaowei1999@163.com>   2023-09-16 23:33:49 +0800
committer  Xiaowei Xue <xuexiaowei1999@163.com>   2023-09-25 19:22:46 +0800
commit     1e3780778dd6321682fc66b1aa805ad9d53edd63 (patch)
tree       7d6389fa4022d678308edbbb8a079b540cf9e527
parent     a18e182376e191a1da9381e932dfb68cf369e6e7 (diff)
download   openEuler-kernel-1e3780778dd6321682fc66b1aa805ad9d53edd63.tar.gz
mm: kzerod: use kzerod alloc interface in anonymous page fault
openEuler inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I846IP
CVE: NA

-------------------------------

Use the kzerod alloc interface instead of the buddy alloc interface in the
anonymous page fault path.

Signed-off-by: Xiaowei Xue <xuexiaowei1999@163.com>
-rw-r--r--  include/linux/highmem.h   6
-rw-r--r--  mm/huge_memory.c         26
2 files changed, 29 insertions(+), 3 deletions(-)
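For orientation, below is a minimal sketch (not part of this patch) of the allocation pattern the diff applies to the THP anonymous fault path: first try to take a PMD-order page from the kzerod pre-zeroed pool on the current CPU, fall back to the normal buddy allocation if the pool is empty, and remember whether the page came pre-zeroed so the caller can skip clear_huge_page(). alloc_prezeroed_page() and CONFIG_KZEROD come from the earlier kzerod patches in this series; the helper name do_thp_fault_alloc() and its exact signature are illustrative only.

/* Illustrative sketch only; do_thp_fault_alloc() is a hypothetical helper. */
static struct page *do_thp_fault_alloc(struct vm_area_struct *vma,
				       unsigned long haddr, gfp_t gfp,
				       bool *prezeroed)
{
	struct page *page = NULL;

	*prezeroed = false;
#ifdef CONFIG_KZEROD
	/* Fast path: a page already zeroed by the kzerod kernel thread. */
	page = alloc_prezeroed_page(HPAGE_PMD_ORDER, smp_processor_id());
	if (page)
		*prezeroed = true;
#endif
	if (!page)
		/* Slow path: ordinary buddy allocation; caller must zero it. */
		page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);

	return page;
}

When *prezeroed is true, the fault handler can skip the clear_huge_page() loop, which is the zeroing cost this change removes from the fault path.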
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index c3b75b4a8fc1e4..6e8c222b152ab1 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -274,6 +274,12 @@ static inline struct page *
 alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
 					unsigned long vaddr)
 {
+#ifdef CONFIG_KZEROD
+	struct page *prezerod_page = alloc_prezeroed_page(0, smp_processor_id());
+
+	if (prezerod_page)
+		return prezerod_page;
+#endif
 	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
 }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d720c4323bd4d6..5b37e84e097b8e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -201,7 +201,13 @@ static ssize_t enabled_store(struct kobject *kobj,
 		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
 	} else
 		ret = -EINVAL;
-
+#ifdef CONFIG_KZEROD
+	if (sysfs_streq(buf, "always") || sysfs_streq(buf, "madvise")) {
+		kzerod_enable_order(HPAGE_PMD_ORDER);
+	} else if (sysfs_streq(buf, "never")) {
+		drain_zerod_page(HPAGE_PMD_ORDER);
+	}
+#endif
 	if (ret > 0) {
 		int err = start_stop_khugepaged();
 		if (err)
@@ -592,7 +598,7 @@ out:
 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
 
 static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
-			struct page *page, gfp_t gfp)
+			struct page *page, gfp_t gfp, bool prezeroed)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	pgtable_t pgtable;
@@ -615,7 +621,12 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 		goto release;
 	}
 
+#ifdef CONFIG_KZEROD
+	if (!prezeroed)
+		clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
+#else
 	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
+#endif
 	/*
 	 * The memory barrier inside __SetPageUptodate makes sure that
 	 * clear_huge_page writes become visible before the set_pmd_at()
@@ -728,6 +739,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 	gfp_t gfp;
 	struct page *page;
 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
+	bool prezeroed = false;
 
 	if (!transhuge_vma_suitable(vma, haddr))
 		return VM_FAULT_FALLBACK;
@@ -774,13 +786,21 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 		return ret;
 	}
 	gfp = alloc_hugepage_direct_gfpmask(vma);
+#ifdef CONFIG_KZEROD
+	page = alloc_prezeroed_page(HPAGE_PMD_ORDER, smp_processor_id());
+	if (page)
+		prezeroed = true;
+	else
+		page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
+#else
 	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
+#endif
 	if (unlikely(!page)) {
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
 	}
 	prep_transhuge_page(page);
-	return __do_huge_pmd_anonymous_page(vmf, page, gfp);
+	return __do_huge_pmd_anonymous_page(vmf, page, gfp, prezeroed);
 }
 
 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,