author		Kirill A. Shutemov <kirill@shutemov.name>	2020-01-03 04:02:53 +0000
committer	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2020-01-03 15:11:26 +0300
commit		b007b820e755271d73db12313db3bc6f9ce440b5 (patch)
tree		6c7f01fd09b3888c72268bda1e4cbbbbb512d322
parent		fd6988496e79a6a4bdb514a4655d2920209eb85d (diff)
mm/page_alloc: skip non present sections on zone initialization
memmap_init_zone() can be called on ranges with holes during boot. It
will skip any non-valid PFNs one-by-one. That works fine as long as the
holes are not too big, but huge holes in the memory map cause a
problem: it takes over 20 seconds to walk a 32TiB hole, and x86-64 with
5-level paging allows for much larger holes in the memory map, which
would practically hang the system.

Deferred struct page init doesn't help here. It only works on the
present ranges.

Skipping non-present sections would fix the issue.

Link: http://lkml.kernel.org/r/20191230093828.24613-1-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Baoquan He <bhe@redhat.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: "Jin, Zhi" <zhi.jin@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
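For scale: with 4KiB pages a 32TiB hole spans 2^33 PFNs, while with the
128MiB sparsemem sections used on x86-64 the same hole covers only 2^18
sections. The standalone program below is only a sketch of that
arithmetic, not kernel code; the constants mirror assumed x86-64
defaults.

#include <stdio.h>

/* Assumed x86-64 defaults: 4KiB pages, 128MiB sparsemem sections */
#define PAGE_SHIFT		12
#define SECTION_SIZE_BITS	27
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

int main(void)
{
	unsigned long long hole = 32ULL << 40;			/* 32TiB */
	unsigned long long pfns = hole >> PAGE_SHIFT;		/* 2^33 */
	unsigned long long secs = pfns >> PFN_SECTION_SHIFT;	/* 2^18 */

	/* one validity check per PFN vs. one present check per section */
	printf("per-PFN steps:     %llu\n", pfns);	/* 8589934592 */
	printf("per-section steps: %llu\n", secs);	/* 262144 */
	return 0;
}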
-rw-r--r--	mm/page_alloc.c	28
1 file changed, 27 insertions(+), 1 deletion(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4785a8a2040eec..bcdc78c10374a5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5873,6 +5873,30 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
return false;
}
+#ifdef CONFIG_SPARSEMEM
+/* Skip PFNs that belong to non-present sections */
+static inline __meminit unsigned long next_pfn(unsigned long pfn)
+{
+ unsigned long section_nr;
+
+ section_nr = pfn_to_section_nr(++pfn);
+ if (present_section_nr(section_nr))
+ return pfn;
+
+ while (++section_nr <= __highest_present_section_nr) {
+ if (present_section_nr(section_nr))
+ return section_nr_to_pfn(section_nr);
+ }
+
+ return -1;
+}
+#else
+static inline __meminit unsigned long next_pfn(unsigned long pfn)
+{
+	return pfn + 1;
+}
+#endif
+
/*
* Initially all pages are reserved - free ones are freed
* up by memblock_free_all() once the early boot process is
@@ -5912,8 +5936,10 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
* function. They do not exist on hotplugged memory.
*/
if (context == MEMMAP_EARLY) {
- if (!early_pfn_valid(pfn))
+ if (!early_pfn_valid(pfn)) {
+ pfn = next_pfn(pfn) - 1;
continue;
+ }
if (!early_pfn_in_nid(pfn, nid))
continue;
if (overlap_memmap_init(zone, &pfn))
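
For reference, a hypothetical userspace model of the skip logic added
above. The section_present[] bitmap and NR_SECTIONS are made up for
illustration; the real kernel consults mem_section state via
present_section_nr() and __highest_present_section_nr.

#include <stdio.h>

#define PFN_SECTION_SHIFT	15	/* 128MiB sections / 4KiB pages */
#define NR_SECTIONS		8

/* Toy memory map: sections 0-1 and 6-7 present, 2-5 form a hole */
static const int section_present[NR_SECTIONS] = { 1, 1, 0, 0, 0, 0, 1, 1 };

static unsigned long next_pfn(unsigned long pfn)
{
	unsigned long section_nr = ++pfn >> PFN_SECTION_SHIFT;

	if (section_nr < NR_SECTIONS && section_present[section_nr])
		return pfn;

	while (++section_nr < NR_SECTIONS) {
		if (section_present[section_nr])
			return section_nr << PFN_SECTION_SHIFT;
	}

	return -1UL;	/* no present section left */
}

int main(void)
{
	/* Last PFN of section 1: the next present PFN is in section 6 */
	unsigned long pfn = (2UL << PFN_SECTION_SHIFT) - 1;

	printf("pfn %lu -> %lu\n", pfn, next_pfn(pfn));	/* jumps the hole */
	return 0;
}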