author     Keith Busch <kbusch@kernel.org>    2019-08-07 16:27:19 -0600
committer  Keith Busch <kbusch@kernel.org>    2019-08-15 14:21:55 -0600
commit     6616afe9a722f6ebedbb27ade3848cf07b9a3af7 (patch)
tree       d5fedc16a159339c1dc38387046a0ec76b0f48d8
parent     db13e01743ad8f8ef91a629fbefe4a53e0c020f0 (diff)
mm: add new lru for page promotion
A referenced page on the active lru that is marked accessed again
indicates a hot page. If such a page is referenced on a node that has a
promotion target, move it from the active lru to a new promotion
candidate lru. When this node is scanned later, pages on the promotion
lru will attempt to migrate to their higher performing target node.
Signed-off-by: Keith Busch <keith.busch@intel.com>
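
The new transition is easiest to see as the last rung of the reference
ladder documented in mm/swap.c. Below is a minimal userspace sketch of
that ladder as this patch extends it -- an illustration only, not kernel
code; has_promotion_target() is a hypothetical stand-in for the patch's
next_promotion_node() check:

#include <stdbool.h>
#include <stdio.h>

/* Toy page state; the real flags live in page->flags. */
struct page { bool referenced, active, promotable; };

/* Hypothetical stand-in for next_promotion_node(nid) >= 0. */
static bool has_promotion_target(const struct page *p)
{
	(void)p;
	return true;
}

/*
 * Mirrors the ladder in mark_page_accessed():
 *   inactive,unreferenced -> inactive,referenced
 *   inactive,referenced   -> active,unreferenced
 *   active,unreferenced   -> active,referenced
 *   active,referenced     -> promotable          (new in this patch)
 */
static void mark_accessed(struct page *p)
{
	if (!p->active && p->referenced) {
		p->active = true;
		p->referenced = false;
	} else if (!p->referenced) {
		p->referenced = true;
	} else if (p->active && !p->promotable && has_promotion_target(p)) {
		p->promotable = true;
	}
}

int main(void)
{
	struct page p = { false, false, false };

	/* The fourth access marks the page a promotion candidate. */
	for (int i = 1; i <= 4; i++) {
		mark_accessed(&p);
		printf("access %d: referenced=%d active=%d promotable=%d\n",
		       i, p.referenced, p.active, p.promotable);
	}
	return 0;
}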
 include/linux/mm_inline.h      |  9
 include/linux/mmzone.h         | 18
 include/linux/page-flags.h     |  9
 include/linux/swap.h           |  3
 include/linux/vm_event_item.h  |  2
 include/trace/events/mmflags.h | 10
 include/trace/events/pagemap.h | 23
 include/trace/events/vmscan.h  | 36
 mm/memcontrol.c                |  2
 mm/swap.c                      | 76
 mm/vmscan.c                    | 86
 mm/vmstat.c                    |  5
 12 files changed, 263 insertions(+), 16 deletions(-)
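
A note on the index arithmetic the mmzone.h hunk below depends on:
inserting LRU_PROMOTE = 2 pushes LRU_FILE from 2 to 3, so each base type
(anon, file) carries three evictable lists addressed as base + offset.
The following standalone sketch (illustration only) prints the resulting
layout:

#include <stdio.h>

#define LRU_BASE    0
#define LRU_ACTIVE  1
#define LRU_PROMOTE 2	/* new in this patch */
#define LRU_FILE    3	/* was 2 before this patch */

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON   = LRU_BASE + LRU_ACTIVE,
	LRU_PROMOTE_ANON  = LRU_BASE + LRU_PROMOTE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE   = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_PROMOTE_FILE  = LRU_BASE + LRU_FILE + LRU_PROMOTE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

int main(void)
{
	/* page_lru() composes these the same way: a base type (0 or
	 * LRU_FILE) plus LRU_ACTIVE or LRU_PROMOTE reaches any
	 * evictable list. */
	printf("anon: inactive=%d active=%d promote=%d\n",
	       LRU_INACTIVE_ANON, LRU_ACTIVE_ANON, LRU_PROMOTE_ANON);
	printf("file: inactive=%d active=%d promote=%d\n",
	       LRU_INACTIVE_FILE, LRU_ACTIVE_FILE, LRU_PROMOTE_FILE);
	printf("unevictable=%d, NR_LRU_LISTS=%d\n",
	       LRU_UNEVICTABLE, NR_LRU_LISTS);
	return 0;
}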
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 6f2fef7b0784e0..f19635c1bb48b6 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -96,7 +96,10 @@ static __always_inline enum lru_list page_off_lru(struct page *page)
 		lru = LRU_UNEVICTABLE;
 	} else {
 		lru = page_lru_base_type(page);
-		if (PageActive(page)) {
+		if (PagePromotable(page)) {
+			ClearPagePromotable(page);
+			lru += LRU_PROMOTE;
+		} else if (PageActive(page)) {
 			__ClearPageActive(page);
 			lru += LRU_ACTIVE;
 		}
@@ -119,7 +122,9 @@ static __always_inline enum lru_list page_lru(struct page *page)
 		lru = LRU_UNEVICTABLE;
 	else {
 		lru = page_lru_base_type(page);
-		if (PageActive(page))
+		if (PagePromotable(page))
+			lru += LRU_PROMOTE;
+		else if (PageActive(page))
 			lru += LRU_ACTIVE;
 	}
 	return lru;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index d77d717c620cbe..3e14fd3daa801e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -193,8 +193,10 @@ enum zone_stat_item {
 	NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
 	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
 	NR_ZONE_ACTIVE_ANON,
+	NR_ZONE_PROMOTE_ANON,
 	NR_ZONE_INACTIVE_FILE,
 	NR_ZONE_ACTIVE_FILE,
+	NR_ZONE_PROMOTE_FILE,
 	NR_ZONE_UNEVICTABLE,
 	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
 	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
@@ -212,8 +214,10 @@ enum node_stat_item {
 	NR_LRU_BASE,
 	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
 	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
+	NR_PROMOTE_ANON,	/*  "     "     "   "       "         */
 	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
 	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
+	NR_PROMOTE_FILE,	/*  "     "     "   "       "         */
 	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
 	NR_SLAB_RECLAIMABLE,
 	NR_SLAB_UNRECLAIMABLE,
@@ -255,24 +259,27 @@ enum node_stat_item {
  */
 #define LRU_BASE 0
 #define LRU_ACTIVE 1
-#define LRU_FILE 2
+#define LRU_PROMOTE 2
+#define LRU_FILE 3
 
 enum lru_list {
 	LRU_INACTIVE_ANON = LRU_BASE,
 	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
+	LRU_PROMOTE_ANON = LRU_BASE + LRU_PROMOTE,
 	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
 	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
+	LRU_PROMOTE_FILE = LRU_BASE + LRU_FILE + LRU_PROMOTE,
 	LRU_UNEVICTABLE,
 	NR_LRU_LISTS
 };
 
 #define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
 
-#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
+#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_PROMOTE_FILE; lru++)
 
 static inline int is_file_lru(enum lru_list lru)
 {
-	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
+	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE || lru == LRU_PROMOTE_FILE);
 }
 
 static inline int is_active_lru(enum lru_list lru)
@@ -280,6 +287,11 @@ static inline int is_active_lru(enum lru_list lru)
 	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
 }
 
+static inline int is_promote_lru(enum lru_list lru)
+{
+	return (lru == LRU_PROMOTE_ANON || lru == LRU_PROMOTE_FILE);
+}
+
 struct zone_reclaim_stat {
 	/*
 	 * The pageout code in vmscan.c keeps track of how many of the
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f91cb8898ff0af..56d877e388169e 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -131,6 +131,9 @@ enum pageflags {
 	PG_young,
 	PG_idle,
 #endif
+#ifdef CONFIG_HMEM_REPORTING
+	PG_promotable,
+#endif
 	__NR_PAGEFLAGS,
 
 	/* Filesystems */
@@ -431,6 +434,12 @@ TESTCLEARFLAG(Young, young, PF_ANY)
 PAGEFLAG(Idle, idle, PF_ANY)
 #endif
 
+#ifdef CONFIG_HMEM_REPORTING
+PAGEFLAG(Promotable, promotable, PF_ANY)
+#else
+PAGEFLAG_FALSE(Promotable)
+#endif
+
 /*
  * On an anonymous page mapped into a user virtual memory area,
  * page->mapping points to its anon_vma, not to a struct address_space;
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 2d34f048f7556a..138f2fa67a13fc 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -699,7 +699,8 @@ static inline bool reclaim_anon_pages(struct mem_cgroup *memcg,
 	}
 
 	/* Also age anon pages if we can auto-migrate them */
-	if (next_demotion_node(node_id) >= 0)
+	if (next_demotion_node(node_id) >= 0 ||
+	    next_promotion_node(node_id) >= 0)
 		return true;
 
 	/* No way to reclaim anon pages */
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 47a3441cf4c4a4..237ca42a06e769 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -26,7 +26,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		FOR_ALL_ZONES(PGALLOC),
 		FOR_ALL_ZONES(ALLOCSTALL),
 		FOR_ALL_ZONES(PGSCAN_SKIP),
-		PGFREE, PGACTIVATE, PGDEACTIVATE, PGLAZYFREE,
+		PGFREE, PGACTIVATE, PGDEACTIVATE, PGPROMOTABLE, PGLAZYFREE,
 		PGFAULT, PGMAJFAULT,
 		PGLAZYFREED,
 		PGREFILL,
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index a1675d43777e8f..b2d54bd62c8788 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -79,6 +79,12 @@
 #define IF_HAVE_PG_IDLE(flag,string)
 #endif
 
+#ifdef CONFIG_HMEM_REPORTING
+#define IF_HAVE_PG_PROMOTABLE(flag,string) ,{1UL << flag, string}
+#else
+#define IF_HAVE_PG_PROMOTABLE(flag,string)
+#endif
+
 #define __def_pageflag_names						\
 	{1UL << PG_locked,		"locked"	},		\
 	{1UL << PG_waiters,		"waiters"	},		\
@@ -105,7 +111,9 @@ IF_HAVE_PG_MLOCK(PG_mlocked,		"mlocked"	)		\
 IF_HAVE_PG_UNCACHED(PG_uncached,	"uncached"	)		\
 IF_HAVE_PG_HWPOISON(PG_hwpoison,	"hwpoison"	)		\
 IF_HAVE_PG_IDLE(PG_young,		"young"		)		\
-IF_HAVE_PG_IDLE(PG_idle,		"idle"		)
+IF_HAVE_PG_IDLE(PG_idle,		"idle"		)		\
+IF_HAVE_PG_PROMOTABLE(PG_promotable,	"promotable"	)
+
 
 #define show_page_flags(flags)						\
 	(flags) ? __print_flags(flags, "|",				\
diff --git a/include/trace/events/pagemap.h b/include/trace/events/pagemap.h
index 8fd1babae761b9..05953995e87d13 100644
--- a/include/trace/events/pagemap.h
+++ b/include/trace/events/pagemap.h
@@ -82,6 +82,29 @@ TRACE_EVENT(mm_lru_activate,
 
 );
 
+TRACE_EVENT(mm_lru_promotable,
+
+	TP_PROTO(struct page *page),
+
+	TP_ARGS(page),
+
+	TP_STRUCT__entry(
+		__field(struct page *,	page	)
+		__field(unsigned long,	pfn	)
+	),
+
+	TP_fast_assign(
+		__entry->page	= page;
+		__entry->pfn	= page_to_pfn(page);
+	),
+
+	/* Flag format is based on page-types.c formatting for pagemap */
+	TP_printk("page=%p pfn=%lu nid=%d target=%d", __entry->page, __entry->pfn,
+		  page_to_nid(__entry->page),
+		  next_promotion_node(page_to_nid(__entry->page)))
+
+);
+
 #endif /* _TRACE_PAGEMAP_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index a5ab2973e8dc3b..cf63f99700a934 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -419,6 +419,42 @@ TRACE_EVENT(mm_vmscan_lru_shrink_active,
 		show_reclaim_flags(__entry->reclaim_flags))
 );
 
+TRACE_EVENT(mm_vmscan_lru_shrink_promotable,
+
+	TP_PROTO(int nid, unsigned long nr_taken,
+		unsigned long nr_active, unsigned long nr_referenced,
+		unsigned long nr_promoted, int priority, int file),
+
+	TP_ARGS(nid, nr_taken, nr_active, nr_referenced, nr_promoted, priority, file),
+
+	TP_STRUCT__entry(
+		__field(int, nid)
+		__field(unsigned long, nr_taken)
+		__field(unsigned long, nr_active)
+		__field(unsigned long, nr_referenced)
+		__field(unsigned long, nr_promoted)
+		__field(int, priority)
+		__field(int, reclaim_flags)
+	),
+
+	TP_fast_assign(
+		__entry->nid = nid;
+		__entry->nr_taken = nr_taken;
+		__entry->nr_active = nr_active;
+		__entry->nr_referenced = nr_referenced;
+		__entry->nr_promoted = nr_promoted;
+		__entry->priority = priority;
+		__entry->reclaim_flags = trace_reclaim_flags(file);
+	),
+
+	TP_printk("nid=%d nr_taken=%ld nr_actived=%ld nr_referenced=%ld nr_promoted=%ld priority=%d flags=%s",
+		__entry->nid,
+		__entry->nr_taken,
+		__entry->nr_active, __entry->nr_referenced, __entry->nr_promoted,
+		__entry->priority,
+		show_reclaim_flags(__entry->reclaim_flags))
+);
+
 TRACE_EVENT(mm_vmscan_inactive_list_is_low,
 
 	TP_PROTO(int nid, int reclaim_idx,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index cdbb7a84cb6e18..8f2d2c378db632 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -96,8 +96,10 @@ static bool do_memsw_account(void)
 
 static const char *const mem_cgroup_lru_names[] = {
 	"inactive_anon",
 	"active_anon",
+	"promotable_anon",
 	"inactive_file",
 	"active_file",
+	"promotable_file",
 	"unevictable",
 };
diff --git a/mm/swap.c b/mm/swap.c
index ae300397dfdac9..d0386e1166af97 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -50,6 +50,7 @@ static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
 #ifdef CONFIG_SMP
 static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
+static DEFINE_PER_CPU(struct pagevec, promotable_page_pvecs);
 #endif
 
 /*
@@ -274,7 +275,7 @@ static void update_page_reclaim_stat(struct lruvec *lruvec,
 static void __activate_page(struct page *page, struct lruvec *lruvec,
 			    void *arg)
 {
-	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page) && !PagePromotable(page)) {
 		int file = page_is_file_cache(page);
 		int lru = page_lru_base_type(page);
 
@@ -289,6 +290,29 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
 	}
 }
 
+static void __promotable_page(struct page *page, struct lruvec *lruvec,
+			      void *arg)
+{
+	if (next_promotion_node(page_to_nid(page)) < 0)
+		return;
+
+	if (PageLRU(page) && PageActive(page) && !PagePromotable(page) &&
+	    !PageUnevictable(page)) {
+		struct zone *zone = page_zone(page);
+		int file = page_is_file_cache(page);
+		int lru = page_lru_base_type(page);
+
+		del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
+		SetPagePromotable(page);
+		add_page_to_lru_list(page, lruvec, lru + LRU_PROMOTE);
+		trace_mm_lru_promotable(page);
+
+		__count_vm_event(PGPROMOTABLE);
+		update_page_reclaim_stat(lruvec, file, 1);
+		wakeup_kswapd(zone, 0, 0, zone_idx(zone));
+	}
+}
+
 #ifdef CONFIG_SMP
 static void activate_page_drain(int cpu)
 {
@@ -316,11 +340,43 @@ void activate_page(struct page *page)
 	}
 }
 
+static void promotable_page_drain(int cpu)
+{
+	struct pagevec *pvec = &per_cpu(promotable_page_pvecs, cpu);
+
+	if (pagevec_count(pvec))
+		pagevec_lru_move_fn(pvec, __promotable_page, NULL);
+}
+
+static bool need_promotable_page_drain(int cpu)
+{
+	return pagevec_count(&per_cpu(promotable_page_pvecs, cpu)) != 0;
+}
+
+static void promotable_page(struct page *page)
+{
+	struct pagevec *pvec;
+
+	page = compound_head(page);
+	if (PageUnevictable(page) || PageHuge(page) || !PageLRU(page) ||
+	    next_promotion_node(page_to_nid(page)) < 0)
+		return;
+
+	pvec = &get_cpu_var(promotable_page_pvecs);
+	get_page(page);
+	if (!pagevec_add(pvec, page) || PageCompound(page))
+		pagevec_lru_move_fn(pvec, __promotable_page, NULL);
+	put_cpu_var(promotable_page_pvecs);
+}
 #else
 static inline void activate_page_drain(int cpu)
 {
 }
 
+static inline void promotable_page_drain(int cpu)
+{
+}
+
 void activate_page(struct page *page)
 {
 	pg_data_t *pgdat = page_pgdat(page);
@@ -330,6 +386,16 @@ void activate_page(struct page *page)
 	__activate_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL);
 	spin_unlock_irq(&pgdat->lru_lock);
 }
+
+void promotable_page(struct page *page)
+{
+	pg_data_t *pgdat = page_pgdat(page);
+
+	page = compound_head(page);
+	spin_lock_irq(&pgdat->lru_lock);
+	__promotable_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL);
+	spin_unlock_irq(&pgdat->lru_lock);
+}
 #endif
 
 static void __lru_cache_activate_page(struct page *page)
@@ -365,6 +431,7 @@ static void __lru_cache_activate_page(struct page *page)
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
+ * active,referenced		->	promotable
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
@@ -390,6 +457,9 @@ void mark_page_accessed(struct page *page)
 		workingset_activation(page);
 	} else if (!PageReferenced(page)) {
 		SetPageReferenced(page);
+	} else if (PageActive(page) && PageReferenced(page)) {
+		if (!PagePromotable(page) && PageLRU(page))
+			promotable_page(page);
 	}
 	if (page_is_idle(page))
 		clear_page_idle(page);
@@ -595,6 +665,7 @@ void lru_add_drain_cpu(int cpu)
 		pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
 
 	activate_page_drain(cpu);
+	promotable_page_drain(cpu);
 }
 
 /**
@@ -688,7 +759,8 @@ void lru_add_drain_all(void)
 		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
 		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
 		    pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
-		    need_activate_page_drain(cpu)) {
+		    need_activate_page_drain(cpu) ||
+		    need_promotable_page_drain(cpu)) {
 			INIT_WORK(work, lru_add_drain_per_cpu);
 			queue_work_on(cpu, mm_percpu_wq, work);
 			cpumask_set_cpu(cpu, &has_work);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bff6d88d21ec47..0b818b60c4de9b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2072,6 +2072,75 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	return nr_reclaimed;
 }
 
+static unsigned int shrink_promote_list(unsigned long nr_to_scan,
+					struct lruvec *lruvec,
+					struct scan_control *sc,
+					enum lru_list lru)
+{
+	int rc, file = is_file_lru(lru);
+	unsigned long nr_reclaimed = 0, nr_scanned, nr_taken, nr_activate;
+	LIST_HEAD(l_hold);
+	LIST_HEAD(l_free);
+	LIST_HEAD(l_active);
+	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+
+	lru_add_drain();
+	spin_lock_irq(&pgdat->lru_lock);
+	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
+				     &nr_scanned, sc, lru);
+	spin_unlock_irq(&pgdat->lru_lock);
+
+	if (list_empty(&l_hold))
+		return 0;
+
+	while (!list_empty(&l_hold)) {
+		struct page *page;
+
+		cond_resched();
+		page = lru_to_page(&l_hold);
+		list_del(&page->lru);
+
+		if (!trylock_page(page)) {
+			putback_lru_page(page);
+			continue;
+		}
+
+		ClearPagePromotable(page);
+		rc = migrate_promote_mapping(page);
+		if (rc == -ENOMEM && PageTransHuge(page) &&
+		    !split_huge_page_to_list(page, &l_hold))
+			rc = migrate_promote_mapping(page);
+
+		if (rc == MIGRATEPAGE_SUCCESS) {
+			unlock_page(page);
+			if (likely(put_page_testzero(page)))
+				list_add(&page->lru, &l_free);
+			nr_reclaimed++;
+		} else {
+			SetPageActive(page);
+			count_memcg_page_event(page, PGACTIVATE);
+			unlock_page(page);
+			list_add(&page->lru, &l_active);
+		}
+	}
+
+	mem_cgroup_uncharge_list(&l_free);
+	try_to_unmap_flush();
+	free_unref_page_list(&l_free);
+
+	spin_lock_irq(&pgdat->lru_lock);
+	nr_activate = move_pages_to_lru(lruvec, &l_active);
+	spin_unlock_irq(&pgdat->lru_lock);
+
+	mem_cgroup_uncharge_list(&l_active);
+	free_unref_page_list(&l_active);
+
+	trace_mm_vmscan_lru_shrink_promotable(pgdat->node_id, nr_taken, nr_activate,
+					      nr_scanned, nr_reclaimed, sc->priority, file);
+
+	return nr_reclaimed;
+}
+
 static void shrink_active_list(unsigned long nr_to_scan,
 			       struct lruvec *lruvec,
 			       struct scan_control *sc,
@@ -2256,6 +2325,8 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
 		if (inactive_list_is_low(lruvec, is_file_lru(lru), sc, true))
 			shrink_active_list(nr_to_scan, lruvec, sc, lru);
 		return 0;
+	} else if (is_promote_lru(lru)) {
+		return shrink_promote_list(nr_to_scan, lruvec, sc, lru);
 	}
 
 	return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
@@ -2274,8 +2345,8 @@ enum scan_balance {
 * by looking at the fraction of the pages scanned we did rotate back
 * onto the active list instead of evict.
 *
- * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
- * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
+ * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan; nr[2] = anon promotable pages to scan
+ * nr[3] = file inactive pages to scan; nr[4] = file active pages to scan; nr[5] = file promotable pages to scan
 */
 static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
			   struct scan_control *sc, unsigned long *nr)
@@ -2398,8 +2469,10 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 	 */
 	anon  = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) +
+		lruvec_lru_size(lruvec, LRU_PROMOTE_ANON, MAX_NR_ZONES) +
 		lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES);
 	file  = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES) +
+		lruvec_lru_size(lruvec, LRU_PROMOTE_FILE, MAX_NR_ZONES) +
 		lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES);
 
 	spin_lock_irq(&pgdat->lru_lock);
@@ -2440,7 +2513,7 @@ out:
			 * If the cgroup's already been deleted, make sure to
			 * scrape out the remaining cache.
			 */
-			if (!scan && !mem_cgroup_online(memcg))
+			if (!scan && (!mem_cgroup_online(memcg) || is_promote_lru(lru)))
				scan = min(size, SWAP_CLUSTER_MAX);
 
			switch (scan_balance) {
@@ -2510,7 +2583,8 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg,
					 sc->priority == DEF_PRIORITY);
 
	blk_start_plug(&plug);
-	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
+	while (nr[LRU_INACTIVE_ANON] || nr[LRU_PROMOTE_ANON] ||
+	       nr[LRU_ACTIVE_FILE] || nr[LRU_PROMOTE_FILE] ||
					nr[LRU_INACTIVE_FILE]) {
		unsigned long nr_anon, nr_file, percentage;
		unsigned long nr_scanned;
@@ -2537,8 +2611,8 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memcg,
		 * stop reclaiming one LRU and reduce the amount scanning
		 * proportional to the original scan target.
		 */
-		nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];
-		nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];
+		nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE] + nr[LRU_PROMOTE_FILE];
+		nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON] + nr[LRU_PROMOTE_ANON];
 
		/*
		 * It's just vindictive to attack the larger once the smaller
diff --git a/mm/vmstat.c b/mm/vmstat.c
index fd7e16ca6996cc..b7c6a49480b997 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1111,8 +1111,10 @@ const char * const vmstat_text[] = {
 	"nr_free_pages",
 	"nr_zone_inactive_anon",
 	"nr_zone_active_anon",
+	"nr_zone_promote_anon",
 	"nr_zone_inactive_file",
 	"nr_zone_active_file",
+	"nr_zone_promote_file",
 	"nr_zone_unevictable",
 	"nr_zone_write_pending",
 	"nr_mlock",
@@ -1137,8 +1139,10 @@ const char * const vmstat_text[] = {
 	/* Node-based counters */
 	"nr_inactive_anon",
 	"nr_active_anon",
+	"nr_promote_anon",
 	"nr_inactive_file",
 	"nr_active_file",
+	"nr_promote_file",
 	"nr_unevictable",
 	"nr_slab_reclaimable",
 	"nr_slab_unreclaimable",
@@ -1184,6 +1188,7 @@ const char * const vmstat_text[] = {
 	"pgfree",
 	"pgactivate",
 	"pgdeactivate",
+	"pgpromotable",
 	"pglazyfree",
 	"pgfault",
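
The mm/swap.c hunks above follow the kernel's usual per-CPU pagevec
pattern: promotable_page() batches pages in a per-CPU buffer and only
takes the node's lru_lock when the batch fills or a drain runs. A
simplified userspace sketch of that batching discipline -- PAGEVEC_SIZE
matches the era's kernel value, but the flush hook here is an assumed
stand-in for pagevec_lru_move_fn():

#include <stdio.h>

#define PAGEVEC_SIZE 15

struct page;	/* opaque, as in the kernel */

struct pagevec {
	unsigned char nr;
	struct page *pages[PAGEVEC_SIZE];
};

/* Stand-in for pagevec_lru_move_fn(): would take the node's lru_lock
 * once and move every batched page to its promotion candidate list. */
static void flush_batch(struct pagevec *pvec)
{
	printf("flushing %u page(s) under one lru_lock acquisition\n",
	       pvec->nr);
	pvec->nr = 0;
}

/* Mirrors pagevec_add(): returns the space remaining after the add. */
static int batch_add(struct pagevec *pvec, struct page *page)
{
	pvec->pages[pvec->nr++] = page;
	return PAGEVEC_SIZE - pvec->nr;
}

int main(void)
{
	struct pagevec pvec = { 0 };
	struct page *fake = (struct page *)0x1;

	for (int i = 0; i < 40; i++)
		if (!batch_add(&pvec, fake))
			flush_batch(&pvec);	/* batch full: one locked flush */

	if (pvec.nr)
		flush_batch(&pvec);		/* lru_add_drain_cpu() analogue */
	return 0;
}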