From 4568b6013fcbff5a3aa35396a1bac03b1e022f9c Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 3 Jul 2009 08:44:37 -0500
Subject: [PATCH] mm: page_alloc reduce lock sections further

commit 46167aec68f48cbbeff23cae9173bc4d19a7bcda in tip.

Split out the pages which are to be freed into a separate list and
call free_pages_bulk() outside of the percpu page allocator locks.

Signed-off-by: Peter Zijlstra
Signed-off-by: Thomas Gleixner
Signed-off-by: Paul Gortmaker
---
 include/linux/list.h |    3 +++
 mm/page_alloc.c      |   44 ++++++++++++++++++++++++++++++++++++--------
 2 files changed, 39 insertions(+), 8 deletions(-)

diff --git a/include/linux/list.h b/include/linux/list.h
index 8392884..703cd07 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -359,6 +359,9 @@ static inline void list_splice_tail_init(struct list_head *list,
 #define list_first_entry(ptr, type, member) \
         list_entry((ptr)->next, type, member)
 
+#define list_last_entry(ptr, type, member) \
+        list_entry((ptr)->prev, type, member)
+
 /**
  * list_for_each - iterate over a list
  * @pos: the &struct list_head to use as a loop cursor.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d870c91..fac0711 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -599,8 +599,9 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
         int migratetype = 0;
         int batch_free = 0;
+        unsigned long flags;
 
-        spin_lock(&zone->lock);
+        spin_lock_irqsave(&zone->lock, flags);
         zone->all_unreclaimable = 0;
         zone->pages_scanned = 0;
@@ -630,21 +631,26 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                         /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
                         __free_one_page(page, zone, 0, page_private(page));
                         trace_mm_page_pcpu_drain(page, 0, page_private(page));
+#ifdef CONFIG_PREEMPT_RT
+                        cond_resched_lock(&zone->lock);
+#endif
                 } while (--count && --batch_free && !list_empty(list));
         }
-        spin_unlock(&zone->lock);
+        spin_unlock_irqrestore(&zone->lock, flags);
 }
 
 static void free_one_page(struct zone *zone, struct page *page, int order,
                                 int migratetype)
 {
-        spin_lock(&zone->lock);
+        unsigned long flags;
+
+        spin_lock_irqsave(&zone->lock, flags);
         zone->all_unreclaimable = 0;
         zone->pages_scanned = 0;
         __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
         __free_one_page(page, zone, order, migratetype);
-        spin_unlock(&zone->lock);
+        spin_unlock_irqrestore(&zone->lock, flags);
 }
 
 static void __free_pages_ok(struct page *page, unsigned int order)
@@ -1039,6 +1045,16 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
         return i;
 }
 
+static void
+isolate_pcp_pages(int count, struct list_head *src, struct list_head *dst)
+{
+        while (count--) {
+                struct page *page = list_last_entry(src, struct page, lru);
+                list_move(&page->lru, dst);
+        }
+}
+
+
 #ifdef CONFIG_NUMA
 /*
  * Called from the vmstat counter updater to drain pagesets of this
@@ -1050,6 +1066,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
  */
 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 {
+        LIST_HEAD(free_list);
         unsigned long flags;
         int to_drain;
         int this_cpu;
@@ -1059,9 +1076,10 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
                 to_drain = pcp->batch;
         else
                 to_drain = pcp->count;
-        free_pcppages_bulk(zone, to_drain, pcp);
+        isolate_pcp_pages(to_drain, &pcp->list, &free_list);
         pcp->count -= to_drain;
         unlock_cpu_pcp(flags, this_cpu);
+        free_pages_bulk(zone, to_drain, &free_list, 0);
 }
 #endif
 
@@ -1080,6 +1098,8 @@ static void drain_pages(unsigned int cpu)
         for_each_populated_zone(zone) {
                 struct per_cpu_pageset *pset;
                 struct per_cpu_pages *pcp;
+                LIST_HEAD(free_list);
+                int count;
 
                 __lock_cpu_pcp(&flags, cpu);
                 pset = per_cpu_ptr(zone->pageset, cpu);
@@ -1090,9 +1110,11 @@ static void drain_pages(unsigned int cpu)
                         continue;
                 }
                 pcp = &pset->pcp;
-                free_pcppages_bulk(zone, pcp->count, pcp);
+                isolate_pcp_pages(pcp->count, &pcp->list, &free_list);
+                count = pcp->count;
                 pcp->count = 0;
                 unlock_cpu_pcp(flags, cpu);
+                free_pages_bulk(zone, count, &free_list, 0);
         }
 }
 
@@ -1199,7 +1221,7 @@ void free_hot_cold_page(struct page *page, int cold)
         struct per_cpu_pages *pcp;
         unsigned long flags;
         int migratetype;
-        int this_cpu, wasMlocked = __TestClearPageMlocked(page);
+        int count, this_cpu, wasMlocked = __TestClearPageMlocked(page);
 
         trace_mm_page_free_direct(page, 0);
         kmemcheck_free_shadow(page, 0);
@@ -1245,8 +1267,14 @@
                 list_add(&page->lru, &pcp->lists[migratetype]);
         pcp->count++;
         if (pcp->count >= pcp->high) {
-                free_pcppages_bulk(zone, pcp->batch, pcp);
+                LIST_HEAD(free_list);
+
+                isolate_pcp_pages(pcp->batch, &pcp->list, &free_list);
                 pcp->count -= pcp->batch;
+                count = pcp->batch;
+                put_zone_pcp(zone, flags, this_cpu);
+                free_pages_bulk(zone, count, &free_list, 0);
+                return;
         }
 
 out:
-- 
1.7.0.4
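
A note for readers following the locking change rather than the diff
mechanics: the patch is an instance of a general lock-narrowing pattern.
Hold the lock only long enough to detach the work (here, the pages) onto a
private list, then do the expensive part (the buddy free) with the lock
dropped. Below is a minimal userspace sketch of that isolate-then-free
shape. It is an illustration, not kernel code: a pthread mutex stands in
for the pcp/zone locks, struct fake_page stands in for struct page, and
isolate_pages()/drain_batch() are made-up names that only mirror the roles
isolate_pcp_pages() and free_pages_bulk() play in the patch.

/*
 * Userspace sketch of the isolate-then-free pattern applied by the patch.
 * Assumptions: pthread mutex in place of the pcp/zone locks; a tiny
 * list_head implementation in place of <linux/list.h>.
 *
 * Build: gcc -std=c99 -pthread isolate_sketch.c
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *prev, *next; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* Same shape as the list_last_entry() macro the patch adds to list.h,
 * written with offsetof() instead of the kernel's list_entry(). */
#define list_last_entry(ptr, type, member) \
        ((type *)((char *)(ptr)->prev - offsetof(type, member)))
#define list_first_entry(ptr, type, member) \
        ((type *)((char *)(ptr)->next - offsetof(type, member)))

static void list_init(struct list_head *h) { h->prev = h->next = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_del(struct list_head *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
}

static void list_add(struct list_head *e, struct list_head *h)
{
        e->prev = h; e->next = h->next;
        h->next->prev = e; h->next = e;
}

/* Move @e to the front of @h, like the kernel's list_move(). */
static void list_move(struct list_head *e, struct list_head *h)
{
        list_del(e);
        list_add(e, h);
}

struct fake_page {
        struct list_head lru;
        int id;
};

/* Stand-in for one CPU's pcp state, guarded by pcp_lock. */
static struct list_head pcp_list = LIST_HEAD_INIT(pcp_list);
static int pcp_count;
static pthread_mutex_t pcp_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors isolate_pcp_pages(): detach the last @count entries of @src
 * onto the private list @dst; caller holds the lock protecting @src. */
static void isolate_pages(int count, struct list_head *src,
                          struct list_head *dst)
{
        while (count--) {
                struct fake_page *page =
                        list_last_entry(src, struct fake_page, lru);
                list_move(&page->lru, dst);
        }
}

/* The shape of the patched drain paths: lock, isolate, account, unlock,
 * then do the per-page work with no lock held. */
static void drain_batch(int batch)
{
        struct list_head free_list;

        list_init(&free_list);

        pthread_mutex_lock(&pcp_lock);
        if (batch > pcp_count)
                batch = pcp_count;
        isolate_pages(batch, &pcp_list, &free_list);
        pcp_count -= batch;
        pthread_mutex_unlock(&pcp_lock);

        /* Stand-in for free_pages_bulk(): runs with the lock dropped. */
        while (!list_empty(&free_list)) {
                struct fake_page *page =
                        list_first_entry(&free_list, struct fake_page, lru);
                list_del(&page->lru);
                printf("freeing page %d outside the lock\n", page->id);
                free(page);
        }
}

int main(void)
{
        /* Fill the "pcp" with eight pages (single-threaded setup, so the
         * lock is elided here), then drain in two batches. */
        for (int i = 0; i < 8; i++) {
                struct fake_page *page = malloc(sizeof(*page));
                page->id = i;
                list_add(&page->lru, &pcp_list);
                pcp_count++;
        }
        drain_batch(4); /* frees the four oldest pages (ids 0-3) */
        drain_batch(8); /* clamped to the four that remain */
        return 0;
}

The same ordering is visible at each call site the patch touches:
isolate_pcp_pages(), adjust pcp->count, unlock (unlock_cpu_pcp() or
put_zone_pcp()), then free_pages_bulk(). It is also why free_hot_cold_page()
gains an early return: on that path the per-cpu lock has already been
released before free_pages_bulk() runs, so the function must not fall
through to its normal exit path.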