author		Ingo Molnar <mingo@elte.hu>	2009-07-03 08:29:37 -0500
committer	Sebastian Andrzej Siewior <bigeasy@linutronix.de>	2019-07-11 17:28:09 +0200
commit		2480ff2f48a7c49ac0bc4c6170c29b2f466337db (patch)
tree		531dda2ce2b1035aad3b9f42b50a0331cac922aa
parent		d9b49fdaa4fd0079572e5aac64f4ad3b87b7d8fa (diff)
mm: page_alloc: rt-friendly per-cpu pages
rt-friendly per-cpu pages: convert the irqs-off per-cpu locking method into a
preemptible, explicit-per-cpu-locks method.

Contains fixes from:

	 Peter Zijlstra <a.p.zijlstra@chello.nl>
	 Thomas Gleixner <tglx@linutronix.de>

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
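The pattern repeated throughout the hunks below is to replace each local_irq_save()/local_irq_restore() pair that guards the per-cpu pagesets with the pa_lock local lock. A minimal sketch of a converted section, using the locallock API pulled in via <linux/locallock.h>; the helper name and body here are illustrative only and do not appear in the patch:

	static DEFINE_LOCAL_IRQ_LOCK(pa_lock);

	/* Hypothetical example of a pageset operation after the conversion. */
	static void example_pcp_operation(void)
	{
		unsigned long flags;

		/* was: local_irq_save(flags); */
		local_lock_irqsave(pa_lock, flags);

		/* ... operate on this CPU's per-cpu page lists ... */

		/* was: local_irq_restore(flags); */
		local_unlock_irqrestore(pa_lock, flags);
	}

On kernels without CONFIG_PREEMPT_RT_BASE the local lock falls back to plain interrupt disabling, so behaviour there is unchanged; with CONFIG_PREEMPT_RT_BASE it becomes an explicit per-cpu lock, so the pageset operations stay preemptible.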
-rw-r--r--	mm/page_alloc.c	62
1 file changed, 43 insertions(+), 19 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0519f51bf6d5d7..c6161af2c7e0cd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -62,6 +62,7 @@
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
+#include <linux/locallock.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
@@ -311,6 +312,18 @@ EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif
+static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define cpu_lock_irqsave(cpu, flags) \
+ local_lock_irqsave_on(pa_lock, flags, cpu)
+# define cpu_unlock_irqrestore(cpu, flags) \
+ local_unlock_irqrestore_on(pa_lock, flags, cpu)
+#else
+# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags)
+# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags)
+#endif
+
int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
@@ -1389,10 +1402,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
return;
migratetype = get_pfnblock_migratetype(page, pfn);
- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
__count_vm_events(PGFREE, 1 << order);
free_one_page(page_zone(page), page, pfn, order, migratetype);
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
}
void __free_pages_core(struct page *page, unsigned int order)
@@ -2748,13 +2761,13 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
int to_drain, batch;
LIST_HEAD(dst);
- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
batch = READ_ONCE(pcp->batch);
to_drain = min(pcp->count, batch);
if (to_drain > 0)
isolate_pcp_pages(to_drain, pcp, &dst);
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
if (to_drain > 0)
free_pcppages_bulk(zone, &dst, false);
@@ -2776,7 +2789,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
LIST_HEAD(dst);
int count;
- local_irq_save(flags);
+ cpu_lock_irqsave(cpu, flags);
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
@@ -2784,7 +2797,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
if (count)
isolate_pcp_pages(count, pcp, &dst);
- local_irq_restore(flags);
+ cpu_unlock_irqrestore(cpu, flags);
if (count)
free_pcppages_bulk(zone, &dst, false);
@@ -2822,6 +2835,7 @@ void drain_local_pages(struct zone *zone)
drain_pages(cpu);
}
+#ifndef CONFIG_PREEMPT_RT_BASE
static void drain_local_pages_wq(struct work_struct *work)
{
struct pcpu_drain *drain;
@@ -2839,6 +2853,7 @@ static void drain_local_pages_wq(struct work_struct *work)
drain_local_pages(drain->zone);
preempt_enable();
}
+#endif
/*
* Spill all the per-cpu pages from all CPUs back into the buddy allocator.
@@ -2906,6 +2921,14 @@ void drain_all_pages(struct zone *zone)
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
+#ifdef CONFIG_PREEMPT_RT_BASE
+ for_each_cpu(cpu, &cpus_with_pcps) {
+ if (zone)
+ drain_pages_zone(cpu, zone);
+ else
+ drain_pages(cpu);
+ }
+#else
for_each_cpu(cpu, &cpus_with_pcps) {
struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
@@ -2915,6 +2938,7 @@ void drain_all_pages(struct zone *zone)
}
for_each_cpu(cpu, &cpus_with_pcps)
flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
+#endif
mutex_unlock(&pcpu_drain_mutex);
}
@@ -3034,9 +3058,9 @@ void free_unref_page(struct page *page)
if (!free_unref_page_prepare(page, pfn))
return;
- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
free_unref_page_commit(page, pfn, &dst);
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
if (!list_empty(&dst))
free_pcppages_bulk(zone, &dst, false);
}
@@ -3063,7 +3087,7 @@ void free_unref_page_list(struct list_head *list)
set_page_private(page, pfn);
}
- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
list_for_each_entry_safe(page, next, list, lru) {
unsigned long pfn = page_private(page);
enum zone_type type;
@@ -3078,12 +3102,12 @@ void free_unref_page_list(struct list_head *list)
* a large list of pages to free.
*/
if (++batch_count == SWAP_CLUSTER_MAX) {
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
batch_count = 0;
- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
}
}
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
for (i = 0; i < __MAX_NR_ZONES; ) {
struct page *page;
@@ -3233,7 +3257,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
struct page *page;
unsigned long flags;
- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
@@ -3241,7 +3265,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
zone_statistics(preferred_zone, zone);
}
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
return page;
}
@@ -3268,7 +3292,7 @@ struct page *rmqueue(struct zone *preferred_zone,
* allocate greater than order-1 page units with __GFP_NOFAIL.
*/
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
- spin_lock_irqsave(&zone->lock, flags);
+ local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
do {
page = NULL;
@@ -3288,7 +3312,7 @@ struct page *rmqueue(struct zone *preferred_zone,
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
out:
/* Separate test+clear to avoid unnecessary atomics */
@@ -3301,7 +3325,7 @@ out:
return page;
failed:
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
return NULL;
}
@@ -8490,7 +8514,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
@@ -8499,7 +8523,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
}
#ifdef CONFIG_MEMORY_HOTREMOVE