From f6c03c1c5fe8633aa2b622fff539153fb2ca0849 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 3 Jul 2009 08:44:05 -0500
Subject: [PATCH] mm: quicklist: Convert to percpu locked

commit 1578a2b7d1300f4e27cea087e6cdce9b8fbbcb4a in tip.

Use per-CPU locked variables for the quicklists as well, to make the
code preemptible.

[ tglx: folded Ingo's "release before free page fix" ]

Signed-off-by: Peter Zijlstra
Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
Signed-off-by: Paul Gortmaker
---
 include/linux/quicklist.h |   27 ++++++++++++++++++---------
 mm/quicklist.c            |   15 ++++++---------
 2 files changed, 24 insertions(+), 18 deletions(-)

diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h
index bd46643..1bc3d46 100644
--- a/include/linux/quicklist.h
+++ b/include/linux/quicklist.h
@@ -18,7 +18,7 @@ struct quicklist {
 	int nr_pages;
 };
 
-DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
+DECLARE_PER_CPU_LOCKED(struct quicklist, quicklist)[CONFIG_NR_QUICK];
 
 /*
  * The two key functions quicklist_alloc and quicklist_free are inline so
@@ -30,19 +30,27 @@ DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
  * The fast path in quicklist_alloc touches only a per cpu cacheline and
  * the first cacheline of the page itself. There is minimal overhead involved.
  */
-static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *))
+static inline void *__quicklist_alloc(struct quicklist *q)
 {
-	struct quicklist *q;
-	void **p = NULL;
+	void **p = q->page;
 
-	q =&get_cpu_var(quicklist)[nr];
-	p = q->page;
 	if (likely(p)) {
 		q->page = p[0];
 		p[0] = NULL;
 		q->nr_pages--;
 	}
-	put_cpu_var(quicklist);
+	return p;
+}
+
+static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *))
+{
+	struct quicklist *q;
+	void **p;
+	int cpu;
+
+	q = &get_cpu_var_locked(quicklist, &cpu)[nr];
+	p = __quicklist_alloc(q);
+	put_cpu_var_locked(quicklist, cpu);
 
 	if (likely(p))
 		return p;
@@ -56,12 +64,13 @@ static inline void __quicklist_free(int nr, void (*dtor)(void *), void *p,
 	struct page *page)
 {
 	struct quicklist *q;
+	int cpu;
 
-	q = &get_cpu_var(quicklist)[nr];
+	q = &get_cpu_var_locked(quicklist, &cpu)[nr];
 	*(void **)p = q->page;
 	q->page = p;
 	q->nr_pages++;
-	put_cpu_var(quicklist);
+	put_cpu_var_locked(quicklist, cpu);
 }
 
 static inline void quicklist_free(int nr, void (*dtor)(void *), void *pp)
diff --git a/mm/quicklist.c b/mm/quicklist.c
index 2876349..1b1ee21 100644
--- a/mm/quicklist.c
+++ b/mm/quicklist.c
@@ -20,7 +20,7 @@
 #include <linux/module.h>
 #include <linux/quicklist.h>
 
-DEFINE_PER_CPU(struct quicklist [CONFIG_NR_QUICK], quicklist);
+DEFINE_PER_CPU_LOCKED(struct quicklist [CONFIG_NR_QUICK], quicklist);
 
 #define FRACTION_OF_NODE_MEM	16
 
@@ -66,17 +66,14 @@ void quicklist_trim(int nr, void (*dtor)(void *),
 {
 	long pages_to_free;
 	struct quicklist *q;
+	int cpu;
 
-	q = &get_cpu_var(quicklist)[nr];
+	q = &get_cpu_var_locked(quicklist, &cpu)[nr];
 	if (q->nr_pages > min_pages) {
 		pages_to_free = min_pages_to_free(q, min_pages, max_free);
 
 		while (pages_to_free > 0) {
-			/*
-			 * We pass a gfp_t of 0 to quicklist_alloc here
-			 * because we will never call into the page allocator.
-			 */
-			void *p = quicklist_alloc(nr, 0, NULL);
+			void *p = __quicklist_alloc(q);
 
 			if (dtor)
 				dtor(p);
@@ -84,7 +81,7 @@ void quicklist_trim(int nr, void (*dtor)(void *),
 				pages_to_free--;
 			}
 		}
-	put_cpu_var(quicklist);
+	put_cpu_var_locked(quicklist, cpu);
 }
 
 unsigned long quicklist_total_size(void)
@@ -94,7 +91,7 @@ unsigned long quicklist_total_size(void)
 	struct quicklist *ql, *q;
 
 	for_each_online_cpu(cpu) {
-		ql = per_cpu(quicklist, cpu);
+		ql = per_cpu_var_locked(quicklist, cpu);
 		for (q = ql; q < ql + CONFIG_NR_QUICK; q++)
 			count += q->nr_pages;
 	}
-- 
1.7.0.4
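
For readers without the -rt tree at hand, here is a minimal sketch of the
per-CPU locked pattern this patch relies on. The real
DEFINE_PER_CPU_LOCKED / get_cpu_var_locked() primitives live in the -rt
patch set; every name below (quicklist_sketch, quicklist_sketch_lock,
sketch_get_quicklists(), sketch_put_quicklists(), sketch_alloc()) is an
illustrative assumption, not the actual implementation:

#include <linux/percpu.h>
#include <linux/quicklist.h>
#include <linux/smp.h>
#include <linux/spinlock.h>

/*
 * Sketch (assumed layout, not the real -rt macros): pair each CPU's
 * quicklist array with its own spinlock, so the data is serialized by
 * a lock rather than by disabling preemption the way
 * get_cpu_var()/put_cpu_var() do.  On PREEMPT_RT the spinlock is a
 * sleeping lock, so the critical section stays preemptible.  The
 * locks are assumed to be initialized at boot with spin_lock_init().
 */
static DEFINE_PER_CPU(spinlock_t, quicklist_sketch_lock);
static DEFINE_PER_CPU(struct quicklist [CONFIG_NR_QUICK], quicklist_sketch);

static struct quicklist *sketch_get_quicklists(int *cpu)
{
	/*
	 * Snapshot the current CPU, then lock that CPU's lists.  If the
	 * task migrates in between, it simply operates on the previous
	 * CPU's lists; the lock keeps that safe, and the cpu cookie
	 * handed back to the caller makes the later unlock hit the
	 * same lock regardless of where the task runs by then.
	 */
	*cpu = raw_smp_processor_id();
	spin_lock(&per_cpu(quicklist_sketch_lock, *cpu));
	return per_cpu(quicklist_sketch, *cpu);
}

static void sketch_put_quicklists(int cpu)
{
	spin_unlock(&per_cpu(quicklist_sketch_lock, cpu));
}

/* Usage mirroring the patched quicklist_alloc() fast path. */
static void *sketch_alloc(int nr)
{
	struct quicklist *q;
	void *p;
	int cpu;

	q = &sketch_get_quicklists(&cpu)[nr];
	p = __quicklist_alloc(q);	/* NULL: fall back to page allocator */
	sketch_put_quicklists(cpu);
	return p;
}

This also shows why quicklist_trim() switches from quicklist_alloc() to
the new unlocked helper __quicklist_alloc(), which assumes the lock is
already held: trim holds the per-CPU lock across its whole drain loop,
and since the -rt spinlocks are not recursive, calling the locking
quicklist_alloc() from inside that critical section would deadlock on
the same lock.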