author    Martin J. Bligh <mbligh@aracnet.com>  2004-06-26 20:55:07 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2004-06-26 20:55:07 -0700
commit    12d9986b3c8acda935906514c593ea806e1736bd (patch)
tree      dc5abbfd0e984458e25ca80231cefd5c5dc204c5 /mm
parent    dd47df980c02eb33833b2690b033c34fba2fa80d (diff)
download  history-12d9986b3c8acda935906514c593ea806e1736bd.tar.gz
[PATCH] make __free_pages_bulk more comprehensible
I find __free_pages_bulk very hard to understand ... (I was trying to
mod it for the non-MAX_ORDER-aligned zones, and cleaned it up first).
This should make it much more comprehensible to mortal man ... I
benchmarked the changes on the big 16x and it's no slower (actually
it's about 0.5% faster, but that's within experimental error).

I moved the creation of mask into __free_pages_bulk from the caller -
it seems to really belong inside there. Then, instead of doing weird
limbo dances with mask, I made it use order instead, where it's more
intuitive. Personally, I find this makes the whole thing a damned
sight easier to understand.

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
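The equivalence the patch relies on: the old code's comment used the
identity -mask = 1 + ~mask, and with mask = (~0UL) << order this means
-mask == 1UL << order, so page_idx ^ -mask and page_idx ^ (1 << order)
compute the same buddy index. A minimal standalone check of that
equivalence (ordinary userspace C, not kernel code; the page_idx value
and order range are illustrative):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            /* Arbitrary index; the identity holds for any page_idx. */
            const unsigned long page_idx = 0x2a000UL;
            unsigned int order;

            for (order = 0; order < 11; order++) {
                    unsigned long mask = (~0UL) << order;

                    /* Old computation, using -mask == 1 + ~mask. */
                    unsigned long old_buddy = page_idx ^ -mask;
                    /* New computation from the patch. */
                    unsigned long new_buddy = page_idx ^ (1UL << order);

                    assert(old_buddy == new_buddy);
            }
            printf("old and new buddy computations agree for all orders\n");
            return 0;
    }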
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c | 25
1 file changed, 11 insertions(+), 14 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fe55c14bd4dc98..9e1885109665e5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -179,20 +179,20 @@ static void destroy_compound_page(struct page *page, unsigned long order)
*/
static inline void __free_pages_bulk (struct page *page, struct page *base,
- struct zone *zone, struct free_area *area, unsigned long mask,
- unsigned int order)
+ struct zone *zone, struct free_area *area, unsigned int order)
{
- unsigned long page_idx, index;
+ unsigned long page_idx, index, mask;
if (order)
destroy_compound_page(page, order);
+ mask = (~0UL) << order;
page_idx = page - base;
if (page_idx & ~mask)
BUG();
index = page_idx >> (1 + order);
- zone->free_pages -= mask;
- while (mask + (1 << (MAX_ORDER-1))) {
+ zone->free_pages += 1 << order;
+ while (order < MAX_ORDER-1) {
struct page *buddy1, *buddy2;
BUG_ON(area >= zone->free_area + MAX_ORDER);
@@ -201,17 +201,15 @@ static inline void __free_pages_bulk (struct page *page, struct page *base,
* the buddy page is still allocated.
*/
break;
- /*
- * Move the buddy up one level.
- * This code is taking advantage of the identity:
- * -mask = 1+~mask
- */
- buddy1 = base + (page_idx ^ -mask);
+
+ /* Move the buddy up one level. */
+ buddy1 = base + (page_idx ^ (1 << order));
buddy2 = base + page_idx;
BUG_ON(bad_range(zone, buddy1));
BUG_ON(bad_range(zone, buddy2));
list_del(&buddy1->lru);
mask <<= 1;
+ order++;
area++;
index >>= 1;
page_idx &= mask;
@@ -255,12 +253,11 @@ static int
free_pages_bulk(struct zone *zone, int count,
struct list_head *list, unsigned int order)
{
- unsigned long mask, flags;
+ unsigned long flags;
struct free_area *area;
struct page *base, *page = NULL;
int ret = 0;
- mask = (~0UL) << order;
base = zone->zone_mem_map;
area = zone->free_area + order;
spin_lock_irqsave(&zone->lock, flags);
@@ -270,7 +267,7 @@ free_pages_bulk(struct zone *zone, int count,
page = list_entry(list->prev, struct page, lru);
/* have to delete it as __free_pages_bulk list manipulates */
list_del(&page->lru);
- __free_pages_bulk(page, base, zone, area, mask, order);
+ __free_pages_bulk(page, base, zone, area, order);
ret++;
}
spin_unlock_irqrestore(&zone->lock, flags);
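For readers skimming the diff, here is a minimal userspace sketch of
the merge loop as it reads after the patch, walking order upward and
using page_idx ^ (1 << order) to find the buddy. The free_at array and
free_block helper are stand-ins invented for illustration; the kernel
instead keeps per-order free_area lists and a bitmap index, omitted
here:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_ORDER 5                     /* small, illustrative value */
    #define NPAGES (1u << (MAX_ORDER - 1))

    /* free_at[order][idx] marks a free block of 2^order pages at idx. */
    static bool free_at[MAX_ORDER][NPAGES];

    static void free_block(unsigned long page_idx, unsigned int order)
    {
            while (order < MAX_ORDER - 1) {
                    unsigned long buddy = page_idx ^ (1UL << order);

                    if (!free_at[order][buddy])
                            break;  /* buddy still allocated: stop merging */

                    free_at[order][buddy] = false;  /* like list_del() */
                    order++;                        /* move up one level */
                    page_idx &= (~0UL) << order;    /* merged block start */
            }
            free_at[order][page_idx] = true;
    }

    int main(void)
    {
            free_block(0, 0);
            free_block(1, 0);  /* merges with page 0 into an order-1 block */
            printf("order-1 block at 0 free: %d\n", free_at[1][0]);
            return 0;
    }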