--- x/include/linux/gfp.h.~1~	2003-08-31 02:38:26.000000000 +0200
+++ x/include/linux/gfp.h	2004-04-02 02:49:21.241968968 +0200
@@ -32,6 +32,7 @@
 #define __GFP_NOFAIL	0x800	/* Retry for ever. Cannot fail */
 #define __GFP_NORETRY	0x1000	/* Do not retry. Might fail */
 #define __GFP_NO_GROW	0x2000	/* Slab internal usage */
+#define __GFP_NO_COMP	0x4000	/* Return non compound pages if order > 0 */
 
 #define __GFP_BITS_SHIFT 16	/* Room for 16 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
--- x/include/linux/mm.h.~1~	2004-04-01 18:32:55.000000000 +0200
+++ x/include/linux/mm.h	2004-04-02 03:39:40.884913464 +0200
@@ -445,8 +445,6 @@ struct page {
 
 extern void FASTCALL(__page_cache_release(struct page *));
 
-#ifdef CONFIG_HUGETLB_PAGE
-
 static inline int page_count(struct page *p)
 {
 	if (PageCompound(p))
@@ -478,23 +476,6 @@ static inline void put_page(struct page
 	__page_cache_release(page);
 }
 
-#else		/* CONFIG_HUGETLB_PAGE */
-
-#define page_count(p)		atomic_read(&(p)->count)
-
-static inline void get_page(struct page *page)
-{
-	atomic_inc(&page->count);
-}
-
-static inline void put_page(struct page *page)
-{
-	if (!PageReserved(page) && put_page_testzero(page))
-		__page_cache_release(page);
-}
-
-#endif		/* CONFIG_HUGETLB_PAGE */
-
 /*
  * Multiple processes may "see" the same page. E.g. for untouched
  * mappings of /dev/null, all processes see the same page full of
--- x/kernel/power/pmdisk.c.~1~	2004-03-11 08:27:47.000000000 +0100
+++ x/kernel/power/pmdisk.c	2004-04-02 02:51:09.000000000 +0200
@@ -531,7 +531,7 @@ static void calc_order(void)
 static int alloc_pagedir(void)
 {
 	calc_order();
-	pagedir_save = (suspend_pagedir_t *)__get_free_pages(GFP_ATOMIC | __GFP_COLD,
+	pagedir_save = (suspend_pagedir_t *)__get_free_pages(GFP_ATOMIC | __GFP_COLD | __GFP_NO_COMP,
 							     pagedir_order);
 	if(!pagedir_save)
 		return -ENOMEM;
--- x/kernel/power/swsusp.c.~1~	2004-03-11 08:27:47.000000000 +0100
+++ x/kernel/power/swsusp.c	2004-04-02 03:03:03.327992896 +0200
@@ -442,7 +442,7 @@ static suspend_pagedir_t *create_suspend
 
 	pagedir_order = get_bitmask_order(SUSPEND_PD_PAGES(nr_copy_pages));
 
-	p = pagedir = (suspend_pagedir_t *)__get_free_pages(GFP_ATOMIC | __GFP_COLD, pagedir_order);
+	p = pagedir = (suspend_pagedir_t *)__get_free_pages(GFP_ATOMIC | __GFP_COLD | __GFP_NO_COMP, pagedir_order);
 	if(!pagedir)
 		return NULL;
--- x/mm/page_alloc.c.~1~	2004-04-01 18:32:54.000000000 +0200
+++ x/mm/page_alloc.c	2004-04-02 03:53:33.897276336 +0200
@@ -93,10 +93,6 @@ static void bad_page(const char *functio
 	page->mapcount = 0;
 }
 
-#ifndef CONFIG_HUGETLB_PAGE
-#define prep_compound_page(page, order) do { } while (0)
-#define destroy_compound_page(page, order) do { } while (0)
-#else
 /*
  * Higher-order pages are called "compound pages". They are structured thusly:
  *
@@ -147,7 +143,6 @@ static void destroy_compound_page(struct
 		ClearPageCompound(p);
 	}
 }
-#endif /* CONFIG_HUGETLB_PAGE */
 
 /*
  * Freeing function for a buddy system allocator.
@@ -178,7 +173,7 @@ static inline void __free_pages_bulk (st
 {
 	unsigned long page_idx, index;
 
-	if (order)
+	if (PageCompound(page))
 		destroy_compound_page(page, order);
 	page_idx = page - base;
 	if (page_idx & ~mask)
@@ -306,47 +301,37 @@ expand(struct zone *zone, struct page *p
 	return page;
}
 
-static inline void set_page_refs(struct page *page, int order)
-{
-#ifdef CONFIG_MMU
-	set_page_count(page, 1);
-#else
-	int i;
-
-	/*
-	 * We need to reference all the pages for this order, otherwise if
-	 * anyone accesses one of the pages with (get/put) it will be freed.
-	 */
-	for (i = 0; i < (1 << order); i++)
-		set_page_count(page+i, 1);
-#endif /* CONFIG_MMU */
-}
-
 /*
  * This page is about to be returned from the page allocator
  */
-static void prep_new_page(struct page *page, int order)
+static void prep_new_page(struct page * _page, int order)
 {
-	if (page->mapping ||
-	    page->mapcount ||
-	    (page->flags & (
-			1 << PG_private	|
-			1 << PG_locked	|
-			1 << PG_lru	|
-			1 << PG_active	|
-			1 << PG_dirty	|
-			1 << PG_reclaim	|
-			1 << PG_anon	|
-			1 << PG_maplock	|
-			1 << PG_swapcache |
-			1 << PG_writeback )))
-		bad_page(__FUNCTION__, page);
+	int i;
+
+	for (i = 0; i < (1 << order); i++) {
+		struct page * page = _page + i;
 
-	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
-			1 << PG_referenced | 1 << PG_arch_1 |
-			1 << PG_checked | 1 << PG_mappedtodisk);
-	page->private = 0;
-	set_page_refs(page, order);
+		if (page->mapping ||
+		    page->mapcount ||
+		    (page->flags & (
+				1 << PG_private	|
+				1 << PG_locked	|
+				1 << PG_lru	|
+				1 << PG_active	|
+				1 << PG_dirty	|
+				1 << PG_reclaim	|
+				1 << PG_anon	|
+				1 << PG_maplock	|
+				1 << PG_swapcache |
+				1 << PG_writeback )))
+			bad_page(__FUNCTION__, page);
+
+		page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
+				1 << PG_referenced | 1 << PG_arch_1 |
+				1 << PG_checked | 1 << PG_mappedtodisk);
+		page->private = 0;
+		set_page_count(page, 1);
+	}
 }
 
 /*
@@ -498,10 +483,11 @@ void fastcall free_cold_page(struct page
  * or two.
 */
-static struct page *buffered_rmqueue(struct zone *zone, int order, int cold)
+static struct page *buffered_rmqueue(struct zone *zone, int order, int cold_compound)
 {
 	unsigned long flags;
 	struct page *page = NULL;
+	int cold = !!(cold_compound & __GFP_COLD);
 
 	if (order == 0) {
 		struct per_cpu_pages *pcp;
@@ -530,7 +516,7 @@ static struct page *buffered_rmqueue(str
 		BUG_ON(bad_range(zone, page));
 		mod_page_state_zone(zone, pgalloc, 1 << order);
 		prep_new_page(page, order);
-		if (order)
+		if (unlikely(order) && !(cold_compound & __GFP_NO_COMP))
 			prep_compound_page(page, order);
 	}
 	return page;
@@ -570,7 +556,9 @@ __alloc_pages(unsigned int gfp_mask, uns
 
 	cold = 0;
 	if (gfp_mask & __GFP_COLD)
-		cold = 1;
+		cold = __GFP_COLD;
+	if (gfp_mask & __GFP_NO_COMP)
+		cold |= __GFP_NO_COMP;
 
 	zones = zonelist->zones;  /* the list of zones suitable for gfp_mask */
 	if (zones[0] == NULL)     /* no zones in the zonelist */
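For reference, a minimal sketch of what a caller of the new flag looks like. This snippet is illustrative only and not part of the patch; alloc_plain_block() is a hypothetical helper modelled on the swsusp pagedir usage above. With __GFP_NO_COMP, prep_compound_page() is skipped for an order > 0 allocation, so PG_compound is never set and every constituent page keeps its own reference count of 1 from prep_new_page():

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Hypothetical example: allocate 1 << order contiguous pages that
 * must stay non-compound because individual pages will be
 * refcounted, the way swsusp treats its pagedir pages.
 */
static unsigned long alloc_plain_block(int order)
{
	unsigned long addr;

	/* __GFP_NO_COMP: no compound-page head/tail linkage is set up */
	addr = __get_free_pages(GFP_KERNEL | __GFP_NO_COMP, order);
	if (!addr)
		return 0;

	/*
	 * get_page()/put_page() on a constituent page now act on that
	 * page alone, instead of being redirected to the head page of
	 * a compound page by the PageCompound() paths in mm.h.
	 */
	get_page(virt_to_page(addr + PAGE_SIZE));
	put_page(virt_to_page(addr + PAGE_SIZE));

	return addr;	/* still freed with free_pages(addr, order) */
}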