Rework put_page() so that we only make the indirect call to the
page-freeing function on the final put_page(), rather than on every
invocation.

 i386/mm/hugetlbpage.c    |    4 +++-
 ia64/mm/hugetlbpage.c    |    3 +++
 sparc64/mm/hugetlbpage.c |    2 ++
 x86_64/mm/hugetlbpage.c  |    2 ++
 linux/mm.h               |   12 ++++++++----
 5 files changed, 18 insertions(+), 5 deletions(-)

diff -puN include/linux/mm.h~put_page-speedup include/linux/mm.h
--- 25/include/linux/mm.h~put_page-speedup	2003-02-11 17:48:27.000000000 -0800
+++ 25-akpm/include/linux/mm.h	2003-02-11 17:48:27.000000000 -0800
@@ -232,11 +232,15 @@ static inline void get_page(struct page
 static inline void put_page(struct page *page)
 {
 	if (PageCompound(page)) {
-		page = (struct page *)page->lru.next;
-		if (page->lru.prev) {	/* destructor? */
-			(*(void (*)(struct page *))page->lru.prev)(page);
-			return;
+		if (put_page_testzero(page)) {
+			page = (struct page *)page->lru.next;
+			if (page->lru.prev) {	/* destructor? */
+				(*(void (*)(struct page *))page->lru.prev)(page);
+			} else {
+				__page_cache_release(page);
+			}
 		}
+		return;
 	}
 	if (!PageReserved(page) && put_page_testzero(page))
 		__page_cache_release(page);
diff -puN arch/i386/mm/hugetlbpage.c~put_page-speedup arch/i386/mm/hugetlbpage.c
--- 25/arch/i386/mm/hugetlbpage.c~put_page-speedup	2003-02-11 17:48:27.000000000 -0800
+++ 25-akpm/arch/i386/mm/hugetlbpage.c	2003-02-11 17:48:27.000000000 -0800
@@ -29,6 +29,8 @@ static long htlbzone_pages;
 static LIST_HEAD(htlbpage_freelist);
 static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
 
+void free_huge_page(struct page *page);
+
 static struct page *alloc_hugetlb_page(void)
 {
 	int i;
@@ -45,7 +47,7 @@ static struct page *alloc_hugetlb_page(v
 	htlbpagemem--;
 	spin_unlock(&htlbpage_lock);
 	set_page_count(page, 1);
-	page->lru.prev = (void *)huge_page_release;
+	page->lru.prev = (void *)free_huge_page;
 	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
 		clear_highpage(&page[i]);
 	return page;
diff -puN arch/ia64/mm/hugetlbpage.c~put_page-speedup arch/ia64/mm/hugetlbpage.c
--- 25/arch/ia64/mm/hugetlbpage.c~put_page-speedup	2003-02-11 17:48:27.000000000 -0800
+++ 25-akpm/arch/ia64/mm/hugetlbpage.c	2003-02-11 17:48:27.000000000 -0800
@@ -26,6 +26,8 @@ static long htlbzone_pages;
 static LIST_HEAD(htlbpage_freelist);
 static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
 
+void free_huge_page(struct page *page);
+
 static struct page *alloc_hugetlb_page(void)
 {
 	int i;
@@ -42,6 +44,7 @@ static struct page *alloc_hugetlb_page(v
 	htlbpagemem--;
 	spin_unlock(&htlbpage_lock);
 	set_page_count(page, 1);
+	page->lru.prev = (void *)free_huge_page;
 	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
 		clear_highpage(&page[i]);
 	return page;
diff -puN arch/sparc64/mm/hugetlbpage.c~put_page-speedup arch/sparc64/mm/hugetlbpage.c
--- 25/arch/sparc64/mm/hugetlbpage.c~put_page-speedup	2003-02-11 17:48:27.000000000 -0800
+++ 25-akpm/arch/sparc64/mm/hugetlbpage.c	2003-02-11 17:48:27.000000000 -0800
@@ -25,6 +25,7 @@ spinlock_t htlbpage_lock = SPIN_LOCK_UNL
 extern long htlbpagemem;
 
 static void zap_hugetlb_resources(struct vm_area_struct *);
+void free_huge_page(struct page *page);
 
 #define MAX_ID 	32
 struct htlbpagekey {
@@ -64,6 +65,7 @@ static struct page *alloc_hugetlb_page(v
 	spin_unlock(&htlbpage_lock);
 
 	set_page_count(page, 1);
+	page->lru.prev = (void *)free_huge_page;
 	memset(page_address(page), 0, HPAGE_SIZE);
 
 	return page;
diff -puN arch/x86_64/mm/hugetlbpage.c~put_page-speedup arch/x86_64/mm/hugetlbpage.c
--- 25/arch/x86_64/mm/hugetlbpage.c~put_page-speedup	2003-02-11 17:48:27.000000000 -0800
+++ 25-akpm/arch/x86_64/mm/hugetlbpage.c	2003-02-11 17:48:27.000000000 -0800
@@ -27,6 +27,7 @@ static long htlbzone_pages;
 static LIST_HEAD(htlbpage_freelist);
 static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
 
+void free_huge_page(struct page *page);
 
 static struct page *alloc_hugetlb_page(void)
 {
@@ -44,6 +45,7 @@ static struct page *alloc_hugetlb_page(v
 	htlbpagemem--;
 	spin_unlock(&htlbpage_lock);
 	set_page_count(page, 1);
+	page->lru.prev = (void *)free_huge_page;
 	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
 		clear_highpage(&page[i]);
 	return page;
_
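
For reference, a minimal standalone sketch of the convention this patch
relies on: the head page of a compound page carries a destructor pointer
in ->lru.prev, pages reach the head via ->lru.next, and after this change
the indirect destructor call is made only when put_page_testzero()
succeeds.  This is illustration only, not kernel code: all demo_* names
are hypothetical stand-ins, and the PageReserved() case is omitted.

/*
 * Userspace model of the patched put_page().  `head' stands in for
 * page->lru.next, `dtor' for page->lru.prev, `count' for page->count.
 */
#include <stdio.h>

struct demo_page {
	int count;				/* models page->count */
	struct demo_page *head;			/* models page->lru.next */
	void (*dtor)(struct demo_page *);	/* models page->lru.prev */
	int compound;				/* models PageCompound() */
};

static void demo_free_huge_page(struct demo_page *page)
{
	printf("free_huge_page: one indirect call, on the final put\n");
}

static void demo_put_page(struct demo_page *page)
{
	if (page->compound) {
		/* Test the refcount first, so only the final put pays
		 * for the indirect destructor call. */
		if (--page->count == 0) {
			page = page->head;
			if (page->dtor)
				page->dtor(page);
			else
				printf("__page_cache_release fallback\n");
		}
		return;
	}
	if (--page->count == 0)
		printf("__page_cache_release\n");
}

int main(void)
{
	struct demo_page page = { 2, &page, demo_free_huge_page, 1 };

	demo_put_page(&page);	/* count 2 -> 1: no indirect call */
	demo_put_page(&page);	/* count 1 -> 0: destructor runs once */
	return 0;
}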