diff -urN linux/include/linux/mmzone.h linux-wli/include/linux/mmzone.h
--- linux/include/linux/mmzone.h	Sat Mar 16 02:26:52 2002
+++ linux-wli/include/linux/mmzone.h	Tue Mar 19 00:24:44 2002
@@ -25,6 +25,7 @@
 } free_area_t;
 
 struct pglist_data;
+struct pte_chain;
 
 /*
  * On machines where it is needed (eg PCs) we divide physical memory
@@ -54,6 +55,9 @@
 	struct list_head	inactive_dirty_list;
 	struct list_head	inactive_clean_list;
 	free_area_t		free_area[MAX_ORDER];
+
+	spinlock_t		pte_chain_freelist_lock;
+	struct pte_chain *	pte_chain_freelist;
 
 	/*
 	 * wait_table		-- the array holding the hash table
diff -urN linux/mm/page_alloc.c linux-wli/mm/page_alloc.c
--- linux/mm/page_alloc.c	Sat Mar 16 02:26:58 2002
+++ linux-wli/mm/page_alloc.c	Tue Mar 19 00:25:18 2002
@@ -953,10 +953,12 @@
 		zone->inactive_clean_pages = 0;
 		zone->inactive_dirty_pages = 0;
 		zone->need_balance = 0;
+		zone->pte_chain_freelist = NULL;
 		INIT_LIST_HEAD(&zone->active_list);
 		INIT_LIST_HEAD(&zone->inactive_dirty_list);
 		INIT_LIST_HEAD(&zone->inactive_clean_list);
 		spin_lock_init(&zone->lru_lock);
+		spin_lock_init(&zone->pte_chain_freelist_lock);
 
 		if (!size)
 			continue;
diff -urN linux/mm/rmap.c linux-wli/mm/rmap.c
--- linux/mm/rmap.c	Tue Mar 19 00:57:03 2002
+++ linux-wli/mm/rmap.c	Tue Mar 19 00:51:41 2002
@@ -49,10 +49,27 @@
 	pte_t * ptep;
 };
 
-static struct pte_chain * pte_chain_freelist;
-static inline struct pte_chain * pte_chain_alloc(void);
-static inline void pte_chain_free(struct pte_chain *, struct pte_chain *, struct page *);
-static void alloc_new_pte_chains(void);
+static inline struct pte_chain * pte_chain_alloc(zone_t *);
+static inline void pte_chain_free(zone_t *, struct pte_chain *, struct pte_chain *, struct page *);
+static void alloc_new_pte_chains(zone_t *);
+
+static inline void pte_chain_push(zone_t *zone, struct pte_chain *pte_chain)
+{
+	pte_chain->ptep = NULL;
+	pte_chain->next = zone->pte_chain_freelist;
+	zone->pte_chain_freelist = pte_chain;
+}
+
+static inline struct pte_chain *pte_chain_pop(zone_t *zone)
+{
+	struct pte_chain *pte_chain;
+
+	pte_chain = zone->pte_chain_freelist;
+	zone->pte_chain_freelist = pte_chain->next;
+	pte_chain->next = NULL;
+
+	return pte_chain;
+}
 
 /**
  * page_referenced - test if the page was referenced
@@ -91,12 +108,15 @@
 void FASTCALL(page_add_rmap(struct page *, pte_t *));
 void page_add_rmap(struct page * page, pte_t * ptep)
 {
+	zone_t *zone;
 	struct pte_chain * pte_chain;
 
 	if (!VALID_PAGE(page) || PageReserved(page))
 		return;
 
-	lock_lru(page_zone(page));
+	zone = page_zone(page);
+
+	lock_lru(zone);
 #ifdef DEBUG_RMAP
 	if (!page || !ptep)
 		BUG();
@@ -112,14 +132,14 @@
 		}
 	}
 #endif
-	pte_chain = pte_chain_alloc();
+	pte_chain = pte_chain_alloc(zone);
 
 	/* Hook up the pte_chain to the page. */
 	pte_chain->ptep = ptep;
 	pte_chain->next = page->pte_chain;
 	page->pte_chain = pte_chain;
 
-	unlock_lru(page_zone(page));
+	unlock_lru(zone);
 }
 
 /**
@@ -136,15 +156,18 @@
 void page_remove_rmap(struct page * page, pte_t * ptep)
 {
 	struct pte_chain * pc, * prev_pc = NULL;
+	zone_t *zone;
 
 	BUG_ON(!page || !ptep);
 	if (!VALID_PAGE(page) || PageReserved(page))
 		return;
 
-	lock_lru(page_zone(page));
+	zone = page_zone(page);
+
+	lock_lru(zone);
 	for (pc = page->pte_chain; pc; prev_pc = pc, pc = pc->next) {
 		if (pc->ptep == ptep) {
-			pte_chain_free(pc, prev_pc, page);
+			pte_chain_free(zone, pc, prev_pc, page);
 			goto out;
 		}
 	}
@@ -159,7 +182,7 @@
 #endif
 
 out:
-	unlock_lru(page_zone(page));
+	unlock_lru(zone);
 	return;
 }
 
@@ -264,7 +287,7 @@
 		switch (try_to_unmap_one(page, pc->ptep)) {
 			case SWAP_SUCCESS:
 				/* Free the pte_chain struct. */
-				pte_chain_free(pc, prev_pc, page);
+				pte_chain_free(page_zone(page), pc, prev_pc, page);
 				break;
 			case SWAP_AGAIN:
 				/* Skip this pte, remembering status. */
@@ -320,16 +343,19 @@
  * called for new pte_chain structures which aren't on any list yet.
  * Caller needs to hold the lru lock.
  */
-static inline void pte_chain_free(struct pte_chain * pte_chain, struct pte_chain * prev_pte_chain, struct page * page)
+static inline void pte_chain_free(	zone_t * zone,
+			struct pte_chain * pte_chain,
+			struct pte_chain * prev_pte_chain,
+			struct page * page)
 {
 	if (prev_pte_chain)
 		prev_pte_chain->next = pte_chain->next;
 	else if (page)
 		page->pte_chain = pte_chain->next;
 
-	pte_chain->ptep = NULL;
-	pte_chain->next = pte_chain_freelist;
-	pte_chain_freelist = pte_chain;
+	spin_lock(&zone->pte_chain_freelist_lock);
+	pte_chain_push(zone, pte_chain);
+	spin_unlock(&zone->pte_chain_freelist_lock);
 }
 
 /**
@@ -339,18 +365,18 @@
  * pte_chain structures as required.
  * Caller needs to hold the lru locks.
  */
-static inline struct pte_chain * pte_chain_alloc(void)
+static inline struct pte_chain * pte_chain_alloc(zone_t *zone)
 {
 	struct pte_chain * pte_chain;
 
 	/* Allocate new pte_chain structs as needed. */
-	if (!pte_chain_freelist)
-		alloc_new_pte_chains();
+	if (!zone->pte_chain_freelist)
+		alloc_new_pte_chains(zone);
 
 	/* Grab the first pte_chain from the freelist. */
-	pte_chain = pte_chain_freelist;
-	pte_chain_freelist = pte_chain->next;
-	pte_chain->next = NULL;
+	spin_lock(&zone->pte_chain_freelist_lock);
+	pte_chain = pte_chain_pop(zone);
+	spin_unlock(&zone->pte_chain_freelist_lock);
 
 	return pte_chain;
 }
@@ -365,16 +391,18 @@
  * Note that we cannot use the slab cache because the pte_chain structure
 * is way smaller than the minimum size of a slab cache allocation.
  */
-static void alloc_new_pte_chains(void)
+static void alloc_new_pte_chains(zone_t *zone)
 {
 	struct pte_chain * pte_chain = (void *) get_zeroed_page(GFP_ATOMIC);
 	int i = PAGE_SIZE / sizeof(struct pte_chain);
 
+	spin_lock(&zone->pte_chain_freelist_lock);
 	if (pte_chain) {
 		for (; i-- > 0; pte_chain++)
-			pte_chain_free(pte_chain, NULL, NULL);
+			pte_chain_push(zone, pte_chain);
 	} else {
 		/* Yeah yeah, I'll fix the pte_chain allocation ... */
 		panic("Fix pte_chain allocation, you lazy bastard!\n");
 	}
+	spin_unlock(&zone->pte_chain_freelist_lock);
 }
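
For readers who want the freelist scheme in isolation: the sketch below is a minimal userspace analogue of what the patch does, not kernel code. It keeps a singly linked pte_chain freelist per zone, pushes and pops entries under a per-zone lock, and refills the list one page-sized block at a time when it runs dry. The struct and function names mirror the patch for readability; the pthread mutex, the calloc() refill, and the FAKE_PAGE_SIZE constant are stand-ins assumed for the example, where the patch itself uses the zone spinlock and get_zeroed_page(GFP_ATOMIC).

/*
 * Illustrative userspace sketch of the per-zone pte_chain freelist
 * pattern introduced above. Not part of the patch.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define FAKE_PAGE_SIZE 4096	/* stand-in for PAGE_SIZE */

struct pte_chain {
	struct pte_chain *next;
	void *ptep;
};

struct zone {
	pthread_mutex_t pte_chain_freelist_lock;	/* stand-in for the zone spinlock */
	struct pte_chain *pte_chain_freelist;
};

/* Push one struct onto the zone's freelist; caller holds the freelist lock. */
static void pte_chain_push(struct zone *zone, struct pte_chain *pc)
{
	pc->ptep = NULL;
	pc->next = zone->pte_chain_freelist;
	zone->pte_chain_freelist = pc;
}

/* Pop one struct off the zone's freelist; caller holds the freelist lock. */
static struct pte_chain *pte_chain_pop(struct zone *zone)
{
	struct pte_chain *pc = zone->pte_chain_freelist;

	zone->pte_chain_freelist = pc->next;
	pc->next = NULL;
	return pc;
}

/* Refill the freelist from one page-sized allocation, batch-pushing the
 * pieces under the lock, as the patch does with a page from get_zeroed_page(). */
static int alloc_new_pte_chains(struct zone *zone)
{
	int n = FAKE_PAGE_SIZE / sizeof(struct pte_chain);
	struct pte_chain *block = calloc(n, sizeof(struct pte_chain));

	if (!block)
		return -1;
	pthread_mutex_lock(&zone->pte_chain_freelist_lock);
	while (n-- > 0)
		pte_chain_push(zone, block++);
	pthread_mutex_unlock(&zone->pte_chain_freelist_lock);
	return 0;
}

/* Hand out one pte_chain, refilling first if the zone's freelist is empty. */
static struct pte_chain *pte_chain_alloc(struct zone *zone)
{
	struct pte_chain *pc;

	if (!zone->pte_chain_freelist && alloc_new_pte_chains(zone) < 0)
		return NULL;
	pthread_mutex_lock(&zone->pte_chain_freelist_lock);
	pc = pte_chain_pop(zone);
	pthread_mutex_unlock(&zone->pte_chain_freelist_lock);
	return pc;
}

int main(void)
{
	struct zone zone = { .pte_chain_freelist = NULL };
	struct pte_chain *pc;

	pthread_mutex_init(&zone.pte_chain_freelist_lock, NULL);
	pc = pte_chain_alloc(&zone);
	printf("allocated pte_chain at %p, %d structs per refill\n",
	       (void *)pc, (int)(FAKE_PAGE_SIZE / sizeof(struct pte_chain)));
	return 0;
}

Build with "cc -pthread" to try it; the single-threaded main() just demonstrates one allocation after the first page-sized refill.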