From Dave. objrmap: object-based reverse mapping. Adds a PG_anon page flag and a per-page pte.mapcount so that anonymous pages (which keep using pte_chains) can be distinguished from file-backed pages; file-backed pages are instead reverse-mapped by walking the vmas on their address_space's i_mmap and i_mmap_shared lists in page_referenced() and try_to_unmap(). fs/exec.c | 1 include/asm-i386/mman.h | 0 include/asm-ppc64/mman.h | 0 include/linux/mm.h | 1 include/linux/page-flags.h | 5 + mm/fremap.c | 6 + mm/memory.c | 8 + mm/mmap.c | 0 mm/page_alloc.c | 2 mm/rmap.c | 222 +++++++++++++++++++++++++++++++++++++++++++++ mm/swapfile.c | 1 11 files changed, 246 insertions(+) diff -puN fs/exec.c~objrmap-2.5.62-5 fs/exec.c --- 25/fs/exec.c~objrmap-2.5.62-5 2003-03-13 02:31:54.000000000 -0800 +++ 25-akpm/fs/exec.c 2003-03-13 02:31:54.000000000 -0800 @@ -316,6 +316,7 @@ void put_dirty_page(struct task_struct * lru_cache_add_active(page); flush_dcache_page(page); flush_page_to_ram(page); + SetPageAnon(page); set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(page, PAGE_COPY)))); pte_chain = page_add_rmap(page, pte, pte_chain); pte_unmap(pte); diff -puN include/asm-i386/mman.h~objrmap-2.5.62-5 include/asm-i386/mman.h diff -puN include/linux/mm.h~objrmap-2.5.62-5 include/linux/mm.h --- 25/include/linux/mm.h~objrmap-2.5.62-5 2003-03-13 02:31:54.000000000 -0800 +++ 25-akpm/include/linux/mm.h 2003-03-13 02:34:35.000000000 -0800 @@ -171,6 +171,7 @@ struct page { struct pte_chain *chain;/* Reverse pte mapping pointer. * protected by PG_chainlock */ pte_addr_t direct; + int mapcount; } pte; unsigned long private; /* mapping-private opaque data */ diff -puN include/linux/page-flags.h~objrmap-2.5.62-5 include/linux/page-flags.h --- 25/include/linux/page-flags.h~objrmap-2.5.62-5 2003-03-13 02:31:54.000000000 -0800 +++ 25-akpm/include/linux/page-flags.h 2003-03-13 02:31:54.000000000 -0800 @@ -74,6 +74,7 @@ #define PG_mappedtodisk 17 /* Has blocks allocated on-disk */ #define PG_reclaim 18 /* To be reclaimed asap */ #define PG_compound 19 /* Part of a compound page */ +#define PG_anon 20 /* Anonymous page */ /* * Global page accounting. One instance per CPU. 
Only unsigned longs are @@ -256,6 +257,10 @@ extern void get_full_page_state(struct p #define SetPageCompound(page) set_bit(PG_compound, &(page)->flags) #define ClearPageCompound(page) clear_bit(PG_compound, &(page)->flags) +#define PageAnon(page) test_bit(PG_anon, &(page)->flags) +#define SetPageAnon(page) set_bit(PG_anon, &(page)->flags) +#define ClearPageAnon(page) clear_bit(PG_anon, &(page)->flags) + /* * The PageSwapCache predicate doesn't use a PG_flag at this time, * but it may again do so one day. diff -puN mm/fremap.c~objrmap-2.5.62-5 mm/fremap.c --- 25/mm/fremap.c~objrmap-2.5.62-5 2003-03-13 02:31:54.000000000 -0800 +++ 25-akpm/mm/fremap.c 2003-03-13 02:35:34.000000000 -0800 @@ -57,6 +57,7 @@ int install_page(struct mm_struct *mm, s pgd_t *pgd; pmd_t *pmd; struct pte_chain *pte_chain; + unsigned long pgidx; pte_chain = pte_chain_alloc(GFP_KERNEL); if (!pte_chain) @@ -79,6 +80,11 @@ int install_page(struct mm_struct *mm, s flush_icache_page(vma, page); entry = mk_pte(page, prot); set_pte(pte, entry); + pgidx = (addr - vma->vm_start) >> PAGE_SHIFT; + pgidx += vma->vm_pgoff; + pgidx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT; + if (page->index != pgidx) + SetPageAnon(page); pte_chain = page_add_rmap(page, pte, pte_chain); pte_unmap(pte); if (flush) diff -puN mm/memory.c~objrmap-2.5.62-5 mm/memory.c --- 25/mm/memory.c~objrmap-2.5.62-5 2003-03-13 02:31:54.000000000 -0800 +++ 25-akpm/mm/memory.c 2003-03-13 02:31:54.000000000 -0800 @@ -1017,6 +1017,7 @@ static int do_wp_page(struct mm_struct * ++mm->rss; page_remove_rmap(old_page, page_table); break_cow(vma, new_page, address, page_table); + SetPageAnon(new_page); pte_chain = page_add_rmap(new_page, page_table, pte_chain); lru_cache_add_active(new_page); @@ -1226,6 +1227,7 @@ static int do_swap_page(struct mm_struct flush_page_to_ram(page); flush_icache_page(vma, page); set_pte(page_table, pte); + SetPageAnon(page); pte_chain = page_add_rmap(page, page_table, pte_chain); /* No need to invalidate - it was non-present 
before */ @@ -1292,6 +1294,7 @@ do_anonymous_page(struct mm_struct *mm, entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); lru_cache_add_active(page); mark_page_accessed(page); + SetPageAnon(page); } set_pte(page_table, entry); @@ -1351,6 +1354,10 @@ do_no_page(struct mm_struct *mm, struct if (!pte_chain) goto oom; + /* See if nopage returned an anon page */ + if (!new_page->mapping || PageSwapCache(new_page)) + SetPageAnon(new_page); + /* * Should we do an early C-O-W break? */ @@ -1363,6 +1370,7 @@ do_no_page(struct mm_struct *mm, struct copy_user_highpage(page, new_page, address); page_cache_release(new_page); lru_cache_add_active(page); + SetPageAnon(page); new_page = page; } diff -puN mm/mmap.c~objrmap-2.5.62-5 mm/mmap.c diff -puN mm/page_alloc.c~objrmap-2.5.62-5 mm/page_alloc.c --- 25/mm/page_alloc.c~objrmap-2.5.62-5 2003-03-13 02:31:54.000000000 -0800 +++ 25-akpm/mm/page_alloc.c 2003-03-13 02:31:54.000000000 -0800 @@ -220,6 +220,8 @@ static inline void free_pages_check(cons bad_page(function, page); if (PageDirty(page)) ClearPageDirty(page); + if (PageAnon(page)) + ClearPageAnon(page); } /* diff -puN mm/rmap.c~objrmap-2.5.62-5 mm/rmap.c --- 25/mm/rmap.c~objrmap-2.5.62-5 2003-03-13 02:31:54.000000000 -0800 +++ 25-akpm/mm/rmap.c 2003-03-13 02:31:54.000000000 -0800 @@ -86,6 +86,87 @@ kmem_cache_t *pte_chain_cache; * If the page has a single-entry pte_chain, collapse that back to a PageDirect * representation. This way, it's only done under memory pressure. 
*/ +static inline int +page_referenced_obj_one(struct vm_area_struct *vma, struct page *page) +{ + struct mm_struct *mm = vma->vm_mm; + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + unsigned long loffset; + unsigned long address; + int referenced = 0; + + loffset = (page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT)); + if (loffset < vma->vm_pgoff) + goto out; + + address = vma->vm_start + ((loffset - vma->vm_pgoff) << PAGE_SHIFT); + + if (address >= vma->vm_end) + goto out; + + if (!spin_trylock(&mm->page_table_lock)) { + referenced = 1; + goto out; + } + pgd = pgd_offset(mm, address); + if (!pgd_present(*pgd)) + goto out_unlock; + + pmd = pmd_offset(pgd, address); + if (!pmd_present(*pmd)) + goto out_unlock; + + pte = pte_offset_map(pmd, address); + if (!pte_present(*pte)) + goto out_unmap; + + if (page_to_pfn(page) != pte_pfn(*pte)) + goto out_unmap; + + if (ptep_test_and_clear_young(pte)) + referenced++; +out_unmap: + pte_unmap(pte); + +out_unlock: + spin_unlock(&mm->page_table_lock); + +out: + return referenced; +} + +static int +page_referenced_obj(struct page *page) +{ + struct address_space *mapping = page->mapping; + struct vm_area_struct *vma; + int referenced = 0; + + if (!page->pte.mapcount) + return 0; + + if (!mapping) + BUG(); + + if (PageSwapCache(page)) + BUG(); + + if (down_trylock(&mapping->i_shared_sem)) + return 1; + + list_for_each_entry(vma, &mapping->i_mmap, shared) + referenced += page_referenced_obj_one(vma, page); + + list_for_each_entry(vma, &mapping->i_mmap_shared, shared) + referenced += page_referenced_obj_one(vma, page); + + up(&mapping->i_shared_sem); + + return referenced; +} + int page_referenced(struct page * page) { struct pte_chain * pc; @@ -94,6 +175,10 @@ int page_referenced(struct page * page) if (TestClearPageReferenced(page)) referenced++; + if (!PageAnon(page)) { + referenced += page_referenced_obj(page); + goto out; + } if (PageDirect(page)) { pte_t *pte = rmap_ptep_map(page->pte.direct); if (ptep_test_and_clear_young(pte)) @@ 
-127,6 +212,7 @@ int page_referenced(struct page * page) __pte_chain_free(pc); } } +out: return referenced; } @@ -159,6 +245,18 @@ page_add_rmap(struct page *page, pte_t * pte_chain_lock(page); + if (!PageAnon(page)) { + if (!page->mapping) + BUG(); + if (PageSwapCache(page)) + BUG(); + if (!page->pte.mapcount) + inc_page_state(nr_mapped); + page->pte.mapcount++; + pte_chain_unlock(page); + return pte_chain; + } + #ifdef DEBUG_RMAP /* * This stuff needs help to get up to highmem speed. @@ -247,6 +345,20 @@ void page_remove_rmap(struct page * page pte_chain_lock(page); + if (!PageAnon(page)) { + if (!page->mapping) + BUG(); + if (PageSwapCache(page)) + BUG(); + if (!page->pte.mapcount) + BUG(); + page->pte.mapcount--; + if (!page->pte.mapcount) + dec_page_state(nr_mapped); + pte_chain_unlock(page); + return; + } + if (PageDirect(page)) { if (page->pte.direct == pte_paddr) { page->pte.direct = 0; @@ -310,6 +422,111 @@ out: return; } +static inline int +try_to_unmap_obj_one(struct vm_area_struct *vma, struct page *page) +{ + struct mm_struct *mm = vma->vm_mm; + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + pte_t pteval; + unsigned long loffset; + unsigned long address; + int ret = SWAP_SUCCESS; + + loffset = (page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT)); + if (loffset < vma->vm_pgoff) + goto out; + + address = vma->vm_start + ((loffset - vma->vm_pgoff) << PAGE_SHIFT); + + if (address >= vma->vm_end) + goto out; + + if (!spin_trylock(&mm->page_table_lock)) { + ret = SWAP_AGAIN; + goto out; + } + pgd = pgd_offset(mm, address); + if (!pgd_present(*pgd)) + goto out_unlock; + + pmd = pmd_offset(pgd, address); + if (!pmd_present(*pmd)) + goto out_unlock; + + pte = pte_offset_map(pmd, address); + if (!pte_present(*pte)) + goto out_unmap; + + if (page_to_pfn(page) != pte_pfn(*pte)) + goto out_unmap; + + if (vma->vm_flags & VM_LOCKED) { + ret = SWAP_FAIL; + goto out_unmap; + } + + flush_cache_page(vma, address); + pteval = ptep_get_and_clear(pte); + flush_tlb_page(vma, 
address); + + if (pte_dirty(pteval)) + set_page_dirty(page); + + if (!page->pte.mapcount) + BUG(); + + mm->rss--; + page->pte.mapcount--; + page_cache_release(page); + +out_unmap: + pte_unmap(pte); + +out_unlock: + spin_unlock(&mm->page_table_lock); + +out: + return ret; +} + +static int +try_to_unmap_obj(struct page *page) +{ + struct address_space *mapping = page->mapping; + struct vm_area_struct *vma; + int ret = SWAP_SUCCESS; + + if (!mapping) + BUG(); + + if (PageSwapCache(page)) + BUG(); + + if (down_trylock(&mapping->i_shared_sem)) + return SWAP_AGAIN; + + list_for_each_entry(vma, &mapping->i_mmap, shared) { + ret = try_to_unmap_obj_one(vma, page); + if (ret != SWAP_SUCCESS) + goto out; + } + + list_for_each_entry(vma, &mapping->i_mmap_shared, shared) { + ret = try_to_unmap_obj_one(vma, page); + if (ret != SWAP_SUCCESS) + goto out; + } + + if (page->pte.mapcount) + BUG(); + +out: + up(&mapping->i_shared_sem); + return ret; +} + /** * try_to_unmap_one - worker function for try_to_unmap * @page: page to unmap @@ -431,6 +648,11 @@ int try_to_unmap(struct page * page) if (!page->mapping) BUG(); + if (!PageAnon(page)) { + ret = try_to_unmap_obj(page); + goto out; + } + if (PageDirect(page)) { ret = try_to_unmap_one(page, page->pte.direct); if (ret == SWAP_SUCCESS) { diff -puN mm/swapfile.c~objrmap-2.5.62-5 mm/swapfile.c --- 25/mm/swapfile.c~objrmap-2.5.62-5 2003-03-13 02:31:54.000000000 -0800 +++ 25-akpm/mm/swapfile.c 2003-03-13 02:31:54.000000000 -0800 @@ -392,6 +392,7 @@ unuse_pte(struct vm_area_struct *vma, un return; get_page(page); set_pte(dir, pte_mkold(mk_pte(page, vma->vm_page_prot))); + SetPageAnon(page); *pte_chainp = page_add_rmap(page, dir, *pte_chainp); swap_free(entry); ++vma->vm_mm->rss; diff -puN include/asm-ppc64/mman.h~objrmap-2.5.62-5 include/asm-ppc64/mman.h _