diff options
author | Hugh Dickins <hugh@veritas.com> | 2004-08-23 21:24:46 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2004-08-23 21:24:46 -0700 |
commit | 6992904141794e1d5f6ef2e9b770ba491550a237 (patch) | |
tree | 07fd2e4dec00ab9ae701a6c540621f9e7d8e73e6 /mm | |
parent | 9d9ae43b26ac90bab0213b8bb1245ac9ef966a97 (diff) | |
download | history-6992904141794e1d5f6ef2e9b770ba491550a237.tar.gz |
[PATCH] rmaplock: swapoff use anon_vma
Swapoff can make good use of a page's anon_vma and index, while it's still
left in swapcache, or once it's brought back in and the first pte mapped back:
letting unuse_vma go directly to just one page of only those vmas with the same
anon_vma. And unuse_process can skip any vmas without an anon_vma (extending
the hugetlb check: hugetlb vmas have no anon_vma).
This just hacks in on top of the existing procedure, still going through all
the vmas of all the mms in mmlist. A more elegant procedure might replace
mmlist by a list of anon_vmas: but that would be more work to implement, with
apparently more overhead in the common paths.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/rmap.c | 20 | ||||
-rw-r--r-- | mm/swapfile.c | 23 |
2 files changed, 36 insertions, 7 deletions
diff --git a/mm/rmap.c b/mm/rmap.c index 9709b89a39b1fb..19c159bf3c9831 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -231,6 +231,24 @@ vma_address(struct page *page, struct vm_area_struct *vma) } /* + * At what user virtual address is page expected in vma? checking that the + * page matches the vma: currently only used by unuse_process, on anon pages. + */ +unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) +{ + if (PageAnon(page)) { + if ((void *)vma->anon_vma != + (void *)page->mapping - PAGE_MAPPING_ANON) + return -EFAULT; + } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { + if (vma->vm_file->f_mapping != page->mapping) + return -EFAULT; + } else + return -EFAULT; + return vma_address(page, vma); +} + +/* * Subfunctions of page_referenced: page_referenced_one called * repeatedly from either page_referenced_anon or page_referenced_file. */ @@ -459,6 +477,8 @@ void page_remove_rmap(struct page *page) * which increments mapcount after us but sets mapping * before us: so leave the reset to free_hot_cold_page, * and remember that it's only reliable while mapped. + * Leaving it set also helps swapoff to reinstate ptes + * faster for those pages still in swapcache. 
*/ if (page_test_and_clear_dirty(page)) set_page_dirty(page); diff --git a/mm/swapfile.c b/mm/swapfile.c index a523ca237636d4..b7ecb1bb201496 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -520,14 +520,24 @@ static unsigned long unuse_pgd(struct vm_area_struct * vma, pgd_t *dir, } /* vma->vm_mm->page_table_lock is held */ -static unsigned long unuse_vma(struct vm_area_struct * vma, pgd_t *pgdir, +static unsigned long unuse_vma(struct vm_area_struct * vma, swp_entry_t entry, struct page *page) { - unsigned long start = vma->vm_start, end = vma->vm_end; + pgd_t *pgdir; + unsigned long start, end; unsigned long foundaddr; - if (start >= end) - BUG(); + if (page->mapping) { + start = page_address_in_vma(page, vma); + if (start == -EFAULT) + return 0; + else + end = start + PAGE_SIZE; + } else { + start = vma->vm_start; + end = vma->vm_end; + } + pgdir = pgd_offset(vma->vm_mm, start); do { foundaddr = unuse_pgd(vma, pgdir, start, end - start, entry, page); @@ -559,9 +569,8 @@ static int unuse_process(struct mm_struct * mm, } spin_lock(&mm->page_table_lock); for (vma = mm->mmap; vma; vma = vma->vm_next) { - if (!is_vm_hugetlb_page(vma)) { - pgd_t * pgd = pgd_offset(mm, vma->vm_start); - foundaddr = unuse_vma(vma, pgd, entry, page); + if (vma->anon_vma) { + foundaddr = unuse_vma(vma, entry, page); if (foundaddr) break; } |