author     Andrew Morton <akpm@osdl.org>              2004-05-22 08:05:14 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2004-05-22 08:05:14 -0700
commit     e8a2ef16fd2fbcf65d423a137290f94bd7f2bb02 (patch)
tree       f16dde1bf152f3c8feaa3e9055848b9d316d9475 /mm
parent     6633401dda1a4994174344c80685cd89c7d1e83c (diff)
[PATCH] numa api: Add policy support to anonymous memory
From: Andi Kleen <ak@suse.de>

Change the core VM to use alloc_page_vma() instead of alloc_page().
Change the swap readahead to follow the policy of the VMA.
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory.c      43
-rw-r--r--  mm/swap_state.c   5
-rw-r--r--  mm/swapfile.c     2
3 files changed, 40 insertions(+), 10 deletions(-)
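
The pattern repeated across all three files is mechanical: each bare
alloc_page(GFP_HIGHUSER) call becomes alloc_page_vma(GFP_HIGHUSER, vma, addr),
handing the allocator the faulting VMA and address so it can honor that VMA's
NUMA policy. The sketch below shows only the shape of the interface; the real
alloc_page_vma() is provided by the companion NUMA API patch in mm/mempolicy.c,
and the helper name, vm_policy test, and node choice here are illustrative
placeholders, not the kernel's implementation.

#include <linux/mm.h>
#include <linux/gfp.h>

/*
 * Illustrative sketch only (not the real alloc_page_vma()): choose the
 * allocation node from the policy attached to the VMA covering 'addr',
 * falling back to an ordinary allocation when no policy applies.
 */
static struct page *alloc_page_vma_sketch(unsigned int gfp_mask,
                struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_NUMA
        if (vma && vma->vm_policy) {
                int nid = 0;    /* placeholder: node picked from the policy */
                return alloc_pages_node(nid, gfp_mask, 0);
        }
#endif
        return alloc_page(gfp_mask);    /* no VMA context or no NUMA */
}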
diff --git a/mm/memory.c b/mm/memory.c
index b36b907bcaa040..71330a82b9b901 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1071,7 +1071,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
                 page_cache_get(old_page);
         spin_unlock(&mm->page_table_lock);
 
-        new_page = alloc_page(GFP_HIGHUSER);
+        new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
         if (!new_page)
                 goto no_new_page;
         copy_cow_page(old_page,new_page,address);
@@ -1237,9 +1237,17 @@ EXPORT_SYMBOL(vmtruncate);
  * (1 << page_cluster) entries in the swap area. This method is chosen
  * because it doesn't cost us any seek time. We also make sure to queue
  * the 'original' request together with the readahead ones...
+ *
+ * This has been extended to use the NUMA policies from the mm triggering
+ * the readahead.
+ *
+ * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
  */
-void swapin_readahead(swp_entry_t entry)
+void swapin_readahead(swp_entry_t entry, unsigned long addr, struct vm_area_struct *vma)
 {
+#ifdef CONFIG_NUMA
+        struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
+#endif
         int i, num;
         struct page *new_page;
         unsigned long offset;
@@ -1251,10 +1259,31 @@ void swapin_readahead(swp_entry_t entry)
         for (i = 0; i < num; offset++, i++) {
                 /* Ok, do the async read-ahead now */
                 new_page = read_swap_cache_async(swp_entry(swp_type(entry),
-                                                offset));
+                                                offset), vma, addr);
                 if (!new_page)
                         break;
                 page_cache_release(new_page);
+#ifdef CONFIG_NUMA
+                /*
+                 * Find the next applicable VMA for the NUMA policy.
+                 */
+                addr += PAGE_SIZE;
+                if (addr == 0)
+                        vma = NULL;
+                if (vma) {
+                        if (addr >= vma->vm_end) {
+                                vma = next_vma;
+                                next_vma = vma ? vma->vm_next : NULL;
+                        }
+                        if (vma && addr < vma->vm_start)
+                                vma = NULL;
+                } else {
+                        if (next_vma && addr >= next_vma->vm_start) {
+                                vma = next_vma;
+                                next_vma = vma->vm_next;
+                        }
+                }
+#endif
         }
         lru_add_drain();        /* Push any new pages onto the LRU now */
 }
@@ -1276,8 +1305,8 @@ static int do_swap_page(struct mm_struct * mm,
         spin_unlock(&mm->page_table_lock);
         page = lookup_swap_cache(entry);
         if (!page) {
-                swapin_readahead(entry);
-                page = read_swap_cache_async(entry);
+                swapin_readahead(entry, address, vma);
+                page = read_swap_cache_async(entry, vma, address);
                 if (!page) {
                         /*
                          * Back out if somebody else faulted in this pte while
@@ -1372,7 +1401,7 @@ do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 pte_unmap(page_table);
                 spin_unlock(&mm->page_table_lock);
 
-                page = alloc_page(GFP_HIGHUSER);
+                page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
                 if (!page)
                         goto no_mem;
                 clear_user_highpage(page, addr);
@@ -1454,7 +1483,7 @@ retry:
          * Should we do an early C-O-W break?
          */
         if (write_access && !(vma->vm_flags & VM_SHARED)) {
-                struct page * page = alloc_page(GFP_HIGHUSER);
+                struct page *page = alloc_page_vma(GFP_HIGHUSER, vma, address);
                 if (!page)
                         goto oom;
                 copy_user_highpage(page, new_page, address);
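
The subtle part of the mm/memory.c change is the readahead loop:
swapin_readahead() pulls in a whole aligned cluster of swap slots, so the
virtual address advances one page per iteration and can walk off the end of
the original VMA or into an unmapped hole. The open-coded #ifdef CONFIG_NUMA
block above keeps vma tracking the current address so each readahead page is
allocated under the right policy. Pulled out as a standalone helper for
clarity; the function name is ours, the patch inlines this logic:

#include <linux/mm.h>

/*
 * Step the VMA cursor after 'addr' has been advanced by one page,
 * mirroring the loop body in swapin_readahead() above.  'vma' is the
 * VMA that covered the previous address, or NULL while inside a hole;
 * '*next_vma' caches the following VMA so the list is never rescanned.
 */
static struct vm_area_struct *readahead_next_vma(unsigned long addr,
                struct vm_area_struct *vma,
                struct vm_area_struct **next_vma)
{
        if (addr == 0)                  /* address space wrapped around */
                vma = NULL;
        if (vma) {
                if (addr >= vma->vm_end) {      /* walked off the end */
                        vma = *next_vma;
                        *next_vma = vma ? vma->vm_next : NULL;
                }
                if (vma && addr < vma->vm_start)
                        vma = NULL;     /* fell into a hole before vma */
        } else if (*next_vma && addr >= (*next_vma)->vm_start) {
                vma = *next_vma;        /* entered the next mapping */
                *next_vma = vma->vm_next;
        }
        return vma;
}

In the loop this step runs once per readahead page, right after
addr += PAGE_SIZE, so read_swap_cache_async() always sees the VMA (or NULL)
that covers the address being prefetched.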
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 600ab2ef090b3b..6dd0825561fc3b 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -327,7 +327,8 @@ struct page * lookup_swap_cache(swp_entry_t entry)
  * A failure return means that either the page allocation failed or that
  * the swap entry is no longer in use.
  */
-struct page * read_swap_cache_async(swp_entry_t entry)
+struct page *read_swap_cache_async(swp_entry_t entry,
+                        struct vm_area_struct *vma, unsigned long addr)
 {
         struct page *found_page, *new_page = NULL;
         int err;
@@ -351,7 +352,7 @@ struct page * read_swap_cache_async(swp_entry_t entry)
                  * Get a new page to read into from swap.
                  */
                 if (!new_page) {
-                        new_page = alloc_page(GFP_HIGHUSER);
+                        new_page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
                         if (!new_page)
                                 break;          /* Out of memory */
                 }
diff --git a/mm/swapfile.c b/mm/swapfile.c
index efbbcfe6b61907..e5f7e8829aa8ad 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -657,7 +657,7 @@ static int try_to_unuse(unsigned int type)
          */
         swap_map = &si->swap_map[i];
         entry = swp_entry(type, i);
-        page = read_swap_cache_async(entry);
+        page = read_swap_cache_async(entry, NULL, 0);
         if (!page) {
                 /*
                  * Either swap_duplicate() failed because entry
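
try_to_unuse() runs at swapoff time with no faulting mapping in hand, which is
why it passes (entry, NULL, 0): with a NULL vma, alloc_page_vma() can
presumably fall back to the task's default policy. The fault path, by
contrast, threads the real context through, as in do_swap_page() above:

        /* fault path: readahead and the faulting page follow vma's policy */
        swapin_readahead(entry, address, vma);
        page = read_swap_cache_async(entry, vma, address);

        /* swapoff path: no VMA context is available */
        page = read_swap_cache_async(entry, NULL, 0);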