-rw-r--r--  include/linux/swap.h    3
-rw-r--r--  mm/mempolicy.c         27
-rw-r--r--  mm/vmscan.c            17
3 files changed, 32 insertions, 15 deletions
diff --git a/include/linux/swap.h b/include/linux/swap.h
index eb591eaad1b746..389d1c382e208c 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -178,7 +178,8 @@ extern int vm_swappiness;
 #ifdef CONFIG_MIGRATION
 extern int isolate_lru_page(struct page *p);
 extern int putback_lru_pages(struct list_head *l);
-extern int migrate_pages(struct list_head *l, struct list_head *t);
+extern int migrate_pages(struct list_head *l, struct list_head *t,
+		struct list_head *moved, struct list_head *failed);
 #endif
 
 #ifdef CONFIG_MMU
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 20d5ad39fa4110..30bdafba52d8aa 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -429,6 +429,19 @@ static int contextualize_policy(int mode, nodemask_t *nodes)
 	return mpol_check_policy(mode, nodes);
 }
 
+static int swap_pages(struct list_head *pagelist)
+{
+	LIST_HEAD(moved);
+	LIST_HEAD(failed);
+	int n;
+
+	n = migrate_pages(pagelist, NULL, &moved, &failed);
+	putback_lru_pages(&failed);
+	putback_lru_pages(&moved);
+
+	return n;
+}
+
 long do_mbind(unsigned long start, unsigned long len,
 		unsigned long mode, nodemask_t *nmask, unsigned long flags)
 {
@@ -481,10 +494,13 @@ long do_mbind(unsigned long start, unsigned long len,
 			(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ? &pagelist : NULL);
 	err = PTR_ERR(vma);
 	if (!IS_ERR(vma)) {
+		int nr_failed = 0;
+
 		err = mbind_range(vma, start, end, new);
 		if (!list_empty(&pagelist))
-			migrate_pages(&pagelist, NULL);
-		if (!err && !list_empty(&pagelist) && (flags & MPOL_MF_STRICT))
+			nr_failed = swap_pages(&pagelist);
+
+		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
 			err = -EIO;
 	}
 	if (!list_empty(&pagelist))
@@ -635,11 +651,12 @@ int do_migrate_pages(struct mm_struct *mm,
 	down_read(&mm->mmap_sem);
 	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nodes,
 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
+
 	if (!list_empty(&pagelist)) {
-		migrate_pages(&pagelist, NULL);
-		if (!list_empty(&pagelist))
-			count = putback_lru_pages(&pagelist);
+		count = swap_pages(&pagelist);
+		putback_lru_pages(&pagelist);
 	}
+
 	up_read(&mm->mmap_sem);
 	return count;
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 73ba4046ed272e..5eecb514ccea00 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -670,10 +670,10 @@ retry:
  * list. The direct migration patchset
  * extends this function to avoid the use of swap.
  */
-int migrate_pages(struct list_head *l, struct list_head *t)
+int migrate_pages(struct list_head *from, struct list_head *to,
+		struct list_head *moved, struct list_head *failed)
 {
 	int retry;
-	LIST_HEAD(failed);
 	int nr_failed = 0;
 	int pass = 0;
 	struct page *page;
@@ -686,12 +686,12 @@ int migrate_pages(struct list_head *l, struct list_head *t)
 redo:
 	retry = 0;
 
-	list_for_each_entry_safe(page, page2, l, lru) {
+	list_for_each_entry_safe(page, page2, from, lru) {
 		cond_resched();
 
 		if (page_count(page) == 1) {
 			/* page was freed from under us. So we are done. */
-			move_to_lru(page);
+			list_move(&page->lru, moved);
 			continue;
 		}
 		/*
@@ -722,7 +722,7 @@ redo:
 		if (PageAnon(page) && !PageSwapCache(page)) {
 			if (!add_to_swap(page, GFP_KERNEL)) {
 				unlock_page(page);
-				list_move(&page->lru, &failed);
+				list_move(&page->lru, failed);
 				nr_failed++;
 				continue;
 			}
@@ -732,8 +732,10 @@ redo:
 		 * Page is properly locked and writeback is complete.
 		 * Try to migrate the page.
 		 */
-		if (!swap_page(page))
+		if (!swap_page(page)) {
+			list_move(&page->lru, moved);
 			continue;
+		}
 retry_later:
 		retry++;
 	}
@@ -743,9 +745,6 @@ retry_later:
 	if (!swapwrite)
 		current->flags &= ~PF_SWAPWRITE;
 
-	if (!list_empty(&failed))
-		list_splice(&failed, l);
-
 	return nr_failed + retry;
 }
 
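
For reference, a caller of the reworked migrate_pages() now owns both result lists and is responsible for draining them back to the LRU. A minimal sketch of the new calling convention, mirroring the swap_pages() helper added to mm/mempolicy.c above (the drain_pagelist name is illustrative and not part of this patch):

/*
 * Illustrative only -- not part of the patch. Pages on 'pagelist' are
 * assumed to have been isolated with isolate_lru_page(). Passing NULL
 * as the target list keeps the existing swap-based behaviour.
 */
static int drain_pagelist(struct list_head *pagelist)
{
	LIST_HEAD(moved);	/* pages migrate_pages() disposed of */
	LIST_HEAD(failed);	/* pages it could not push out to swap */
	int nr_failed;

	nr_failed = migrate_pages(pagelist, NULL, &moved, &failed);

	/* The caller must return both outcome lists to the LRU. */
	putback_lru_pages(&failed);
	putback_lru_pages(&moved);

	return nr_failed;
}

Pages that were retried but never completed remain on the original pagelist, so callers such as do_mbind() still put that list back separately, as in the hunks above.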