From: Nick Piggin Use the new __GFP_NOMEMALLOC to simplify the previous handling of PF_MEMALLOC. Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton --- drivers/md/dm-crypt.c | 19 +++++-------------- mm/swap_state.c | 27 ++++++++------------------- 2 files changed, 13 insertions(+), 33 deletions(-) diff -puN drivers/md/dm-crypt.c~mm-use-__gfp_nomemalloc drivers/md/dm-crypt.c --- 25/drivers/md/dm-crypt.c~mm-use-__gfp_nomemalloc 2005-04-26 05:30:35.000000000 -0700 +++ 25-akpm/drivers/md/dm-crypt.c 2005-04-26 05:35:47.000000000 -0700 @@ -331,25 +331,19 @@ crypt_alloc_buffer(struct crypt_config * struct bio *bio; unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; int gfp_mask = GFP_NOIO | __GFP_HIGHMEM; - unsigned long flags = current->flags; unsigned int i; /* - * Tell VM to act less aggressively and fail earlier. - * This is not necessary but increases throughput. + * Use __GFP_NOMEMALLOC to tell the VM to act less aggressively and + * to fail earlier. This is not necessary but increases throughput. * FIXME: Is this really intelligent?
*/ - current->flags &= ~PF_MEMALLOC; - if (base_bio) - bio = bio_clone(base_bio, GFP_NOIO); + bio = bio_clone(base_bio, GFP_NOIO|__GFP_NOMEMALLOC); else - bio = bio_alloc(GFP_NOIO, nr_iovecs); - if (!bio) { - if (flags & PF_MEMALLOC) - current->flags |= PF_MEMALLOC; + bio = bio_alloc(GFP_NOIO|__GFP_NOMEMALLOC, nr_iovecs); + if (!bio) return NULL; - } /* if the last bio was not complete, continue where that one ended */ bio->bi_idx = *bio_vec_idx; @@ -386,9 +380,6 @@ crypt_alloc_buffer(struct crypt_config * size -= bv->bv_len; } - if (flags & PF_MEMALLOC) - current->flags |= PF_MEMALLOC; - if (!bio->bi_size) { bio_put(bio); return NULL; diff -puN mm/swap_state.c~mm-use-__gfp_nomemalloc mm/swap_state.c --- 25/mm/swap_state.c~mm-use-__gfp_nomemalloc 2005-04-26 05:30:35.000000000 -0700 +++ 25-akpm/mm/swap_state.c 2005-04-26 05:32:22.000000000 -0700 @@ -147,7 +147,6 @@ void __delete_from_swap_cache(struct pag int add_to_swap(struct page *page, void *cookie, pgoff_t index) { swp_entry_t entry; - int pf_flags; int err; if (!PageLocked(page)) @@ -158,29 +157,19 @@ int add_to_swap(struct page *page, void if (!entry.val) return 0; - /* Radix-tree node allocations are performing - * GFP_ATOMIC allocations under PF_MEMALLOC. - * They can completely exhaust the page allocator. - * - * So PF_MEMALLOC is dropped here. This causes the slab - * allocations to fail earlier, so radix-tree nodes will - * then be allocated from the mempool reserves. + /* + * Radix-tree node allocations from PF_MEMALLOC contexts could + * completely exhaust the page allocator. __GFP_NOMEMALLOC + * stops emergency reserves from being allocated. * - * We're still using __GFP_HIGH for radix-tree node - * allocations, so some of the emergency pools are available, - * just not all of them. + * TODO: this could cause a theoretical memory reclaim + * deadlock in the swap out path.
*/ - - pf_flags = current->flags; - current->flags &= ~PF_MEMALLOC; - /* * Add it to the swap cache and mark it dirty */ - err = __add_to_swap_cache(page, entry, GFP_ATOMIC|__GFP_NOWARN); - - if (pf_flags & PF_MEMALLOC) - current->flags |= PF_MEMALLOC; + err = __add_to_swap_cache(page, entry, + GFP_ATOMIC|__GFP_NOMEMALLOC|__GFP_NOWARN); switch (err) { case 0: /* Success */ _