author | Christoph Lameter <clameter@sgi.com> | 2005-01-04 23:36:59 -0800
---|---|---
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-01-04 23:36:59 -0800
commit | 76af7e63a1304bc7ecd93c24320315d2e12df197 (patch) |
tree | 4a9ddfaef44d20d5b0f2818665e4889957311d81 /mm |
parent | 904e737b7ccca4eb807db430320f0fea7c2a6c9a (diff) |
download | history-76af7e63a1304bc7ecd93c24320315d2e12df197.tar.gz |
[PATCH] Make page allocator aware of requests for zeroed memory
This introduces __GFP_ZERO as an additional gfp_mask element to allow callers
to request zeroed pages from the page allocator:
- Modifies the page allocator so that it zeroes memory if __GFP_ZERO is
  set
- Replaces all page zeroing done after allocation with allocations that
  request zeroed pages via __GFP_ZERO
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
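
For context, a minimal sketch (not part of the commit) of the caller-side pattern this patch converts throughout mm/; the function names `get_page_old_way` and `get_page_new_way` are invented for illustration:

```c
#include <linux/gfp.h>
#include <linux/highmem.h>

/* Old pattern: allocate, then zero the page by hand. */
static struct page *get_page_old_way(void)
{
	struct page *page = alloc_page(GFP_HIGHUSER);

	if (page)
		clear_highpage(page);
	return page;
}

/* New pattern: the allocator zeroes the page when __GFP_ZERO is set. */
static struct page *get_page_new_way(void)
{
	return alloc_page(GFP_HIGHUSER | __GFP_ZERO);
}
```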
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memory.c | 3
-rw-r--r-- | mm/page_alloc.c | 20
-rw-r--r-- | mm/shmem.c | 8
3 files changed, 18 insertions, 13 deletions
```diff
diff --git a/mm/memory.c b/mm/memory.c
index b0c61dac23e6a5..ad9407594ba552 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1673,10 +1673,9 @@ do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 		if (unlikely(anon_vma_prepare(vma)))
 			goto no_mem;
-		page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
+		page = alloc_page_vma(GFP_HIGHZERO, vma, addr);
 		if (!page)
 			goto no_mem;
-		clear_user_highpage(page, addr);
 
 		spin_lock(&mm->page_table_lock);
 		page_table = pte_offset_map(pmd, addr);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4fb4da2dd5e28e..fd6f8014689b87 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -558,6 +558,13 @@ void fastcall free_cold_page(struct page *page)
  * we cheat by calling it from here, in the order > 0 path.  Saves a branch
  * or two.
  */
+static inline void prep_zero_page(struct page *page, int order)
+{
+	int i;
+
+	for (i = 0; i < (1 << order); i++)
+		clear_highpage(page + i);
+}
 
 static struct page *
 buffered_rmqueue(struct zone *zone, int order, int gfp_flags)
@@ -593,6 +600,10 @@ buffered_rmqueue(struct zone *zone, int order, int gfp_flags)
 		BUG_ON(bad_range(zone, page));
 		mod_page_state_zone(zone, pgalloc, 1 << order);
 		prep_new_page(page, order);
+
+		if (gfp_flags & __GFP_ZERO)
+			prep_zero_page(page, order);
+
 		if (order && (gfp_flags & __GFP_COMP))
 			prep_compound_page(page, order);
 	}
@@ -805,12 +816,9 @@ fastcall unsigned long get_zeroed_page(unsigned int gfp_mask)
 	 */
 	BUG_ON(gfp_mask & __GFP_HIGHMEM);
 
-	page = alloc_pages(gfp_mask, 0);
-	if (page) {
-		void *address = page_address(page);
-		clear_page(address);
-		return (unsigned long) address;
-	}
+	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
+	if (page)
+		return (unsigned long) page_address(page);
 	return 0;
 }
 
diff --git a/mm/shmem.c b/mm/shmem.c
index e1ff7d74c249a4..2a97375d5eceab 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -369,9 +369,8 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
 		}
 
 		spin_unlock(&info->lock);
-		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
+		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
 		if (page) {
-			clear_highpage(page);
 			page->nr_swapped = 0;
 		}
 		spin_lock(&info->lock);
@@ -910,7 +909,7 @@ shmem_alloc_page(unsigned long gfp, struct shmem_inode_info *info,
 	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
 	pvma.vm_pgoff = idx;
 	pvma.vm_end = PAGE_SIZE;
-	page = alloc_page_vma(gfp, &pvma, 0);
+	page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
 	mpol_free(pvma.vm_policy);
 	return page;
 }
@@ -926,7 +925,7 @@ static inline struct page *
 shmem_alloc_page(unsigned long gfp,struct shmem_inode_info *info,
 		 unsigned long idx)
 {
-	return alloc_page(gfp);
+	return alloc_page(gfp | __GFP_ZERO);
 }
 #endif
 
@@ -1135,7 +1134,6 @@ repeat:
 
 		info->alloced++;
 		spin_unlock(&info->lock);
-		clear_highpage(filepage);
 		flush_dcache_page(filepage);
 		SetPageUptodate(filepage);
 	}
```
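
One detail worth noting in the buffered_rmqueue() change: an order-n allocation spans 1 << n contiguous pages, so prep_zero_page() clears every constituent page with clear_highpage(), not just the head page. A hedged sketch of a hypothetical caller (`grab_zeroed_block` is an invented name, not from the patch):

```c
#include <linux/gfp.h>

/*
 * Hypothetical caller: an order-2 request covers 1 << 2 = 4 contiguous
 * pages; with __GFP_ZERO set, prep_zero_page() zeroes all four before
 * the block is returned.
 */
static struct page *grab_zeroed_block(void)
{
	return alloc_pages(GFP_KERNEL | __GFP_ZERO, 2);
}
```

Callers that previously paired alloc_pages() with a manual clearing loop can drop the loop entirely, mirroring the get_zeroed_page() simplification in the hunk above.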