about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
author: Andrew Morton <akpm@osdl.org> 2004-05-14 05:42:45 -0700
committer: Linus Torvalds <torvalds@ppc970.osdl.org> 2004-05-14 05:42:45 -0700
commit: c78a6f265684bbe76a03d45666b8b6dee4952045 (patch)
tree: f058d4b233608fef8dc8b94e65c560da7596d960 /mm
parent: 70d1f017811daab3cdf75d69fa1e37b1a08f4bb8 (diff)
download: history-c78a6f265684bbe76a03d45666b8b6dee4952045.tar.gz
[PATCH] rename rmap_lock to page_map_lock
Sync this up with Andrea's patches.
Diffstat (limited to 'mm')
-rw-r--r--  mm/rmap.c    8
-rw-r--r--  mm/vmscan.c  20
2 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 5577805ae992b6..6564f6dfe9a8ca 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -186,7 +186,7 @@ page_add_rmap(struct page *page, pte_t *ptep, struct pte_chain *pte_chain)
if (PageReserved(page))
return pte_chain;
- rmap_lock(page);
+ page_map_lock(page);
if (page->pte.direct == 0) {
page->pte.direct = pte_paddr;
@@ -223,7 +223,7 @@ page_add_rmap(struct page *page, pte_t *ptep, struct pte_chain *pte_chain)
cur_pte_chain->ptes[pte_chain_idx(cur_pte_chain) - 1] = pte_paddr;
cur_pte_chain->next_and_idx--;
out:
- rmap_unlock(page);
+ page_map_unlock(page);
return pte_chain;
}
@@ -245,7 +245,7 @@ void fastcall page_remove_rmap(struct page *page, pte_t *ptep)
if (!pfn_valid(page_to_pfn(page)) || PageReserved(page))
return;
- rmap_lock(page);
+ page_map_lock(page);
if (!page_mapped(page))
goto out_unlock; /* remap_page_range() from a driver? */
@@ -294,7 +294,7 @@ out:
dec_page_state(nr_mapped);
}
out_unlock:
- rmap_unlock(page);
+ page_map_unlock(page);
}
/**
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c8bb8861a5f802..f5d9c8c887d9ac 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -276,11 +276,11 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask,
if (PageWriteback(page))
goto keep_locked;
- rmap_lock(page);
+ page_map_lock(page);
referenced = page_referenced(page);
if (referenced && page_mapping_inuse(page)) {
/* In active use or really unfreeable. Activate it. */
- rmap_unlock(page);
+ page_map_unlock(page);
goto activate_locked;
}
@@ -295,10 +295,10 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask,
* XXX: implement swap clustering ?
*/
if (PageAnon(page) && !PageSwapCache(page)) {
- rmap_unlock(page);
+ page_map_unlock(page);
if (!add_to_swap(page))
goto activate_locked;
- rmap_lock(page);
+ page_map_lock(page);
}
if (PageSwapCache(page)) {
mapping = &swapper_space;
@@ -313,16 +313,16 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask,
if (page_mapped(page) && mapping) {
switch (try_to_unmap(page)) {
case SWAP_FAIL:
- rmap_unlock(page);
+ page_map_unlock(page);
goto activate_locked;
case SWAP_AGAIN:
- rmap_unlock(page);
+ page_map_unlock(page);
goto keep_locked;
case SWAP_SUCCESS:
; /* try to free the page below */
}
}
- rmap_unlock(page);
+ page_map_unlock(page);
/*
* If the page is dirty, only perform writeback if that write
@@ -663,13 +663,13 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in,
list_add(&page->lru, &l_active);
continue;
}
- rmap_lock(page);
+ page_map_lock(page);
if (page_referenced(page)) {
- rmap_unlock(page);
+ page_map_unlock(page);
list_add(&page->lru, &l_active);
continue;
}
- rmap_unlock(page);
+ page_map_unlock(page);
}
/*
* FIXME: need to consider page_count(page) here if/when we