author		Andrew Morton <akpm@osdl.org>		2004-05-22 08:09:16 -0700
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2004-05-22 08:09:16 -0700
commit		4877b14f909ede73c3cdd593e72e90cf03dcf93c (patch)
tree		f9cd0d955305fb9daf2424d7d7afaa8d4d326320 /mm
parent		114c71ee019870e1822a4d1a0cfdda1f8a28f51a (diff)
[PATCH] rmap 34 vm_flags page_table_lock
From: Hugh Dickins <hugh@veritas.com>

First of a batch of seven rmap patches, based on 2.6.6-mm3. Probably the final batch: the remaining outstanding issues can have isolated patches. The first half of the batch is good for either anonmm or anon_vma; the second half replaces my anonmm rmap with Andrea's anon_vma rmap.

Judge for yourselves which you prefer. I do think I was wrong to call anon_vma more complex than anonmm (its lists are easier to understand than my refcounting), and I'm happy with its vma merging after the last patch. It just comes down to whether we can spare the extra 24 bytes (maximum, on 32-bit) per vma for its advantages in swapout and mremap.

rmap 34 vm_flags page_table_lock

Why do we guard vm_flags modifications with page_table_lock when they are already guarded by mmap_sem held for write? There is probably a historical reason, but no sign of any need for it now. Andrea added a comment and removed the instance from mprotect.c; Hugh plagiarized his comment and removed the instances from madvise.c and mlock.c. No huge leap in scalability is expected, but this should stop people asking why those spinlocks are there.
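To make the locking rule concrete, here is a minimal sketch of the invariant the patch relies on. The helper name vma_set_flags is hypothetical and only illustrates the pattern; it is not part of the patch:

	/*
	 * Illustrative sketch (hypothetical helper): vm_flags writers
	 * must hold mmap_sem for writing.  No spinlock is taken,
	 * because concurrent readers either hold mmap_sem for reading
	 * or can tolerate a racy read of vm_flags.
	 */
	static void vma_set_flags(struct vm_area_struct *vma,
				  unsigned long newflags)
	{
		/* caller has done down_write(&vma->vm_mm->mmap_sem) */
		vma->vm_flags = newflags;
	}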
Diffstat (limited to 'mm')
-rw-r--r--	mm/madvise.c	5
-rw-r--r--	mm/mlock.c	9
-rw-r--r--	mm/mprotect.c	6
3 files changed, 13 insertions(+), 7 deletions(-)
diff --git a/mm/madvise.c b/mm/madvise.c
index e54f3906d8ccaf..0439c560e0b4ce 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -31,7 +31,9 @@ static long madvise_behavior(struct vm_area_struct * vma, unsigned long start,
 		return -EAGAIN;
 	}
 
-	spin_lock(&mm->page_table_lock);
+	/*
+	 * vm_flags is protected by the mmap_sem held in write mode.
+	 */
 	VM_ClearReadHint(vma);
 	switch (behavior) {
@@ -44,7 +46,6 @@ static long madvise_behavior(struct vm_area_struct * vma, unsigned long start,
 	default:
 		break;
 	}
-	spin_unlock(&mm->page_table_lock);
 
 	return 0;
 }
diff --git a/mm/mlock.c b/mm/mlock.c
index 0cf446b5f38323..a9e37161dcef6f 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -32,10 +32,13 @@ static int mlock_fixup(struct vm_area_struct * vma,
 			goto out;
 		}
 	}
-
-	spin_lock(&mm->page_table_lock);
+
+	/*
+	 * vm_flags is protected by the mmap_sem held in write mode.
+	 * It's okay if try_to_unmap_one unmaps a page just after we
+	 * set VM_LOCKED, make_pages_present below will bring it back.
+	 */
 	vma->vm_flags = newflags;
-	spin_unlock(&mm->page_table_lock);
 
 	/*
 	 * Keep track of amount of locked VM.
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 7934dcec6e27f6..b2c74166d988ef 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -213,10 +213,12 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 		goto fail;
 	}
 
-	spin_lock(&mm->page_table_lock);
+	/*
+	 * vm_flags and vm_page_prot are protected by the mmap_sem
+	 * held in write mode.
+	 */
 	vma->vm_flags = newflags;
 	vma->vm_page_prot = newprot;
-	spin_unlock(&mm->page_table_lock);
 success:
 	change_protection(vma, start, end, newprot);
 	return 0;