author    Hugh Dickins <hugh@veritas.com>    2005-01-07 21:58:19 -0800
committer Linus Torvalds <torvalds@evo.osdl.org>    2005-01-07 21:58:19 -0800
commit    84c496cfadc2c9471340a9ccdf7f82be6f627a5b (patch)
tree      9650eea738d0448c9025ff4ed814de7a0222ade9 /mm
parent    b37e39b03bcd49397cac7b937d9b180157705e08 (diff)
download  history-84c496cfadc2c9471340a9ccdf7f82be6f627a5b.tar.gz
[PATCH] vmtrunc: restore unmap_vmas zap_bytes
The low-latency unmap_vmas patch silently moved the zap_bytes test after the TLB finish and lockbreak and regather: why? That not only makes zap_bytes redundant (might as well use ZAP_BLOCK_SIZE), it makes the unmap_vmas level redundant too - it's all about saving TLB flushes when unmapping a series of small vmas.

Move the zap_bytes test back before the lockbreak, and delete the curious comment that a small zap block size doesn't matter: it's true that need_flush prevents a TLB flush when no page has been unmapped, but unmapping pages in small blocks involves many more TLB flushes than in large blocks.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
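For context, here is a minimal sketch of the loop shape this patch restores in unmap_vmas(). It is simplified from the 2.6-era mm/memory.c: the block computation and the unmap_page_range() call are paraphrased and do not appear in the diff below, so treat the whole fragment as an illustration rather than the exact kernel code.

	/*
	 * Sketch of the batched-unmap loop in unmap_vmas() as it stands
	 * after this patch (simplified, identifiers mirror 2.6-era code).
	 */
	while (start != end) {
		unsigned long block;

		block = zap_bytes < end - start ? zap_bytes : end - start;
		unmap_page_range(*tlbp, vma, start, start + block, details);
		start += block;
		zap_bytes -= block;

		/*
		 * Keep gathering until a full ZAP_BLOCK_SIZE has been
		 * zapped: batching here is what saves TLB flushes when
		 * unmapping a series of small vmas.
		 */
		if ((long)zap_bytes > 0)
			continue;

		/* Only now pay for the TLB flush, lockbreak and regather. */
		if (!atomic) {
			int fullmm = tlb_is_full_mm(*tlbp);

			tlb_finish_mmu(*tlbp, tlb_start, start);
			cond_resched_lock(&mm->page_table_lock);
			*tlbp = tlb_gather_mmu(mm, fullmm);
			tlb_start_valid = 0;
		}
		zap_bytes = ZAP_BLOCK_SIZE;
	}

With the zap_bytes test back before the lockbreak, the gather is finished once per ZAP_BLOCK_SIZE worth of pages rather than once per vma, which is exactly the saving described above.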
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory.c  9
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 5c6a4db990acfa..cb073d049868c5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -613,10 +613,6 @@ static void unmap_page_range(struct mmu_gather *tlb,
}
#ifdef CONFIG_PREEMPT
-/*
- * It's not an issue to have a small zap block size - TLB flushes
- * only happen once normally, due to the tlb->need_flush optimization.
- */
# define ZAP_BLOCK_SIZE (8 * PAGE_SIZE)
#else
/* No preempt: go for improved straight-line efficiency */
@@ -695,6 +691,9 @@ int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
start += block;
zap_bytes -= block;
+ if ((long)zap_bytes > 0)
+ continue;
+
if (!atomic) {
int fullmm = tlb_is_full_mm(*tlbp);
tlb_finish_mmu(*tlbp, tlb_start, start);
@@ -702,8 +701,6 @@ int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
*tlbp = tlb_gather_mmu(mm, fullmm);
tlb_start_valid = 0;
}
- if ((long)zap_bytes > 0)
- continue;
zap_bytes = ZAP_BLOCK_SIZE;
}
}