about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
author    Ingo Molnar <mingo@elte.hu>          2005-01-07 21:51:41 -0800
committer Linus Torvalds <torvalds@evo.osdl.org>  2005-01-07 21:51:41 -0800
commit    94017c753ff3d3ffaf5e246928621787497e0fa9 (patch)
tree      480bedfd852ce9c6dae4a696b2fa6551f97dd3d8 /mm
parent    4ca0fab5343bf76841d2eb715ff0ce46f0edda02 (diff)
download  history-94017c753ff3d3ffaf5e246928621787497e0fa9.tar.gz
[PATCH] sched: mm: fix scheduling latencies in unmap_vmas()
The attached patch fixes long latencies in unmap_vmas(). We had lock-break code in that function already, but it did not take the delayed effects of TLB gathering into account. Has been tested as part of the -VP patchset.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory.c | 26 +++++++-----------
1 file changed, 11 insertions(+), 15 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index ad9407594ba552..9c168c91f281fc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -612,19 +612,15 @@ static void unmap_page_range(struct mmu_gather *tlb,
tlb_end_vma(tlb, vma);
}
-/* Dispose of an entire struct mmu_gather per rescheduling point */
-#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
-#define ZAP_BLOCK_SIZE (FREE_PTE_NR * PAGE_SIZE)
-#endif
-
-/* For UP, 256 pages at a time gives nice low latency */
-#if !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
-#define ZAP_BLOCK_SIZE (256 * PAGE_SIZE)
-#endif
-
+#ifdef CONFIG_PREEMPT
+/*
+ * It's not an issue to have a small zap block size - TLB flushes
+ * only happen once normally, due to the tlb->need_flush optimization.
+ */
+# define ZAP_BLOCK_SIZE (8 * PAGE_SIZE)
+#else
/* No preempt: go for improved straight-line efficiency */
-#if !defined(CONFIG_PREEMPT)
-#define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE)
+# define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE)
#endif
/**
@@ -699,15 +695,15 @@ int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
start += block;
zap_bytes -= block;
- if ((long)zap_bytes > 0)
- continue;
- if (!atomic && need_resched()) {
+ if (!atomic) {
int fullmm = tlb_is_full_mm(*tlbp);
tlb_finish_mmu(*tlbp, tlb_start, start);
cond_resched_lock(&mm->page_table_lock);
*tlbp = tlb_gather_mmu(mm, fullmm);
tlb_start_valid = 0;
}
+ if ((long)zap_bytes > 0)
+ continue;
zap_bytes = ZAP_BLOCK_SIZE;
}
}