From: Ingo Molnar

The attached patch fixes long latencies in unmap_vmas().  We had lockbreak
code in that function already but it did not take delayed effects of
TLB-gather into account.

Has been tested as part of the -VP patchset.

Signed-off-by: Ingo Molnar
Signed-off-by: Andrew Morton
---

 25-akpm/mm/memory.c |   26 +++++++++++---------------
 1 files changed, 11 insertions(+), 15 deletions(-)

diff -puN mm/memory.c~sched-mm-fix-scheduling-latencies-in-unmap_vmas mm/memory.c
--- 25/mm/memory.c~sched-mm-fix-scheduling-latencies-in-unmap_vmas	2004-11-17 20:46:36.378827096 -0800
+++ 25-akpm/mm/memory.c	2004-11-17 20:46:36.383826336 -0800
@@ -612,19 +612,15 @@ static void unmap_page_range(struct mmu_
 	tlb_end_vma(tlb, vma);
 }
 
-/* Dispose of an entire struct mmu_gather per rescheduling point */
-#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
-#define ZAP_BLOCK_SIZE	(FREE_PTE_NR * PAGE_SIZE)
-#endif
-
-/* For UP, 256 pages at a time gives nice low latency */
-#if !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
-#define ZAP_BLOCK_SIZE	(256 * PAGE_SIZE)
-#endif
-
+#ifdef CONFIG_PREEMPT
+/*
+ * It's not an issue to have a small zap block size - TLB flushes
+ * only happen once normally, due to the tlb->need_flush optimization.
+ */
+# define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
+#else
 /* No preempt: go for improved straight-line efficiency */
-#if !defined(CONFIG_PREEMPT)
-#define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
+# define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
 #endif
 
 /**
@@ -699,15 +695,15 @@ int unmap_vmas(struct mmu_gather **tlbp,
 
 			start += block;
 			zap_bytes -= block;
-			if ((long)zap_bytes > 0)
-				continue;
-			if (!atomic && need_resched()) {
+			if (!atomic) {
 				int fullmm = tlb_is_full_mm(*tlbp);
 				tlb_finish_mmu(*tlbp, tlb_start, start);
 				cond_resched_lock(&mm->page_table_lock);
 				*tlbp = tlb_gather_mmu(mm, fullmm);
 				tlb_start_valid = 0;
 			}
+			if ((long)zap_bytes > 0)
+				continue;
 			zap_bytes = ZAP_BLOCK_SIZE;
 		}
 	}
_
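
For readers who want the control-flow change in isolation, here is a minimal,
self-contained userspace sketch of what the second hunk does: the lockbreak
now runs after every zapped block, instead of only once a full ZAP_BLOCK_SIZE
budget has been consumed.  The names flush_batch(), lock_break() and
BLOCK_SIZE are hypothetical stand-ins for tlb_finish_mmu()/tlb_gather_mmu(),
cond_resched_lock() and ZAP_BLOCK_SIZE, and the "atomic" case is omitted; it
is an illustration of the pattern, not kernel code.

/*
 * Userspace analogue of the post-patch unmap_vmas() inner loop:
 * process a range in small blocks and hit the "lockbreak" point
 * after every block.
 */
#include <stddef.h>
#include <stdio.h>

#define BLOCK_SIZE	(8 * 4096UL)	/* analogue of ZAP_BLOCK_SIZE under PREEMPT */

static void flush_batch(void) { /* stands in for tlb_finish_mmu()/tlb_gather_mmu() */ }
static void lock_break(void)  { /* stands in for cond_resched_lock() */ }

static void zap_range(size_t start, size_t end)
{
	size_t budget = BLOCK_SIZE;

	while (start != end) {
		size_t block = (end - start < budget) ? end - start : budget;

		printf("zapping [%zu, %zu)\n", start, start + block);
		start += block;
		budget -= block;

		/*
		 * Post-patch ordering: flush the batched work and break
		 * the lock after *every* block, and only then decide
		 * whether the budget allows continuing.  Before the
		 * patch, the break only ran once the whole budget had
		 * been consumed.
		 */
		flush_batch();
		lock_break();

		if ((long)budget > 0)
			continue;
		budget = BLOCK_SIZE;
	}
}

int main(void)
{
	zap_range(0, 100 * 4096UL);
	return 0;
}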