--- x/mm/memory.c.~1~	2004-04-04 06:32:28.347398504 +0200
+++ x/mm/memory.c	2004-04-04 06:44:45.809287256 +0200
@@ -469,21 +469,14 @@ void unmap_page_range(struct mmu_gather
 	tlb_end_vma(tlb, vma);
 }
 
+#ifdef CONFIG_SMP
 /* Dispose of an entire struct mmu_gather per rescheduling point */
-#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
 #define ZAP_BLOCK_SIZE	(FREE_PTE_NR * PAGE_SIZE)
-#endif
-
+#else
 /* For UP, 256 pages at a time gives nice low latency */
-#if !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
 #define ZAP_BLOCK_SIZE	(256 * PAGE_SIZE)
 #endif
 
-/* No preempt: go for the best straight-line efficiency */
-#if !defined(CONFIG_PREEMPT)
-#define ZAP_BLOCK_SIZE	(~(0UL))
-#endif
-
 /**
  * unmap_vmas - unmap a range of memory covered by a list of vma's
  * @tlbp: address of the caller's struct mmu_gather
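
For reference, the point of ZAP_BLOCK_SIZE is to bound how much work
unmap_vmas() does between rescheduling points: the range being torn down
is processed in ZAP_BLOCK_SIZE chunks, with the mmu_gather flushed and a
cond_resched() offered after each chunk. The following is a simplified
user-space analogue of that loop structure, not the kernel's actual
unmap_vmas() body; unmap_chunk() and unmap_range() are hypothetical
stand-ins for unmap_page_range() and the surrounding chunking loop.

#include <sched.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define ZAP_BLOCK_SIZE	(256 * PAGE_SIZE)	/* the UP value from the patch */

/* Stand-in for unmap_page_range(): pretend to unmap [start, start+len). */
static void unmap_chunk(unsigned long start, unsigned long len)
{
	printf("unmapping %#lx - %#lx\n", start, start + len);
}

static void unmap_range(unsigned long start, unsigned long end)
{
	while (start < end) {
		unsigned long block = end - start;

		if (block > ZAP_BLOCK_SIZE)
			block = ZAP_BLOCK_SIZE;

		unmap_chunk(start, block);
		start += block;

		/*
		 * Rescheduling point: the kernel flushes the mmu_gather
		 * and calls cond_resched() here, so scheduling latency is
		 * bounded by ZAP_BLOCK_SIZE rather than by the size of
		 * the whole range being unmapped.
		 */
		sched_yield();
	}
}

int main(void)
{
	unmap_range(0x100000UL, 0x100000UL + 1000 * PAGE_SIZE);
	return 0;
}

With this structure, the removed !CONFIG_PREEMPT case (an effectively
unbounded block size of ~0UL) meant no rescheduling point at all during
the teardown, while the patch's simplified #ifdef keeps a bounded chunk
on both SMP and UP builds.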