author		Ingo Molnar <mingo@elte.hu>	2005-01-07 21:53:24 -0800
committer	Linus Torvalds <torvalds@evo.osdl.org>	2005-01-07 21:53:24 -0800
commit		2b5726db9f5d0583eaad993c5a0b99355bf05a20 (patch)
tree		718af726aa66b66506d3801c8ad1ec436a029a83 /mm
parent		a0332406058f2ffc44c15c60d5650708914465a2 (diff)
download	history-2b5726db9f5d0583eaad993c5a0b99355bf05a20.tar.gz
[PATCH] sched: fix scheduling latencies for !PREEMPT kernels
This patch adds a handful of cond_resched() points to a number of key,
scheduling-latency-related non-inlined functions. This reduces preemption
latency for !PREEMPT kernels. These are scheduling points complementary to
PREEMPT_VOLUNTARY scheduling points (might_sleep() places) - i.e. these are
all points where an explicit cond_resched() had to be added.

Has been tested as part of the -VP patchset.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
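The pattern is the same in every hunk below: a cond_resched() call at the
top of a potentially long-running loop, which on a !PREEMPT kernel is the
only way to offer the scheduler a voluntary preemption point there. A
minimal sketch of the pattern, with a hypothetical helper name and elided
per-page work (not code from this commit):

/*
 * Sketch only: call cond_resched() once per iteration of a loop that
 * may run for a long time, bounding scheduling latency per iteration.
 */
#include <linux/sched.h>	/* cond_resched() */
#include <linux/list.h>
#include <linux/mm.h>		/* struct page */

static void scan_pages(struct list_head *page_list)
{
	struct page *page;

	while (!list_empty(page_list)) {
		cond_resched();	/* yield here if a reschedule is pending */
		page = list_entry(page_list->prev, struct page, lru);
		list_del(&page->lru);
		/* ... per-page work elided ... */
	}
}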
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c	1 +
-rw-r--r--	mm/slab.c	2 +-
-rw-r--r--	mm/vmscan.c	3 +++
3 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/mm/memory.c b/mm/memory.c
index 24079123809fac..6224cb1933c145 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1742,6 +1742,7 @@ do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 	smp_rmb(); /* Prevent CPU from reordering lock-free ->nopage() */
 retry:
+	cond_resched();
 	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
 
 	/* no page was available -- either SIGBUS or OOM */
diff --git a/mm/slab.c b/mm/slab.c
index 12bf28e2b9cfc3..478a6cdbf2c757 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2836,7 +2836,7 @@ static void cache_reap(void *unused)
 next_unlock:
 		spin_unlock_irq(&searchp->spinlock);
 next:
-		;
+		cond_resched();
 	}
 	check_irq_on();
 	up(&cache_chain_sem);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 402ca278f1efb4..69217c5bb02de4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -361,6 +361,8 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
 		int may_enter_fs;
 		int referenced;
 
+		cond_resched();
+
 		page = lru_to_page(page_list);
 		list_del(&page->lru);
@@ -710,6 +712,7 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
 		reclaim_mapped = 1;
 
 	while (!list_empty(&l_hold)) {
+		cond_resched();
 		page = lru_to_page(&l_hold);
 		list_del(&page->lru);
 		if (page_mapped(page)) {