diff -urN 2.4.15pre7/fs/buffer.c schedule/fs/buffer.c
--- 2.4.15pre7/fs/buffer.c	Tue Nov 20 06:07:12 2001
+++ schedule/fs/buffer.c	Tue Nov 20 16:47:22 2001
@@ -178,6 +178,7 @@
 		struct buffer_head * bh = *array++;
 		bh->b_end_io = end_buffer_io_sync;
 		submit_bh(WRITE, bh);
+		conditional_schedule();
 	} while (--count);
 }
 
@@ -214,6 +215,8 @@
 			continue;
 
 		spin_unlock(&lru_list_lock);
+		conditional_schedule();
+
 		write_locked_buffers(array, count);
 		return -EAGAIN;
 	}
@@ -221,6 +224,7 @@
 		__refile_buffer(bh);
 	}
 	spin_unlock(&lru_list_lock);
+	conditional_schedule();
 
 	if (count)
 		write_locked_buffers(array, count);
@@ -232,9 +236,9 @@
  */
 static void write_unlocked_buffers(kdev_t dev)
 {
-	do {
+	do
 		spin_lock(&lru_list_lock);
-	} while (write_some_buffers(dev));
+	while (write_some_buffers(dev));
 	run_task_queue(&tq_disk);
 }
 
@@ -267,9 +271,11 @@
 		spin_unlock(&lru_list_lock);
 		wait_on_buffer (bh);
 		put_bh(bh);
+		conditional_schedule();
 		return -EAGAIN;
 	}
 	spin_unlock(&lru_list_lock);
+	conditional_schedule();
 	return 0;
 }
 
@@ -281,9 +287,9 @@
 
 static int wait_for_locked_buffers(kdev_t dev, int index, int refile)
 {
-	do {
+	do
 		spin_lock(&lru_list_lock);
-	} while (wait_for_buffers(dev, index, refile));
+	while (wait_for_buffers(dev, index, refile));
 	return 0;
 }
 
diff -urN 2.4.15pre7/fs/proc/array.c schedule/fs/proc/array.c
--- 2.4.15pre7/fs/proc/array.c	Wed Oct 24 08:04:23 2001
+++ schedule/fs/proc/array.c	Tue Nov 20 16:46:23 2001
@@ -415,6 +415,8 @@
 		pte_t page = *pte;
 		struct page *ptpage;
 
+		conditional_schedule();
+
 		address += PAGE_SIZE;
 		pte++;
 		if (pte_none(page))
diff -urN 2.4.15pre7/fs/proc/generic.c schedule/fs/proc/generic.c
--- 2.4.15pre7/fs/proc/generic.c	Sun Sep 23 21:11:40 2001
+++ schedule/fs/proc/generic.c	Tue Nov 20 16:46:23 2001
@@ -98,7 +98,9 @@
 			retval = n;
 			break;
 		}
-		
+
+		conditional_schedule();
+
 		/* This is a hack to allow mangling of file pos independent
 		 * of actual bytes read.  Simply place the data at page,
 		 * return the bytes, and set `start' to the desired offset
diff -urN 2.4.15pre7/include/linux/condsched.h schedule/include/linux/condsched.h
--- 2.4.15pre7/include/linux/condsched.h	Thu Jan  1 01:00:00 1970
+++ schedule/include/linux/condsched.h	Tue Nov 20 16:46:23 2001
@@ -0,0 +1,14 @@
+#ifndef _LINUX_CONDSCHED_H
+#define _LINUX_CONDSCHED_H
+
+#ifndef __ASSEMBLY__
+#define conditional_schedule() \
+do { \
+	if (unlikely(current->need_resched)) { \
+		__set_current_state(TASK_RUNNING); \
+		schedule(); \
+	} \
+} while(0)
+#endif
+
+#endif
diff -urN 2.4.15pre7/include/linux/sched.h schedule/include/linux/sched.h
--- 2.4.15pre7/include/linux/sched.h	Tue Nov 20 06:07:30 2001
+++ schedule/include/linux/sched.h	Tue Nov 20 16:46:23 2001
@@ -13,6 +13,7 @@
 #include <linux/times.h>
 #include <linux/timex.h>
 #include <linux/rbtree.h>
+#include <linux/condsched.h>
 
 #include <asm/system.h>
 #include <asm/semaphore.h>
diff -urN 2.4.15pre7/mm/filemap.c schedule/mm/filemap.c
--- 2.4.15pre7/mm/filemap.c	Tue Nov 20 06:07:33 2001
+++ schedule/mm/filemap.c	Tue Nov 20 16:46:23 2001
@@ -714,6 +714,8 @@
 	struct page **hash = page_hash(mapping, offset);
 	struct page *page;
 
+	conditional_schedule();
+
 	spin_lock(&pagecache_lock);
 	page = __find_page_nolock(mapping, offset, *hash);
 	spin_unlock(&pagecache_lock);
@@ -1382,6 +1384,9 @@
 		offset &= ~PAGE_CACHE_MASK;
 
 		page_cache_release(page);
+
+		conditional_schedule();
+
 		if (ret == nr && desc->count)
 			continue;
 		break;
@@ -2905,6 +2910,8 @@
 		SetPageReferenced(page);
 		UnlockPage(page);
 		page_cache_release(page);
+
+		conditional_schedule();
 		if (status < 0)
 			break;