diff -urN 2.4.19pre5/drivers/block/ll_rw_blk.c lowlat/drivers/block/ll_rw_blk.c
--- 2.4.19pre5/drivers/block/ll_rw_blk.c	Sat Mar 30 00:11:54 2002
+++ lowlat/drivers/block/ll_rw_blk.c	Sat Mar 30 20:58:33 2002
@@ -1000,6 +1000,7 @@
 			kstat.pgpgin += count;
 			break;
 	}
+	conditional_schedule();
 }
 
 /**
diff -urN 2.4.19pre5/fs/buffer.c lowlat/fs/buffer.c
--- 2.4.19pre5/fs/buffer.c	Sat Mar 30 00:12:03 2002
+++ lowlat/fs/buffer.c	Sat Mar 30 20:58:33 2002
@@ -219,6 +219,8 @@
 			continue;
 
 		spin_unlock(&lru_list_lock);
+		conditional_schedule();
+
 		write_locked_buffers(array, count);
 		return -EAGAIN;
 	}
@@ -226,6 +228,7 @@
 			__refile_buffer(bh);
 	}
 	spin_unlock(&lru_list_lock);
+	conditional_schedule();
 
 	if (count)
 		write_locked_buffers(array, count);
@@ -271,17 +274,19 @@
 		spin_unlock(&lru_list_lock);
 		wait_on_buffer (bh);
 		put_bh(bh);
+		conditional_schedule();
 		return -EAGAIN;
 	}
 
 	spin_unlock(&lru_list_lock);
+	conditional_schedule();
 	return 0;
 }
 
 static int wait_for_locked_buffers(kdev_t dev, int index, int refile)
 {
-	do {
+	do
 		spin_lock(&lru_list_lock);
-	} while (wait_for_buffers(dev, index, refile));
+	while (wait_for_buffers(dev, index, refile));
 	return 0;
 }
@@ -1027,6 +1032,7 @@
 	for (;;) {
 		struct buffer_head * bh;
 
+		conditional_schedule();
 		bh = get_hash_table(dev, block, size);
 		if (bh)
 			return bh;
diff -urN 2.4.19pre5/fs/dcache.c lowlat/fs/dcache.c
--- 2.4.19pre5/fs/dcache.c	Mon Feb 25 22:05:08 2002
+++ lowlat/fs/dcache.c	Sat Mar 30 20:58:33 2002
@@ -71,7 +71,7 @@
  * d_iput() operation if defined.
  * Called with dcache_lock held, drops it.
  */
-static inline void dentry_iput(struct dentry * dentry)
+static void dentry_iput(struct dentry * dentry)
 {
 	struct inode *inode = dentry->d_inode;
 	if (inode) {
@@ -84,6 +84,7 @@
 			iput(inode);
 	} else
 		spin_unlock(&dcache_lock);
+	conditional_schedule();
 }
 
 /*
diff -urN 2.4.19pre5/fs/jbd/commit.c lowlat/fs/jbd/commit.c
--- 2.4.19pre5/fs/jbd/commit.c	Mon Feb 25 22:05:08 2002
+++ lowlat/fs/jbd/commit.c	Sat Mar 30 20:58:33 2002
@@ -212,6 +212,16 @@
 				__journal_remove_journal_head(bh);
 				refile_buffer(bh);
 				__brelse(bh);
+				if (current->need_resched) {
+					if (commit_transaction->t_sync_datalist)
+						commit_transaction->t_sync_datalist =
+							next_jh;
+					if (bufs)
+						break;
+					spin_unlock(&journal_datalist_lock);
+					conditional_schedule();
+					goto write_out_data;
+				}
 			}
 		}
 		if (bufs == ARRAY_SIZE(wbuf)) {
diff -urN 2.4.19pre5/fs/proc/array.c lowlat/fs/proc/array.c
--- 2.4.19pre5/fs/proc/array.c	Tue Jan 22 18:54:59 2002
+++ lowlat/fs/proc/array.c	Sat Mar 30 20:58:33 2002
@@ -415,6 +415,8 @@
 		pte_t page = *pte;
 		struct page *ptpage;
 
+		conditional_schedule();
+
 		address += PAGE_SIZE;
 		pte++;
 		if (pte_none(page))
diff -urN 2.4.19pre5/fs/proc/generic.c lowlat/fs/proc/generic.c
--- 2.4.19pre5/fs/proc/generic.c	Tue Jan 22 18:53:53 2002
+++ lowlat/fs/proc/generic.c	Sat Mar 30 20:58:33 2002
@@ -98,7 +98,9 @@
 			retval = n;
 			break;
 		}
-		
+
+		conditional_schedule();
+
 		/* This is a hack to allow mangling of file pos independent
 		 * of actual bytes read.  Simply place the data at page,
 		 * return the bytes, and set `start' to the desired offset
diff -urN 2.4.19pre5/include/linux/condsched.h lowlat/include/linux/condsched.h
--- 2.4.19pre5/include/linux/condsched.h	Thu Jan  1 01:00:00 1970
+++ lowlat/include/linux/condsched.h	Sat Mar 30 20:58:33 2002
@@ -0,0 +1,14 @@
+#ifndef _LINUX_CONDSCHED_H
+#define _LINUX_CONDSCHED_H
+
+#ifndef __ASSEMBLY__
+#define conditional_schedule() \
+do { \
+	if (unlikely(current->need_resched)) { \
+		__set_current_state(TASK_RUNNING); \
+		schedule(); \
+	} \
+} while(0)
+#endif
+
+#endif
diff -urN 2.4.19pre5/include/linux/sched.h lowlat/include/linux/sched.h
--- 2.4.19pre5/include/linux/sched.h	Sat Mar 30 00:12:05 2002
+++ lowlat/include/linux/sched.h	Sat Mar 30 20:58:33 2002
@@ -13,6 +13,7 @@
 #include <linux/times.h>
 #include <linux/timex.h>
 #include <linux/rbtree.h>
+#include <linux/condsched.h>
 
 #include <asm/system.h>
 #include <asm/semaphore.h>
diff -urN 2.4.19pre5/mm/filemap.c lowlat/mm/filemap.c
--- 2.4.19pre5/mm/filemap.c	Sat Mar 30 00:12:05 2002
+++ lowlat/mm/filemap.c	Sat Mar 30 20:58:33 2002
@@ -295,11 +295,7 @@
 
 		page_cache_release(page);
 
-		if (current->need_resched) {
-			__set_current_state(TASK_RUNNING);
-			schedule();
-		}
-
+		conditional_schedule();
 		spin_lock(&pagecache_lock);
 		goto restart;
 	}
@@ -405,11 +401,8 @@
 		}
 		page_cache_release(page);
 
-		if (current->need_resched) {
-			__set_current_state(TASK_RUNNING);
-			schedule();
-		}
+		conditional_schedule();
 
 		spin_lock(&pagecache_lock);
 		goto restart;
 	}
@@ -699,6 +692,8 @@
 	struct page **hash = page_hash(mapping, offset);
 	struct page *page;
 
+	conditional_schedule();
+
 	spin_lock(&pagecache_lock);
 	page = __find_page_nolock(mapping, offset, *hash);
 	spin_unlock(&pagecache_lock);
@@ -1432,6 +1427,9 @@
 
 		offset &= ~PAGE_CACHE_MASK;
 		page_cache_release(page);
+
+		conditional_schedule();
+
 		if (ret == nr && desc->count)
 			continue;
 		break;
@@ -3092,6 +3090,8 @@
 		SetPageReferenced(page);
 		UnlockPage(page);
 		page_cache_release(page);
+
+		conditional_schedule();
 		if (status < 0)
 			break;