diff -ur linux.orig/arch/i386/kernel/sys_i386.c linux/arch/i386/kernel/sys_i386.c
--- linux.orig/arch/i386/kernel/sys_i386.c	Mon Mar 19 15:35:09 2001
+++ linux/arch/i386/kernel/sys_i386.c	Mon Apr 30 15:00:47 2001
@@ -254,3 +254,4 @@
 	return -ERESTARTNOHAND;
 }
+
diff -ur linux.orig/fs/buffer.c linux/fs/buffer.c
--- linux.orig/fs/buffer.c	Thu Apr 26 16:01:45 2001
+++ linux/fs/buffer.c	Thu Apr 26 18:59:47 2001
@@ -1292,6 +1292,9 @@
  */
 static struct buffer_head * create_buffers(struct page * page, unsigned long size, int async)
 {
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+	DECLARE_WAITQUEUE(wait2, tsk);
 	struct buffer_head *bh, *head;
 	long offset;
 
@@ -1357,7 +1360,22 @@
 	 * Set our state for sleeping, then check again for buffer heads.
 	 * This ensures we won't miss a wake_up from an interrupt.
 	 */
-	wait_event(buffer_wait, nr_unused_buffer_heads >= MAX_BUF_PER_PAGE);
+	//wait_event(buffer_wait, nr_unused_buffer_heads >= MAX_BUF_PER_PAGE);
+	add_wait_queue(&buffer_wait, &wait);
+	add_wait_queue(&memory_wait, &wait2);
+
+	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+	/* This isn't perfect, but it should wake us up once there is some
+	 * memory freed that may allow us to make progress.
+	 */
+	if (nr_unused_buffer_heads < MAX_BUF_PER_PAGE)
+		schedule();
+
+	set_task_state(tsk, TASK_RUNNING);
+
+	remove_wait_queue(&memory_wait, &wait2);
+	remove_wait_queue(&buffer_wait, &wait);
+
 	goto try_again;
 }
diff -ur linux.orig/include/asm-i386/system.h linux/include/asm-i386/system.h
--- linux.orig/include/asm-i386/system.h	Thu Apr 26 16:35:17 2001
+++ linux/include/asm-i386/system.h	Mon Apr 30 15:04:23 2001
@@ -296,15 +296,40 @@
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 
 /* interrupt control.. */
-#define __save_flags(x)		__asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */)
-#define __restore_flags(x)	__asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory")
-#define __cli()			__asm__ __volatile__("cli": : :"memory")
-#define __sti()			__asm__ __volatile__("sti": : :"memory")
+
+/* cli and sti synchronise the pipelines on x86.
+ * let's do some tricks instead.
+ */
+static inline void __cli(void);
+static inline void __sti(void);
+
+#define real__cli() __asm__ __volatile__("orb $1,%0" : "=m" (current->cli) : : "memory")
+#define real__sti() __asm__ __volatile__( \
+	"andb $0x7e,%0\n" \
+	"jnz 2f\n" \
+	"1:\n" \
+	".section .text.lock,\"ax\"\n" \
+	"2:\n" \
+	"leal %0,%%eax\n" \
+	"call __sti_helper\n" \
+	"jmp 1b\n" \
+	".previous" \
+	: "=m" (current->cli) \
+	: : "eax", "memory")
+
+//#define real__save_flags(x) do { (x) = current->cli & 1; __cli(); } while (0)
+//#define real__restore_flags(x) do { if (x) __cli(); else __sti(); } while (0)
+
+static inline long real__save_flags(void);
+static inline void real__restore_flags(long);
+
+#define __save_flags(x) do { (x) = real__save_flags(); } while(0)
+#define __restore_flags(x) do { real__restore_flags(x); } while(0)
+
 /* used in the idle loop; sti takes one instruction cycle to complete */
 #define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")
 
 /* For spinlocks etc */
-#define local_irq_save(x)	__asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
+#define local_irq_save(x)	__save_flags(x)
 #define local_irq_restore(x)	__restore_flags(x)
 #define local_irq_disable()	__cli()
 #define local_irq_enable()	__sti()
diff -ur linux.orig/include/linux/mm.h linux/include/linux/mm.h
--- linux.orig/include/linux/mm.h	Thu Apr 26 16:35:17 2001
+++ linux/include/linux/mm.h	Mon Apr 30 15:24:25 2001
@@ -397,6 +397,7 @@
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
 
+extern wait_queue_head_t memory_wait;
 extern unsigned long FASTCALL(__get_free_pages(int gfp_mask, unsigned long order));
 extern unsigned long FASTCALL(get_zeroed_page(int gfp_mask));
diff -ur linux.orig/include/linux/sched.h linux/include/linux/sched.h
--- linux.orig/include/linux/sched.h	Thu Apr 26 20:40:36 2001
+++ linux/include/linux/sched.h	Mon Apr 30 15:24:25 2001
@@ -3,6 +3,10 @@
 
 #include <asm/param.h>	/* for HZ */
 
+#ifndef _I386_CURRENT_H
+#include <asm/current.h>
+#endif
+
 extern unsigned long event;
 
 #include <linux/config.h>
@@ -405,7 +409,34 @@
 	u32 self_exec_id;
 /* Protection of (de-)allocation: mm, files, fs, tty */
 	spinlock_t alloc_lock;
+
+	int cli;
 };
+
+static inline void __cli(void)
+{
+	real__cli();
+}
+
+static inline void __sti(void)
+{
+	real__sti();
+}
+
+
+static inline long real__save_flags(void)
+{
+	long x = current->cli & 1; __cli();
+	return x;
+}
+
+static inline void real__restore_flags(long x)
+{
+	if (x)
+		real__cli();
+	else
+		real__sti();
+}
 
 /*
  * Per process flags
diff -ur linux.orig/mm/page_alloc.c linux/mm/page_alloc.c
--- linux.orig/mm/page_alloc.c	Thu Apr 26 17:20:11 2001
+++ linux/mm/page_alloc.c	Thu Apr 26 18:14:40 2001
@@ -19,6 +19,7 @@
 #include <linux/bootmem.h>
 #include <linux/slab.h>
 
+DECLARE_WAIT_QUEUE_HEAD(memory_wait);
 int nr_swap_pages;
 int nr_active_pages;
 int nr_inactive_dirty_pages;
@@ -163,6 +164,8 @@
 	 */
 	if (memory_pressure > NR_CPUS)
 		memory_pressure--;
+
+	wake_up(&memory_wait);
 }
 
 #define MARK_USED(index, order, area) \
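
A note on the fs/buffer.c change: wait_event() can only block on a single wait
queue head, which is why the patch open-codes the sleep so the task is queued
on both buffer_wait and the new memory_wait at once. As a sketch only, the
same pattern could be factored into a reusable macro; wait_event_dual() below
is hypothetical (not part of the patch), written against 2.4 wait-queue
primitives, and it rechecks the condition in a loop rather than retrying the
whole allocation via goto try_again as the patch does.

/* Hypothetical helper: sleep until 'condition' is true, waking on
 * activity on either of two wait queue heads.  Mirrors the open-coded
 * sequence in create_buffers(), but loops here instead of re-running
 * the caller's slow path.
 */
#define wait_event_dual(wq1, wq2, condition)			\
do {								\
	struct task_struct *__tsk = current;			\
	DECLARE_WAITQUEUE(__w1, __tsk);				\
	DECLARE_WAITQUEUE(__w2, __tsk);				\
	add_wait_queue(&(wq1), &__w1);				\
	add_wait_queue(&(wq2), &__w2);				\
	for (;;) {						\
		/* set state before testing: no lost wakeups */	\
		set_task_state(__tsk, TASK_UNINTERRUPTIBLE);	\
		if (condition)					\
			break;					\
		schedule();					\
	}							\
	set_task_state(__tsk, TASK_RUNNING);			\
	remove_wait_queue(&(wq2), &__w2);			\
	remove_wait_queue(&(wq1), &__w1);			\
} while (0)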
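The asm-i386/system.h change is the core trick: instead of executing cli/sti
(which serialise the pipeline), __cli() merely sets bit 0 of the new
current->cli byte, and __sti() clears it with a plain andb, branching to the
out-of-line __sti_helper only if other bits remain set. The patch shows
neither __sti_helper nor the interrupt-entry side, so the stand-alone C model
below is an assumption about how they fit together: bits 1..6 of the flag
byte are taken to record interrupts that arrived while the task had
interrupts soft-disabled, and sti_helper() replays them. It compiles and runs
in user space purely to illustrate the control flow.

#include <stdio.h>

static unsigned char cli_flag;		/* models current->cli */

static void deliver(int irq)		/* models invoking a handler */
{
	printf("handling irq %d\n", irq);
}

/* Models the __sti_helper the asm calls: replay interrupts marked
 * pending in bits 1..6 while the soft-cli bit was set.
 * (Assumed behaviour -- the real helper is not in the patch.)
 */
static void sti_helper(unsigned char *flag)
{
	int irq;

	for (irq = 1; irq <= 6; irq++) {
		if (*flag & (1 << irq)) {
			*flag &= ~(1 << irq);
			deliver(irq);
		}
	}
}

static void soft_cli(void)		/* models real__cli() */
{
	cli_flag |= 1;			/* "orb $1,current->cli" */
}

static void soft_sti(void)		/* models real__sti() */
{
	cli_flag &= 0x7e;		/* "andb $0x7e,...": clear bit 0 (and bit 7) */
	if (cli_flag)			/* "jnz 2f": pending interrupts recorded? */
		sti_helper(&cli_flag);	/* out-of-line slow path */
}

/* Models the interrupt entry path the patch would need but does not
 * show: if the current task has interrupts soft-disabled, only mark
 * the irq pending; otherwise handle it immediately.
 */
static void irq_arrives(int irq)
{
	if (cli_flag & 1)
		cli_flag |= 1 << irq;
	else
		deliver(irq);
}

int main(void)
{
	soft_cli();
	irq_arrives(3);			/* deferred: bit 3 set in cli_flag */
	irq_arrives(5);			/* deferred: bit 5 set */
	soft_sti();			/* replays irqs 3 and 5 */
	irq_arrives(2);			/* delivered immediately */
	return 0;
}

On this reading, saving the interrupt state is just copying bit 0 of
current->cli, which is why real__save_flags() in the sched.h hunk needs no
pushfl/popfl at all.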