diff -urN ref/arch/i386/lib/usercopy.c unlock/arch/i386/lib/usercopy.c
--- ref/arch/i386/lib/usercopy.c	Fri Sep 15 17:31:10 2000
+++ unlock/arch/i386/lib/usercopy.c	Fri Sep 15 17:31:28 2000
@@ -31,6 +31,8 @@
 #define __do_strncpy_from_user(dst,src,count,res)		\
 do {								\
 	int __d0, __d1, __d2;					\
+	DECLARE_LOCAL_LOCK_DEPTH(lock_depth);			\
+	release_kernel_lock_save(lock_depth);			\
 	__asm__ __volatile__(					\
 		"	testl %1,%1\n"				\
 		"	jz 2f\n"				\
@@ -55,6 +57,7 @@
 	: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
 	: "memory");						\
 	conditional_schedule();					\
+	reacquire_kernel_lock_restore(lock_depth);		\
 } while (0)
 
 long
@@ -82,6 +85,8 @@
 #define __do_clear_user(addr,size)				\
 do {								\
 	int __d0;						\
+	DECLARE_LOCAL_LOCK_DEPTH(lock_depth);			\
+	release_kernel_lock_save(lock_depth);			\
 	__asm__ __volatile__(					\
 		"0:	rep; stosl\n"				\
 		"	movl %2,%0\n"				\
@@ -99,6 +104,7 @@
 		: "=&c"(size), "=&D" (__d0)			\
 		: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
 	conditional_schedule();					\
+	reacquire_kernel_lock_restore(lock_depth);		\
 } while (0)
 
 unsigned long
@@ -126,7 +132,9 @@
 {
 	unsigned long mask = -__addr_ok(s);
 	unsigned long res, tmp;
+	DECLARE_LOCAL_LOCK_DEPTH(lock_depth);
 
+	release_kernel_lock_save(lock_depth);
 	__asm__ __volatile__(
 		"	andl %0,%%ecx\n"
 		"0:	repne; scasb\n"
@@ -146,5 +154,6 @@
 		:"0" (n), "1" (s), "2" (0), "3" (mask)
 		:"cc");
 	conditional_schedule();
+	reacquire_kernel_lock_restore(lock_depth);
 	return res & mask;
 }
diff -urN ref/include/asm-i386/smplock.h unlock/include/asm-i386/smplock.h
--- ref/include/asm-i386/smplock.h	Thu Sep 14 19:28:09 2000
+++ unlock/include/asm-i386/smplock.h	Fri Sep 15 17:31:28 2000
@@ -26,6 +26,25 @@
 		spin_lock(&kernel_flag);		\
 } while (0)
 
+#define DECLARE_LOCAL_LOCK_DEPTH(x) int x
+
+#define release_kernel_lock_save(local_depth)		\
+do {							\
+	(local_depth) = current->lock_depth;		\
+	if ((local_depth) >= 0) {			\
+		current->lock_depth = -1;		\
+		spin_unlock(&kernel_flag);		\
+	}						\
+} while (0)
+
+#define reacquire_kernel_lock_restore(local_depth)	\
+do {							\
+	if ((local_depth) >= 0) {			\
+		current->lock_depth = local_depth;	\
+		spin_lock(&kernel_flag);		\
+	}						\
+} while (0)
+
 /*
  * Getting the big kernel lock.
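
Note on the two new smplock.h primitives above: release_kernel_lock_save()
saves the caller's BKL recursion depth in a local variable and drops the lock
only if it was actually held (lock_depth >= 0); reacquire_kernel_lock_restore()
re-takes it at the saved depth under the same condition, so the pair is safe in
code that may or may not be running under the big kernel lock. A minimal usage
sketch follows; long_running_copy() is a made-up stand-in for the rep;movs
loops patched below:

	#include <linux/smp_lock.h>

	static long example_path(void *dst, const void *src, long n)
	{
		DECLARE_LOCAL_LOCK_DEPTH(lock_depth);

		/* Drop the BKL (if held) so other CPUs can take it
		 * while we grind through a potentially large copy. */
		release_kernel_lock_save(lock_depth);

		long_running_copy(dst, src, n);	/* hypothetical */

		/* Re-take the BKL at the remembered depth, but only
		 * if we really held it on entry. */
		reacquire_kernel_lock_restore(lock_depth);
		return n;
	}
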
diff -urN ref/include/asm-i386/uaccess.h unlock/include/asm-i386/uaccess.h
--- ref/include/asm-i386/uaccess.h	Fri Sep 15 17:31:10 2000
+++ unlock/include/asm-i386/uaccess.h	Fri Sep 15 17:31:28 2000
@@ -7,6 +7,7 @@
 #include <linux/config.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
+#include <linux/smp_lock.h>
 #include <asm/page.h>
 
 #define VERIFY_READ 0
@@ -254,6 +255,8 @@
 #define __copy_user(to,from,size)				\
 do {								\
 	int __d0, __d1;						\
+	DECLARE_LOCAL_LOCK_DEPTH(lock_depth);			\
+	release_kernel_lock_save(lock_depth);			\
 	__asm__ __volatile__(					\
 		"0:	rep; movsl\n"				\
 		"	movl %3,%0\n"				\
@@ -272,11 +275,14 @@
 		: "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \
 		: "memory");					\
 	conditional_schedule();					\
+	reacquire_kernel_lock_restore(lock_depth);		\
 } while (0)
 
 #define __copy_user_zeroing(to,from,size)			\
 do {								\
 	int __d0, __d1;						\
+	DECLARE_LOCAL_LOCK_DEPTH(lock_depth);			\
+	release_kernel_lock_save(lock_depth);			\
 	__asm__ __volatile__(					\
 		"0:	rep; movsl\n"				\
 		"	movl %3,%0\n"				\
@@ -301,6 +307,7 @@
 		: "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \
 		: "memory");					\
 	conditional_schedule();					\
+	reacquire_kernel_lock_restore(lock_depth);		\
 } while (0)
 
 /* We let the __ versions of copy_from/to_user inline, because they're often
@@ -325,8 +332,10 @@
 #define __constant_copy_user(to, from, size)			\
 do {								\
 	int __d0, __d1;						\
+	DECLARE_LOCAL_LOCK_DEPTH(lock_depth);			\
 	switch (size & 3) {					\
 	default:						\
+		release_kernel_lock_save(lock_depth);		\
 		__asm__ __volatile__(				\
 			"0:	rep; movsl\n"			\
 			"1:\n"					\
@@ -342,6 +351,7 @@
 			: "1"(from), "2"(to), "0"(size/4)	\
 			: "memory");				\
 		conditional_schedule();				\
+		reacquire_kernel_lock_restore(lock_depth);	\
 		break;						\
 	case 1:							\
 		__asm__ __volatile__(				\
@@ -410,8 +420,10 @@
 #define __constant_copy_user_zeroing(to, from, size)		\
 do {								\
 	int __d0, __d1;						\
+	DECLARE_LOCAL_LOCK_DEPTH(lock_depth);			\
 	switch (size & 3) {					\
 	default:						\
+		release_kernel_lock_save(lock_depth);		\
 		__asm__ __volatile__(				\
 			"0:	rep; movsl\n"			\
 			"1:\n"					\
@@ -433,6 +445,7 @@
 			: "1"(from), "2"(to), "0"(size/4)	\
 			: "memory");				\
 		conditional_schedule();				\
+		reacquire_kernel_lock_restore(lock_depth);	\
 		break;						\
 	case 1:							\
 		__asm__ __volatile__(				\
diff -urN ref/include/linux/smp_lock.h unlock/include/linux/smp_lock.h
--- ref/include/linux/smp_lock.h	Mon Sep 11 19:30:39 2000
+++ unlock/include/linux/smp_lock.h	Fri Sep 15 17:32:35 2000
@@ -8,6 +8,10 @@
 #define release_kernel_lock(task, cpu)		do { } while(0)
 #define reacquire_kernel_lock(task)		do { } while(0)
 
+#define DECLARE_LOCAL_LOCK_DEPTH(x)
+#define release_kernel_lock_save(x)		do {} while(0)
+#define reacquire_kernel_lock_restore(x)	do {} while(0)
+
 #else
 
 #include <asm/smplock.h>
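
Note: on uniprocessor builds the three additions to linux/smp_lock.h above stub
the new primitives out entirely, so the usercopy fast paths pay nothing when
there is no BKL to drop. Declaring the local through DECLARE_LOCAL_LOCK_DEPTH()
rather than as a bare int is what makes this work: with CONFIG_SMP unset the
declaration itself vanishes, leaving no unused variable behind. Roughly, on a
!CONFIG_SMP build the patched sequence preprocesses down to:

	;		/* DECLARE_LOCAL_LOCK_DEPTH(lock_depth); */
	do {} while(0);	/* release_kernel_lock_save(lock_depth); */
	/* ... the inline-asm copy runs with no extra locking work ... */
	do {} while(0);	/* reacquire_kernel_lock_restore(lock_depth); */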