diff -urN 2.2.19pre3/arch/i386/lib/usercopy.c copy-user-reschedule-and-unlock/arch/i386/lib/usercopy.c
--- 2.2.19pre3/arch/i386/lib/usercopy.c	Mon Jan 17 16:44:33 2000
+++ copy-user-reschedule-and-unlock/arch/i386/lib/usercopy.c	Mon Jan  1 16:36:33 2001
@@ -31,6 +31,8 @@
 #define __do_strncpy_from_user(dst,src,count,res) \
 do { \
 	int __d0, __d1, __d2; \
+	DECLARE_LOCAL_LOCK_DEPTH(lock_depth); \
+	release_kernel_lock_save(lock_depth); \
 	__asm__ __volatile__( \
 		"	testl %1,%1\n" \
 		"	jz 2f\n" \
@@ -54,6 +56,8 @@
 		  "=&D" (__d2) \
 		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
 		: "memory"); \
+	conditional_schedule(); \
+	reacquire_kernel_lock_restore(lock_depth); \
 } while (0)
 
 long
@@ -81,6 +85,8 @@
 #define __do_clear_user(addr,size) \
 do { \
 	int __d0; \
+	DECLARE_LOCAL_LOCK_DEPTH(lock_depth); \
+	release_kernel_lock_save(lock_depth); \
 	__asm__ __volatile__( \
 		"0:	rep; stosl\n" \
 		"	movl %2,%0\n" \
@@ -97,6 +103,8 @@
 		".previous" \
 		: "=&c"(size), "=&D" (__d0) \
 		: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
+	conditional_schedule(); \
+	reacquire_kernel_lock_restore(lock_depth); \
 } while (0)
 
 unsigned long
@@ -124,7 +132,9 @@
 {
 	unsigned long mask = -__addr_ok(s);
 	unsigned long res, tmp;
+	DECLARE_LOCAL_LOCK_DEPTH(lock_depth);
 
+	release_kernel_lock_save(lock_depth);
 	__asm__ __volatile__(
 		"	andl %0,%%ecx\n"
 		"0:	repne; scasb\n"
@@ -143,5 +153,7 @@
 		:"=r" (n), "=D" (s), "=a" (res), "=c" (tmp)
 		:"0" (n), "1" (s), "2" (0), "3" (mask)
 		:"cc");
+	conditional_schedule();
+	reacquire_kernel_lock_restore(lock_depth);
 	return res & mask;
 }
diff -urN 2.2.19pre3/include/asm-alpha/smplock.h copy-user-reschedule-and-unlock/include/asm-alpha/smplock.h
--- 2.2.19pre3/include/asm-alpha/smplock.h	Mon Dec 11 16:58:03 2000
+++ copy-user-reschedule-and-unlock/include/asm-alpha/smplock.h	Mon Jan  1 16:36:43 2001
@@ -28,6 +28,25 @@
 	spin_lock(&kernel_flag);
 }
 
+#define DECLARE_LOCAL_LOCK_DEPTH(x) int x
+
+#define release_kernel_lock_save(local_depth) \
+do { \
+	(local_depth) = current->lock_depth; \
+	if ((local_depth) >= 0) { \
+		current->lock_depth = -1; \
+		spin_unlock(&kernel_flag); \
+	} \
+} while (0)
+
+#define reacquire_kernel_lock_restore(local_depth) \
+do { \
+	if ((local_depth) >= 0) { \
+		current->lock_depth = local_depth; \
+		spin_lock(&kernel_flag); \
+	} \
+} while (0)
+
 /*
  * Getting the big kernel lock.
  *
diff -urN 2.2.19pre3/include/asm-alpha/uaccess.h copy-user-reschedule-and-unlock/include/asm-alpha/uaccess.h
--- 2.2.19pre3/include/asm-alpha/uaccess.h	Thu Aug 24 19:11:39 2000
+++ copy-user-reschedule-and-unlock/include/asm-alpha/uaccess.h	Mon Jan  1 16:36:43 2001
@@ -3,6 +3,8 @@
 
 #include <linux/errno.h>
 #include <linux/sched.h>
+#include <linux/condsched.h>
+#include <linux/smp_lock.h>
 
 /*
@@ -402,8 +404,17 @@
 	return len;
 }
 
-#define __copy_to_user(to,from,n)   __copy_tofrom_user_nocheck((to),(from),(n))
-#define __copy_from_user(to,from,n) __copy_tofrom_user_nocheck((to),(from),(n))
+#define __copy_to_user(to,from,n) \
+({ \
+	long ret; \
+	DECLARE_LOCAL_LOCK_DEPTH(lock_depth); \
+	release_kernel_lock_save(lock_depth); \
+	ret = __copy_tofrom_user_nocheck((to),(from),(n)); \
+	conditional_schedule(); \
+	reacquire_kernel_lock_restore(lock_depth); \
+	ret; \
+})
+#define __copy_from_user(to,from,n) __copy_to_user(to,from,n)
 
 extern inline long
 copy_to_user(void *to, const void *from, long n)
@@ -430,7 +441,7 @@
 extern void __do_clear_user(void);
 
 extern inline long
-__clear_user(void *to, long len)
+____clear_user(void *to, long len)
 {
 	/* This little bit of silliness is to get the GP loaded for
 	   a function that ordinarily wouldn't.  Otherwise we could
@@ -448,20 +459,22 @@
 	return __cl_len;
 }
 
+#define __clear_user(to,len) \
+({ \
+	long ret; \
+	DECLARE_LOCAL_LOCK_DEPTH(lock_depth); \
+	release_kernel_lock_save(lock_depth); \
+	ret = ____clear_user((to),(len)); \
+	conditional_schedule(); \
+	reacquire_kernel_lock_restore(lock_depth); \
+	ret; \
+})
+
 extern inline long
 clear_user(void *to, long len)
 {
-	if (__access_ok((long)to, len, get_fs())) {
-		register void * pv __asm__("$27") = __do_clear_user;
-		register void * __cl_to __asm__("$6") = to;
-		register long __cl_len __asm__("$0") = len;
-		__asm__ __volatile__(
-			"jsr $28,(%2),__do_clear_user\n\tldgp $29,0($28)"
-			: "=r"(__cl_len), "=r"(__cl_to), "=r"(pv)
-			: "0"(__cl_len), "1"(__cl_to), "2"(pv)
-			: "$1","$2","$3","$4","$5","$28","memory");
-		len = __cl_len;
-	}
+	if (__access_ok((long)to, len, get_fs()))
+		len = __clear_user(to, len);
 	return len;
 }
 
@@ -474,8 +487,13 @@
 strncpy_from_user(char *to, const char *from, long n)
 {
 	long ret = -EFAULT;
-	if (__access_ok((long)from, 0, get_fs()))
+	if (__access_ok((long)from, 0, get_fs())) {
+		DECLARE_LOCAL_LOCK_DEPTH(lock_depth);
+		release_kernel_lock_save(lock_depth);
 		ret = __strncpy_from_user(to, from, n);
+		conditional_schedule();
+		reacquire_kernel_lock_restore(lock_depth);
+	}
 	return ret;
 }
 
@@ -484,7 +502,15 @@
 extern inline long
 strlen_user(const char *str)
 {
-	return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0;
+	long ret = 0;
+	if (access_ok(VERIFY_READ,str,0)) {
+		DECLARE_LOCAL_LOCK_DEPTH(lock_depth);
+		release_kernel_lock_save(lock_depth);
+		ret = __strlen_user(str);
+		conditional_schedule();
+		reacquire_kernel_lock_restore(lock_depth);
+	}
+	return ret;
 }
 
 /* Returns: 0 if exception before NUL or reaching the supplied limit (N),
@@ -493,7 +519,15 @@
 extern inline long
 strnlen_user(const char *str, long n)
 {
-	return access_ok(VERIFY_READ,str,0) ? __strnlen_user(str, n) : 0;
+	long ret = 0;
+	if (access_ok(VERIFY_READ,str,0)) {
+		DECLARE_LOCAL_LOCK_DEPTH(lock_depth);
+		release_kernel_lock_save(lock_depth);
+		ret = __strnlen_user(str, n);
+		conditional_schedule();
+		reacquire_kernel_lock_restore(lock_depth);
+	}
+	return ret;
 }
 
 /*
diff -urN 2.2.19pre3/include/asm-i386/smplock.h copy-user-reschedule-and-unlock/include/asm-i386/smplock.h
--- 2.2.19pre3/include/asm-i386/smplock.h	Mon Jan  1 16:36:25 2001
+++ copy-user-reschedule-and-unlock/include/asm-i386/smplock.h	Mon Jan  1 16:36:33 2001
@@ -26,6 +26,25 @@
 		spin_lock(&kernel_flag); \
 } while (0)
 
+#define DECLARE_LOCAL_LOCK_DEPTH(x) int x
+
+#define release_kernel_lock_save(local_depth) \
+do { \
+	(local_depth) = current->lock_depth; \
+	if ((local_depth) >= 0) { \
+		current->lock_depth = -1; \
+		spin_unlock(&kernel_flag); \
+	} \
+} while (0)
+
+#define reacquire_kernel_lock_restore(local_depth) \
+do { \
+	if ((local_depth) >= 0) { \
+		current->lock_depth = local_depth; \
+		spin_lock(&kernel_flag); \
+	} \
+} while (0)
+
 /*
  * Getting the big kernel lock.
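For reviewers: every conversion in this patch follows the same shape. A minimal
sketch of the intended usage follows; it is illustration only and not part of
the patch, with long_copy() as a hypothetical stand-in for the inline-asm copy
loops being wrapped:

	static long copy_with_lock_dropped(void *to, const void *from, long n)
	{
		long ret;
		DECLARE_LOCAL_LOCK_DEPTH(lock_depth);

		/* Drop the big kernel lock if this task holds it, saving
		 * the recursion depth; lock_depth < 0 means it was not held. */
		release_kernel_lock_save(lock_depth);
		ret = long_copy(to, from, n);	/* may fault and block */
		/* Honour a pending reschedule while no spinlock is held. */
		conditional_schedule();
		/* Retake the lock and restore the saved recursion depth. */
		reacquire_kernel_lock_restore(lock_depth);
		return ret;
	}

The _save/_restore forms are used instead of the existing
release_kernel_lock()/reacquire_kernel_lock() pair because the big kernel lock
is recursive: the nesting count in current->lock_depth has to survive the
unlocked window.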
diff -urN 2.2.19pre3/include/asm-i386/uaccess.h copy-user-reschedule-and-unlock/include/asm-i386/uaccess.h
--- 2.2.19pre3/include/asm-i386/uaccess.h	Mon Jan  1 16:36:25 2001
+++ copy-user-reschedule-and-unlock/include/asm-i386/uaccess.h	Mon Jan  1 16:36:33 2001
@@ -6,6 +6,8 @@
  */
 #include <linux/config.h>
 #include <linux/sched.h>
+#include <linux/condsched.h>
+#include <linux/smp_lock.h>
 #include <asm/page.h>
 
 #define VERIFY_READ 0
@@ -253,6 +255,8 @@
 #define __copy_user(to,from,size) \
 do { \
 	int __d0, __d1; \
+	DECLARE_LOCAL_LOCK_DEPTH(lock_depth); \
+	release_kernel_lock_save(lock_depth); \
 	__asm__ __volatile__( \
 		"0:	rep; movsl\n" \
 		"	movl %3,%0\n" \
@@ -270,11 +274,15 @@
 		: "=&c"(size), "=&D" (__d0), "=&S" (__d1) \
 		: "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \
 		: "memory"); \
+	conditional_schedule(); \
+	reacquire_kernel_lock_restore(lock_depth); \
 } while (0)
 
 #define __copy_user_zeroing(to,from,size) \
 do { \
 	int __d0, __d1; \
+	DECLARE_LOCAL_LOCK_DEPTH(lock_depth); \
+	release_kernel_lock_save(lock_depth); \
 	__asm__ __volatile__( \
 		"0:	rep; movsl\n" \
 		"	movl %3,%0\n" \
@@ -298,6 +306,8 @@
 		: "=&c"(size), "=&D" (__d0), "=&S" (__d1) \
 		: "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \
 		: "memory"); \
+	conditional_schedule(); \
+	reacquire_kernel_lock_restore(lock_depth); \
 } while (0)
 
 /* We let the __ versions of copy_from/to_user inline, because they're often
@@ -322,8 +332,10 @@
 #define __constant_copy_user(to, from, size) \
 do { \
 	int __d0, __d1; \
+	DECLARE_LOCAL_LOCK_DEPTH(lock_depth); \
 	switch (size & 3) { \
 	default: \
+		release_kernel_lock_save(lock_depth); \
 		__asm__ __volatile__( \
 			"0:	rep; movsl\n" \
 			"1:\n" \
@@ -338,6 +350,8 @@
 			: "=c"(size), "=&S" (__d0), "=&D" (__d1) \
 			: "1"(from), "2"(to), "0"(size/4) \
 			: "memory"); \
+		conditional_schedule(); \
+		reacquire_kernel_lock_restore(lock_depth); \
 		break; \
 	case 1: \
 		__asm__ __volatile__( \
@@ -406,8 +420,10 @@
 #define __constant_copy_user_zeroing(to, from, size) \
 do { \
 	int __d0, __d1; \
+	DECLARE_LOCAL_LOCK_DEPTH(lock_depth); \
 	switch (size & 3) { \
 	default: \
+		release_kernel_lock_save(lock_depth); \
 		__asm__ __volatile__( \
 			"0:	rep; movsl\n" \
 			"1:\n" \
@@ -428,6 +444,8 @@
 			: "=c"(size), "=&S" (__d0), "=&D" (__d1) \
 			: "1"(from), "2"(to), "0"(size/4) \
 			: "memory"); \
+		conditional_schedule(); \
+		reacquire_kernel_lock_restore(lock_depth); \
 		break; \
 	case 1: \
 		__asm__ __volatile__( \
diff -urN 2.2.19pre3/include/asm-ppc/smplock.h copy-user-reschedule-and-unlock/include/asm-ppc/smplock.h
--- 2.2.19pre3/include/asm-ppc/smplock.h	Mon Dec 11 16:58:05 2000
+++ copy-user-reschedule-and-unlock/include/asm-ppc/smplock.h	Mon Jan  1 16:36:47 2001
@@ -26,6 +26,25 @@
 		spin_lock(&kernel_flag); \
 } while (0)
 
+#define DECLARE_LOCAL_LOCK_DEPTH(x) int x
+
+#define release_kernel_lock_save(local_depth) \
+do { \
+	(local_depth) = current->lock_depth; \
+	if ((local_depth) >= 0) { \
+		current->lock_depth = -1; \
+		spin_unlock(&kernel_flag); \
+	} \
+} while (0)
+
+#define reacquire_kernel_lock_restore(local_depth) \
+do { \
+	if ((local_depth) >= 0) { \
+		current->lock_depth = local_depth; \
+		spin_lock(&kernel_flag); \
+	} \
+} while (0)
+
 /*
  * Getting the big kernel lock.
diff -urN 2.2.19pre3/include/asm-sparc/smplock.h copy-user-reschedule-and-unlock/include/asm-sparc/smplock.h
--- 2.2.19pre3/include/asm-sparc/smplock.h	Mon Dec 11 16:58:05 2000
+++ copy-user-reschedule-and-unlock/include/asm-sparc/smplock.h	Mon Jan  1 16:36:47 2001
@@ -26,6 +26,25 @@
 		spin_lock(&kernel_flag); \
 } while (0)
 
+#define DECLARE_LOCAL_LOCK_DEPTH(x) int x
+
+#define release_kernel_lock_save(local_depth) \
+do { \
+	(local_depth) = current->lock_depth; \
+	if ((local_depth) >= 0) { \
+		current->lock_depth = -1; \
+		spin_unlock(&kernel_flag); \
+	} \
+} while (0)
+
+#define reacquire_kernel_lock_restore(local_depth) \
+do { \
+	if ((local_depth) >= 0) { \
+		current->lock_depth = local_depth; \
+		spin_lock(&kernel_flag); \
+	} \
+} while (0)
+
 /*
  * Getting the big kernel lock.
diff -urN 2.2.19pre3/include/asm-sparc64/smplock.h copy-user-reschedule-and-unlock/include/asm-sparc64/smplock.h
--- 2.2.19pre3/include/asm-sparc64/smplock.h	Fri Dec 22 18:01:21 2000
+++ copy-user-reschedule-and-unlock/include/asm-sparc64/smplock.h	Mon Jan  1 16:36:47 2001
@@ -27,6 +27,25 @@
 		spin_lock(&kernel_flag); \
 } while (0)
 
+#define DECLARE_LOCAL_LOCK_DEPTH(x) int x
+
+#define release_kernel_lock_save(local_depth) \
+do { \
+	(local_depth) = current->lock_depth; \
+	if ((local_depth) >= 0) { \
+		current->lock_depth = -1; \
+		spin_unlock(&kernel_flag); \
+	} \
+} while (0)
+
+#define reacquire_kernel_lock_restore(local_depth) \
+do { \
+	if ((local_depth) >= 0) { \
+		current->lock_depth = local_depth; \
+		spin_lock(&kernel_flag); \
+	} \
+} while (0)
+
 /*
  * Getting the big kernel lock.
diff -urN 2.2.19pre3/include/linux/condsched.h copy-user-reschedule-and-unlock/include/linux/condsched.h
--- 2.2.19pre3/include/linux/condsched.h	Thu Jan  1 01:00:00 1970
+++ copy-user-reschedule-and-unlock/include/linux/condsched.h	Mon Jan  1 16:36:25 2001
@@ -0,0 +1,14 @@
+#ifndef _LINUX_CONDSCHED_H
+#define _LINUX_CONDSCHED_H
+
+#ifndef __ASSEMBLY__
+#define conditional_schedule() \
+do { \
+	if (current->need_resched) { \
+		current->state = TASK_RUNNING; \
+		schedule(); \
+	} \
+} while(0)
+#endif
+
+#endif
diff -urN 2.2.19pre3/include/linux/smp_lock.h copy-user-reschedule-and-unlock/include/linux/smp_lock.h
--- 2.2.19pre3/include/linux/smp_lock.h	Mon Jan  1 16:36:25 2001
+++ copy-user-reschedule-and-unlock/include/linux/smp_lock.h	Mon Jan  1 16:36:33 2001
@@ -8,6 +8,10 @@
 #define release_kernel_lock(task, cpu)		do { } while(0)
 #define reacquire_kernel_lock(task)		do { } while(0)
 
+#define DECLARE_LOCAL_LOCK_DEPTH(x)
+#define release_kernel_lock_save(x)		do {} while(0)
+#define reacquire_kernel_lock_restore(x)	do {} while(0)
+
 #else
 
 #include <asm/smplock.h>
diff -urN 2.2.19pre3/net/ipv4/tcp.c copy-user-reschedule-and-unlock/net/ipv4/tcp.c
--- 2.2.19pre3/net/ipv4/tcp.c	Sat Dec 23 06:27:16 2000
+++ copy-user-reschedule-and-unlock/net/ipv4/tcp.c	Mon Jan  1 16:36:39 2001
@@ -416,6 +416,8 @@
 #include
 #include
 #include
+#include <linux/condsched.h>
+#include <linux/smp_lock.h>
 #include
 #include
@@ -767,6 +769,7 @@
 	int iovlen, flags;
 	int mss_now;
 	int err, copied;
+	DECLARE_LOCAL_LOCK_DEPTH(lock_depth);
 
 	lock_sock(sk);
 
@@ -848,10 +851,13 @@
 				skb->csum = csum_partial(skb->data,
 							 skb->len, 0);
 			} else {
+				release_kernel_lock_save(lock_depth);
 				skb->csum =
 					csum_and_copy_from_user(
 					from, skb_put(skb, copy),
 					copy, skb->csum, &err);
+				conditional_schedule();
+				reacquire_kernel_lock_restore(lock_depth);
 			}
 
 			/*
@@ -966,8 +972,11 @@
 			 * Reserve header space and checksum the data.
 			 */
 			skb_reserve(skb, MAX_HEADER + sk->prot->max_header);
+			release_kernel_lock_save(lock_depth);
 			skb->csum = csum_and_copy_from_user(from,
 					skb_put(skb, copy), copy, 0, &err);
+			conditional_schedule();
+			reacquire_kernel_lock_restore(lock_depth);
 
 			if (err)
 				goto do_fault;
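On uniprocessor builds the three smp_lock.h stubs above compile to nothing, so
the only residual cost of each wrapped copy is the need_resched test. Expanding
the earlier sketch on UP (again with the hypothetical long_copy()) leaves just:

	ret = long_copy(to, from, n);
	if (current->need_resched) {	/* conditional_schedule() */
		current->state = TASK_RUNNING;
		schedule();
	}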