diff -urNp affinity-ref/arch/i386/kernel/entry.S affinity/arch/i386/kernel/entry.S
--- affinity-ref/arch/i386/kernel/entry.S	Wed May 29 20:20:16 2002
+++ affinity/arch/i386/kernel/entry.S	Wed May 29 20:22:45 2002
@@ -639,8 +639,8 @@ ENTRY(sys_call_table)
 	.long SYMBOL_NAME(sys_tkill)
 	.long SYMBOL_NAME(sys_ni_syscall)	/* reserved for sendfile64 */
 	.long SYMBOL_NAME(sys_ni_syscall)	/* 240 reserved for futex */
-	.long SYMBOL_NAME(sys_ni_syscall)	/* reserved for sched_setaffinity */
-	.long SYMBOL_NAME(sys_ni_syscall)	/* reserved for sched_getaffinity */
+	.long SYMBOL_NAME(sys_sched_setaffinity)
+	.long SYMBOL_NAME(sys_sched_getaffinity)
 
 	.rept NR_syscalls-(.-sys_call_table)/4
 		.long SYMBOL_NAME(sys_ni_syscall)
diff -urNp affinity-ref/arch/ppc/kernel/misc.S affinity/arch/ppc/kernel/misc.S
--- affinity-ref/arch/ppc/kernel/misc.S	Wed May 29 02:12:23 2002
+++ affinity/arch/ppc/kernel/misc.S	Wed May 29 20:25:35 2002
@@ -1162,6 +1162,22 @@ _GLOBAL(sys_call_table)
 	.long sys_mincore
 	.long sys_gettid
 	.long sys_tkill
+	.long sys_ni_syscall
+	.long sys_ni_syscall	/* 210 */
+	.long sys_ni_syscall
+	.long sys_ni_syscall
+	.long sys_ni_syscall
+	.long sys_ni_syscall
+	.long sys_ni_syscall	/* 215 */
+	.long sys_ni_syscall
+	.long sys_ni_syscall
+	.long sys_ni_syscall
+	.long sys_ni_syscall
+	.long sys_ni_syscall	/* 220 */
+	.long sys_ni_syscall
+	.long sys_sched_setaffinity
+	.long sys_sched_getaffinity
+
 	.rept NR_syscalls-(.-sys_call_table)/4
 	.long sys_ni_syscall
 	.endr
diff -urNp affinity-ref/include/asm-ppc/unistd.h affinity/include/asm-ppc/unistd.h
--- affinity-ref/include/asm-ppc/unistd.h	Wed May 29 02:12:38 2002
+++ affinity/include/asm-ppc/unistd.h	Wed May 29 20:25:35 2002
@@ -216,6 +216,8 @@
 #define __NR_mincore		206
 #define __NR_gettid		207
 #define __NR_tkill		208
+#define __NR_sched_setaffinity	222
+#define __NR_sched_getaffinity	223
 
 #define __NR(n)		#n
 
diff -urNp affinity-ref/include/linux/capability.h affinity/include/linux/capability.h
--- affinity-ref/include/linux/capability.h	Fri May 3 20:23:55 2002
+++ affinity/include/linux/capability.h	Wed May 29 20:22:45 2002
@@ -243,6 +243,7 @@ typedef __u32 kernel_cap_t;
 /* Allow use of FIFO and round-robin (realtime) scheduling on own
    processes and setting the scheduling algorithm used by another
    process. */
+/* Allow setting cpu affinity on other processes */
 
 #define CAP_SYS_NICE	23
 
diff -urNp affinity-ref/kernel/sched.c affinity/kernel/sched.c
--- affinity-ref/kernel/sched.c	Wed May 29 20:20:16 2002
+++ affinity/kernel/sched.c	Wed May 29 20:22:49 2002
@@ -1179,6 +1179,97 @@ out_unlock:
 	return retval;
 }
 
+/**
+ * sys_sched_setaffinity - set the cpu affinity of a process
+ * @pid: pid of the process
+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
+ * @user_mask_ptr: user-space pointer to the new cpu mask
+ */
+asmlinkage int sys_sched_setaffinity(pid_t pid, unsigned int len,
+				     unsigned long *user_mask_ptr)
+{
+	unsigned long new_mask;
+	task_t *p;
+	int retval;
+
+	if (len < sizeof(new_mask))
+		return -EINVAL;
+
+	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
+		return -EFAULT;
+
+	new_mask &= cpu_online_map;
+	if (!new_mask)
+		return -EINVAL;
+
+	/*
+	 * We cannot hold a lock across a call to set_cpus_allowed; however,
+	 * we need to ensure the task does not slip out from under us. Since
+	 * we are only concerned that its task_struct remains, we can pin it
+	 * here and decrement the usage count when we are done.
+	 */
+	read_lock(&tasklist_lock);
+
+	p = find_process_by_pid(pid);
+	if (!p) {
+		read_unlock(&tasklist_lock);
+		return -ESRCH;
+	}
+
+	get_task_struct(p);
+	read_unlock(&tasklist_lock);
+
+	retval = -EPERM;
+	if ((current->euid != p->euid) && (current->euid != p->uid) &&
+	    !capable(CAP_SYS_NICE))
+		goto out_unlock;
+
+	retval = 0;
+	set_cpus_allowed(p, new_mask);
+
+out_unlock:
+	free_task_struct(p);
+	return retval;
+}
+
+/**
+ * sys_sched_getaffinity - get the cpu affinity of a process
+ * @pid: pid of the process
+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
+ * @user_mask_ptr: user-space pointer to hold the current cpu mask
+ */
+asmlinkage int sys_sched_getaffinity(pid_t pid, unsigned int len,
+				     unsigned long *user_mask_ptr)
+{
+	unsigned long mask;
+	unsigned int real_len;
+	task_t *p;
+	int retval;
+
+	real_len = sizeof(mask);
+
+	if (len < real_len)
+		return -EINVAL;
+
+	read_lock(&tasklist_lock);
+
+	retval = -ESRCH;
+	p = find_process_by_pid(pid);
+	if (!p)
+		goto out_unlock;
+
+	retval = 0;
+	mask = p->cpus_allowed & cpu_online_map;
+
+out_unlock:
+	read_unlock(&tasklist_lock);
+	if (retval)
+		return retval;
+	if (copy_to_user(user_mask_ptr, &mask, real_len))
+		return -EFAULT;
+	return real_len;
+}
+
 asmlinkage long sys_sched_yield(void)
 {
 	runqueue_t *rq;
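
For reference, here is a minimal user-space sketch of how the new calls could be exercised once the patch is applied. This harness is hypothetical and not part of the patch: no glibc wrapper exists for these calls, so it goes through syscall(2) directly, and it hardcodes the i386 syscall numbers (241/242) assigned by the entry.S hunk above; ppc uses 222/223 per the asm-ppc/unistd.h hunk. The mask is a single unsigned long, matching the kernel-side sizeof() checks, and a pid of 0 resolves to the calling process via find_process_by_pid().

/*
 * affinity-test.c: hypothetical demo for the new affinity syscalls.
 * Syscall numbers are the i386 ones from this patch; adjust per arch.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity	241	/* i386, per this patch */
#define __NR_sched_getaffinity	242
#endif

int main(void)
{
	unsigned long mask = 1UL << 0;	/* run only on CPU 0 */
	long ret;

	/* pid 0 means the calling process */
	ret = syscall(__NR_sched_setaffinity, 0, sizeof(mask), &mask);
	if (ret < 0) {
		perror("sched_setaffinity");
		return 1;
	}

	mask = 0;
	/* on success the return value is the kernel's mask size (real_len) */
	ret = syscall(__NR_sched_getaffinity, 0, sizeof(mask), &mask);
	if (ret < 0) {
		perror("sched_getaffinity");
		return 1;
	}

	printf("allowed cpus: 0x%lx (kernel mask is %ld bytes)\n", mask, ret);
	return 0;
}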