diff -urNp setaff-ref/arch/i386/kernel/entry.S setaff/arch/i386/kernel/entry.S --- setaff-ref/arch/i386/kernel/entry.S Fri Aug 9 14:52:06 2002 +++ setaff/arch/i386/kernel/entry.S Sun Sep 1 17:21:01 2002 @@ -637,8 +637,8 @@ ENTRY(sys_call_table) .long SYMBOL_NAME(sys_tkill) .long SYMBOL_NAME(sys_ni_syscall) /* reserved for sendfile64 */ .long SYMBOL_NAME(sys_ni_syscall) /* 240 reserved for futex */ - .long SYMBOL_NAME(sys_ni_syscall) /* reserved for sched_setaffinity */ - .long SYMBOL_NAME(sys_ni_syscall) /* reserved for sched_getaffinity */ + .long SYMBOL_NAME(sys_sched_setaffinity) + .long SYMBOL_NAME(sys_sched_getaffinity) .rept NR_syscalls-(.-sys_call_table)/4 .long SYMBOL_NAME(sys_ni_syscall) diff -urNp setaff-ref/arch/ppc/kernel/misc.S setaff/arch/ppc/kernel/misc.S --- setaff-ref/arch/ppc/kernel/misc.S Thu Aug 29 02:13:05 2002 +++ setaff/arch/ppc/kernel/misc.S Sun Sep 1 17:21:01 2002 @@ -1159,6 +1159,22 @@ _GLOBAL(sys_call_table) .long sys_mincore .long sys_gettid .long sys_tkill + .long sys_ni_syscall + .long sys_ni_syscall /* 210 */ + .long sys_ni_syscall + .long sys_ni_syscall + .long sys_ni_syscall + .long sys_ni_syscall + .long sys_ni_syscall /* 215 */ + .long sys_ni_syscall + .long sys_ni_syscall + .long sys_ni_syscall + .long sys_ni_syscall + .long sys_ni_syscall /* 220 */ + .long sys_ni_syscall + .long sys_sched_setaffinity + .long sys_sched_getaffinity + .rept NR_syscalls-(.-sys_call_table)/4 .long sys_ni_syscall .endr diff -urNp setaff-ref/include/asm-ppc/unistd.h setaff/include/asm-ppc/unistd.h --- setaff-ref/include/asm-ppc/unistd.h Fri Aug 9 14:52:28 2002 +++ setaff/include/asm-ppc/unistd.h Sun Sep 1 17:21:01 2002 @@ -216,6 +216,8 @@ #define __NR_mincore 206 #define __NR_gettid 207 #define __NR_tkill 208 +#define __NR_sched_setaffinity 222 +#define __NR_sched_getaffinity 223 #define __NR(n) #n diff -urNp setaff-ref/include/linux/capability.h setaff/include/linux/capability.h --- setaff-ref/include/linux/capability.h Tue Jul 16 23:56:42 2002 +++ 
setaff/include/linux/capability.h Sun Sep 1 17:21:01 2002 @@ -243,6 +243,7 @@ typedef __u32 kernel_cap_t; /* Allow use of FIFO and round-robin (realtime) scheduling on own processes and setting the scheduling algorithm used by another process. */ +/* Allow setting cpu affinity on other processes */ #define CAP_SYS_NICE 23 diff -urNp setaff-ref/kernel/sched.c setaff/kernel/sched.c --- setaff-ref/kernel/sched.c Thu Aug 29 02:13:21 2002 +++ setaff/kernel/sched.c Sun Sep 1 17:21:22 2002 @@ -1026,6 +1026,97 @@ out_unlock: return retval; } +/** + * sys_sched_setaffinity - set the cpu affinity of a process + * @pid: pid of the process + * @len: length in bytes of the bitmask pointed to by user_mask_ptr + * @user_mask_ptr: user-space pointer to the new cpu mask + */ +asmlinkage int sys_sched_setaffinity(pid_t pid, unsigned int len, + unsigned long *user_mask_ptr) +{ + unsigned long new_mask; + task_t *p; + int retval; + + if (len < sizeof(new_mask)) + return -EINVAL; + + if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask))) + return -EFAULT; + + new_mask &= cpu_online_map; + if (!new_mask) + return -EINVAL; + + /* + * We cannot hold a lock across a call to set_cpus_allowed, however + * we need to ensure our task does not slip out from under us. Since + * we are only concerned that its task_struct remains, we can pin it + * here and decrement the usage count when we are done. 
+ */ + read_lock(&tasklist_lock); + + p = find_process_by_pid(pid); + if (!p) { + read_unlock(&tasklist_lock); + return -ESRCH; + } + + get_task_struct(p); + read_unlock(&tasklist_lock); + + retval = -EPERM; + if ((current->euid != p->euid) && (current->euid != p->uid) && + !capable(CAP_SYS_NICE)) + goto out_unlock; + + retval = 0; + set_cpus_allowed(p, new_mask); + +out_unlock: + free_task_struct(p); + return retval; +} + +/** + * sys_sched_getaffinity - get the cpu affinity of a process + * @pid: pid of the process + * @len: length in bytes of the bitmask pointed to by user_mask_ptr + * @user_mask_ptr: user-space pointer to hold the current cpu mask + */ +asmlinkage int sys_sched_getaffinity(pid_t pid, unsigned int len, + unsigned long *user_mask_ptr) +{ + unsigned long mask; + unsigned int real_len; + task_t *p; + int retval; + + real_len = sizeof(mask); + + if (len < real_len) + return -EINVAL; + + read_lock(&tasklist_lock); + + retval = -ESRCH; + p = find_process_by_pid(pid); + if (!p) + goto out_unlock; + + retval = 0; + mask = p->cpus_allowed & cpu_online_map; + +out_unlock: + read_unlock(&tasklist_lock); + if (retval) + return retval; + if (copy_to_user(user_mask_ptr, &mask, real_len)) + return -EFAULT; + return real_len; +} + asmlinkage long sys_sched_yield(void) { /*