diff -urN numa-sched/include/linux/sched.h parent/include/linux/sched.h
--- numa-sched/include/linux/sched.h	Tue Apr 17 04:25:11 2001
+++ parent/include/linux/sched.h	Tue Apr 17 04:26:06 2001
@@ -305,7 +305,8 @@
 	int nice;
 	unsigned int policy;
 	struct mm_struct *mm;
-	int has_cpu, processor;
+	int has_cpu;
+	int processor;
 	unsigned long cpus_allowed;
 	/*
 	 * (only the 'next' pointer fits into the cacheline, but
@@ -315,6 +316,7 @@
 #ifdef CONFIG_NUMA_SCHED
 	int nid;
 #endif
+	int get_child_timeslice;
 
 	struct task_struct *next_task, *prev_task;
 	struct mm_struct *active_mm;
diff -urN numa-sched/kernel/exit.c parent/kernel/exit.c
--- numa-sched/kernel/exit.c	Thu Feb 22 03:45:13 2001
+++ parent/kernel/exit.c	Tue Apr 17 04:25:41 2001
@@ -59,9 +59,11 @@
 		 * timeslices, because any timeslice recovered here
 		 * was given away by the parent in the first place.)
 		 */
-		current->counter += p->counter;
-		if (current->counter >= MAX_COUNTER)
-			current->counter = MAX_COUNTER;
+		if (p->get_child_timeslice) {
+			current->counter += p->counter;
+			if (current->counter >= MAX_COUNTER)
+				current->counter = MAX_COUNTER;
+		}
 		free_task_struct(p);
 	} else {
 		printk("task releasing itself\n");
@@ -168,6 +170,7 @@
 			p->exit_signal = SIGCHLD;
 			p->self_exec_id++;
 			p->p_opptr = reaper;
+			p->get_child_timeslice = 0;
 			if (p->pdeath_signal) send_sig(p->pdeath_signal, p, 0);
 		}
 	}
diff -urN numa-sched/kernel/fork.c parent/kernel/fork.c
--- numa-sched/kernel/fork.c	Tue Apr 17 04:25:11 2001
+++ parent/kernel/fork.c	Tue Apr 17 04:25:41 2001
@@ -676,6 +676,8 @@
 	p->counter = current->counter;
 	current->counter = 0;
 	current->need_resched = 1;
+	/* Tell the parent if it can get back its timeslice when child exits */
+	p->get_child_timeslice = 1;
 
 	/*
 	 * Ok, add it to the run-queues and make it
diff -urN numa-sched/kernel/sched.c parent/kernel/sched.c
--- numa-sched/kernel/sched.c	Tue Apr 17 04:25:11 2001
+++ parent/kernel/sched.c	Tue Apr 17 04:25:41 2001
@@ -750,6 +750,7 @@
 				continue;
 #endif
 			p->counter = (p->counter >> 1) + NICE_TO_TICKS(p->nice);
+			p->get_child_timeslice = 0;
 		}
 		read_unlock(&tasklist_lock);
 		spin_lock_irq(&runqueue_lock);
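
For readers who want the semantics without applying the patch, the standalone userspace sketch below (not part of the patch, plain C, with simplified stand-ins for the kernel's MAX_COUNTER and task_struct) mirrors what the diff introduces: get_child_timeslice is set when the parent hands its slice to the child at fork, cleared when the task is reparented or when the scheduler recalculates counters, and checked in the release path before the child's leftover ticks are folded back into the parent.

/* Minimal sketch of the get_child_timeslice logic above.
 * The struct, constants and helper names are illustrative, not kernel code. */
#include <stdio.h>

#define MAX_COUNTER 60   /* stand-in for the kernel's MAX_COUNTER */

struct task {
	int counter;              /* remaining timeslice ticks */
	int get_child_timeslice;  /* may the parent reclaim them? */
};

/* fork path: the child takes the whole slice and the flag is set */
static void sketch_fork(struct task *parent, struct task *child)
{
	child->counter = parent->counter;
	parent->counter = 0;
	child->get_child_timeslice = 1;
}

/* reparenting or a global counter recalculation clears the flag,
 * so an unrelated reaper never inherits the ticks */
static void sketch_clear(struct task *p)
{
	p->get_child_timeslice = 0;
}

/* exit path: return leftover ticks only if the flag survived, capped */
static void sketch_release(struct task *parent, struct task *child)
{
	if (child->get_child_timeslice) {
		parent->counter += child->counter;
		if (parent->counter >= MAX_COUNTER)
			parent->counter = MAX_COUNTER;
	}
}

int main(void)
{
	struct task parent = { .counter = 10 }, child = { 0 };

	sketch_fork(&parent, &child);
	child.counter -= 3;               /* child runs for a while   */
	sketch_release(&parent, &child);  /* parent gets 7 ticks back */
	printf("parent counter: %d\n", parent.counter);

	sketch_clear(&child);             /* e.g. after reparenting:   */
	sketch_release(&parent, &child);  /* no further ticks returned */
	printf("parent counter: %d\n", parent.counter);
	return 0;
}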