From: Nick Piggin

Move some fork-related scheduler policy from fork.c to sched.c where it
belongs.


 include/linux/sched.h |    1 +
 kernel/fork.c         |   30 +++---------------------------
 kernel/sched.c        |   35 +++++++++++++++++++++++++++++++++++
 3 files changed, 39 insertions(+), 27 deletions(-)

diff -puN include/linux/sched.h~np-sched-01-sched-fork-cleanup include/linux/sched.h
--- 25/include/linux/sched.h~np-sched-01-sched-fork-cleanup	2003-09-01 02:05:41.000000000 -0700
+++ 25-akpm/include/linux/sched.h	2003-09-01 02:05:41.000000000 -0700
@@ -561,6 +561,7 @@ extern int FASTCALL(wake_up_state(struct
 extern int FASTCALL(wake_up_process(struct task_struct * tsk));
 extern int FASTCALL(wake_up_process_kick(struct task_struct * tsk));
 extern void FASTCALL(wake_up_forked_process(struct task_struct * tsk));
+extern void FASTCALL(sched_fork(task_t * p));
 extern void FASTCALL(sched_exit(task_t * p));
 asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru);
 
diff -puN kernel/fork.c~np-sched-01-sched-fork-cleanup kernel/fork.c
--- 25/kernel/fork.c~np-sched-01-sched-fork-cleanup	2003-09-01 02:05:41.000000000 -0700
+++ 25-akpm/kernel/fork.c	2003-09-01 02:05:41.000000000 -0700
@@ -912,33 +912,9 @@ struct task_struct *copy_process(unsigne
 	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
 	p->pdeath_signal = 0;
 
-	/*
-	 * Share the timeslice between parent and child, thus the
-	 * total amount of pending timeslices in the system doesn't change,
-	 * resulting in more scheduling fairness.
-	 */
-	local_irq_disable();
-	p->time_slice = (current->time_slice + 1) >> 1;
-	/*
-	 * The remainder of the first timeslice might be recovered by
-	 * the parent if the child exits early enough.
-	 */
-	p->first_time_slice = 1;
-	current->time_slice >>= 1;
-	p->last_run = jiffies;
-	if (!current->time_slice) {
-		/*
-		 * This case is rare, it happens when the parent has only
-		 * a single jiffy left from its timeslice. Taking the
-		 * runqueue lock is not a problem.
-		 */
-		current->time_slice = 1;
-		preempt_disable();
-		scheduler_tick(0, 0);
-		local_irq_enable();
-		preempt_enable();
-	} else
-		local_irq_enable();
+	/* Perform scheduler related accounting */
+	sched_fork(p);
+
 	/*
 	 * Ok, add it to the run-queues and make it
 	 * visible to the rest of the system.
diff -puN kernel/sched.c~np-sched-01-sched-fork-cleanup kernel/sched.c
--- 25/kernel/sched.c~np-sched-01-sched-fork-cleanup	2003-09-01 02:05:41.000000000 -0700
+++ 25-akpm/kernel/sched.c	2003-09-01 02:05:41.000000000 -0700
@@ -533,6 +533,41 @@ int wake_up_state(task_t *p, unsigned in
 }
 
 /*
+ * Perform scheduler related accounting for a newly forked process @p.
+ * @p is forked by current.
+ */
+void sched_fork(task_t *p)
+{
+	/*
+	 * Share the timeslice between parent and child, thus the
+	 * total amount of pending timeslices in the system doesn't change,
+	 * resulting in more scheduling fairness.
+	 */
+	local_irq_disable();
+	p->time_slice = (current->time_slice + 1) >> 1;
+	/*
+	 * The remainder of the first timeslice might be recovered by
+	 * the parent if the child exits early enough.
+	 */
+	p->first_time_slice = 1;
+	current->time_slice >>= 1;
+	p->last_run = jiffies;
+	if (!current->time_slice) {
+		/*
+		 * This case is rare, it happens when the parent has only
+		 * a single jiffy left from its timeslice. Taking the
+		 * runqueue lock is not a problem.
+		 */
+		current->time_slice = 1;
+		preempt_disable();
+		scheduler_tick(0, 0);
+		local_irq_enable();
+		preempt_enable();
+	} else
+		local_irq_enable();
+}
+
+/*
  * wake_up_forked_process - wake up a freshly forked process.
  *
  * This function will do some initial scheduler statistics housekeeping
_
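
For anyone reading the diff without the surrounding kernel context: the policy
being relocated is the fork-time timeslice split. The child receives half of
the parent's remaining timeslice, rounded up, and the parent keeps the other
half, so a fork never creates scheduling time out of thin air. Below is a
minimal userspace sketch of just that arithmetic; it is not kernel code, and
the struct and field names only mirror task_struct for illustration.

	#include <stdio.h>

	/* Illustrative stand-ins for the relevant task_struct fields. */
	struct task {
		unsigned int time_slice;
		int first_time_slice;
	};

	/*
	 * Mirrors the arithmetic in sched_fork(): the child takes half of
	 * the parent's remaining timeslice (rounded up), the parent keeps
	 * the rest.
	 */
	static void split_timeslice(struct task *parent, struct task *child)
	{
		child->time_slice = (parent->time_slice + 1) >> 1;
		child->first_time_slice = 1;	/* parent may reclaim it if child exits early */
		parent->time_slice >>= 1;	/* e.g. 7 ticks -> child 4, parent 3 */
	}

	int main(void)
	{
		struct task parent = { .time_slice = 7 }, child = { 0 };

		split_timeslice(&parent, &child);
		printf("parent: %u, child: %u\n", parent.time_slice, child.time_slice);
		return 0;
	}

With a parent holding 7 ticks, the child ends up with 4 and the parent with 3.
When the parent is down to a single tick its half rounds down to zero, which is
the rare case that the scheduler_tick() call in sched_fork() above handles.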