diff -urN linux-2.4.20-pre5-ac4-rml/kernel/sched.c linux/kernel/sched.c
--- linux-2.4.20-pre5-ac4-rml/kernel/sched.c	Wed Sep 11 17:44:00 2002
+++ linux/kernel/sched.c	Wed Sep 11 17:43:47 2002
@@ -101,16 +101,23 @@
 	((p)->prio <= (p)->static_prio - DELTA(p))
 
 /*
- * TASK_TIMESLICE scales user-nice values [ -20 ... 19 ]
+ * BASE_TIMESLICE scales user-nice values [ -20 ... 19 ]
  * to time slice values.
  *
- * The higher a process's priority, the bigger timeslices
+ * The higher a thread's priority, the bigger timeslices
  * it gets during one round of execution. But even the lowest
- * priority process gets MIN_TIMESLICE worth of execution time.
+ * priority thread gets MIN_TIMESLICE worth of execution time.
+ *
+ * task_timeslice() is the interface that is used by the scheduler.
  */
+#define BASE_TIMESLICE(p) (MIN_TIMESLICE + \
+	((MAX_TIMESLICE - MIN_TIMESLICE) * \
+	 (MAX_PRIO-1-(p)->static_prio)/(MAX_USER_PRIO - 1)))
 
-#define TASK_TIMESLICE(p) (MIN_TIMESLICE + \
-	((MAX_TIMESLICE - MIN_TIMESLICE) * (MAX_PRIO-1-(p)->static_prio)/39))
+static inline unsigned int task_timeslice(task_t *p)
+{
+	return BASE_TIMESLICE(p);
+}
 
 /*
  * These are the runqueue data structures:
@@ -135,8 +142,8 @@
  */
 struct runqueue {
 	spinlock_t lock;
-	unsigned long nr_running, nr_switches, expired_timestamp;
-	signed long nr_uninterruptible;
+	unsigned long nr_running, nr_switches, expired_timestamp,
+			nr_uninterruptible;
 	task_t *curr, *idle;
 	prio_array_t *active, *expired, arrays[2];
 	int prev_nr_running[NR_CPUS];
@@ -204,8 +211,7 @@
 
 static inline void rq_unlock(runqueue_t *rq)
 {
-	spin_unlock(&rq->lock);
-	local_irq_enable();
+	spin_unlock_irq(&rq->lock);
 }
 
 /*
@@ -545,13 +551,38 @@
 	return sum;
 }
 
-/**
- * idle_cpu - is a given cpu idle currently?
- * @cpu: the processor in question.
+/*
+ * double_rq_lock - safely lock two runqueues
+ *
+ * Note this does not disable interrupts like task_rq_lock,
+ * you need to do so manually before calling.
  */
-inline int idle_cpu(int cpu)
+static inline void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
 {
-	return cpu_curr(cpu) == cpu_rq(cpu)->idle;
+	if (rq1 == rq2)
+		spin_lock(&rq1->lock);
+	else {
+		if (rq1 < rq2) {
+			spin_lock(&rq1->lock);
+			spin_lock(&rq2->lock);
+		} else {
+			spin_lock(&rq2->lock);
+			spin_lock(&rq1->lock);
+		}
+	}
+}
+
+/*
+ * double_rq_unlock - safely unlock two runqueues
+ *
+ * Note this does not restore interrupts like task_rq_unlock,
+ * you need to do so manually after calling.
+ */
+static inline void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
+{
+	spin_unlock(&rq1->lock);
+	if (rq1 != rq2)
+		spin_unlock(&rq2->lock);
 }
 
 #if CONFIG_SMP
@@ -815,7 +846,7 @@
 	 * FIFO tasks have no timeslices.
 	 */
 	if ((p->policy == SCHED_RR) && !--p->time_slice) {
-		p->time_slice = TASK_TIMESLICE(p);
+		p->time_slice = task_timeslice(p);
 		set_tsk_need_resched(p);
 
 		/* put it at the end of the queue: */
@@ -838,7 +869,7 @@
 		dequeue_task(p, rq->active);
 		set_tsk_need_resched(p);
 		p->prio = effective_prio(p);
-		p->time_slice = TASK_TIMESLICE(p);
+		p->time_slice = task_timeslice(p);
 
 		if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
 			if (!rq->expired_timestamp)
@@ -1051,7 +1082,7 @@
 	wait_queue_t wait; \
 	init_waitqueue_entry(&wait, current);
 
-#define SLEEP_ON_HEAD \
+#define SLEEP_ON_HEAD	\
 	spin_lock_irqsave(&q->lock,flags); \
 	__add_wait_queue(q, &wait); \
 	spin_unlock(&q->lock);
@@ -1207,6 +1238,15 @@
 }
 
 /**
+ * idle_cpu - is a given cpu idle currently?
+ * @cpu: the processor in question.
+ */
+inline int idle_cpu(int cpu)
+{
+	return cpu_curr(cpu) == cpu_rq(cpu)->idle;
+}
+
+/**
  * find_process_by_pid - find a process with a matching PID value.
  * @pid: the pid in question.
  */
@@ -1581,7 +1621,7 @@
 	p = find_process_by_pid(pid);
 	if (p)
 		jiffies_to_timespec(p->policy & SCHED_FIFO ?
-					 0 : TASK_TIMESLICE(p), &t);
+					 0 : task_timeslice(p), &t);
 	read_unlock(&tasklist_lock);
 	if (p)
 		retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
@@ -1682,40 +1722,6 @@
 	read_unlock(&tasklist_lock);
 }
 
-/*
- * double_rq_lock - safely lock two runqueues
- *
- * Note this does not disable interrupts like task_rq_lock,
- * you need to do so manually before calling.
- */
-static inline void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
-{
-	if (rq1 == rq2)
-		spin_lock(&rq1->lock);
-	else {
-		if (rq1 < rq2) {
-			spin_lock(&rq1->lock);
-			spin_lock(&rq2->lock);
-		} else {
-			spin_lock(&rq2->lock);
-			spin_lock(&rq1->lock);
-		}
-	}
-}
-
-/*
- * double_rq_unlock - safely unlock two runqueues
- *
- * Note this does not restore interrupts like task_rq_unlock,
- * you need to do so manually after calling.
- */
-static inline void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
-{
-	spin_unlock(&rq1->lock);
-	if (rq1 != rq2)
-		spin_unlock(&rq2->lock);
-}
-
 void __init init_idle(task_t *idle, int cpu)
 {
 	runqueue_t *idle_rq = cpu_rq(cpu), *rq = cpu_rq(task_cpu(idle));
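
Two notes on the code touched above, each with a stand-alone user-space
sketch. All constants and names in the sketches are illustrative
assumptions, not part of the patch.

1) The BASE_TIMESLICE()/task_timeslice() arithmetic maps static_prio
(100 for nice -20 up to 139 for nice +19) linearly onto the range
[MIN_TIMESLICE, MAX_TIMESLICE]. Assuming the stock O(1)-scheduler
constants (HZ=100, 10 ms and 300 ms slice bounds, MAX_PRIO=140,
MAX_USER_PRIO=40), this prints the whole nice-to-timeslice table:

	/* Stand-alone check of the BASE_TIMESLICE arithmetic.  The
	 * constants below are assumed to match the stock O(1)
	 * scheduler; they are not defined in this patch. */
	#include <stdio.h>

	#define HZ		100			/* assumed tick rate */
	#define MIN_TIMESLICE	((10 * HZ) / 1000)	/* 10 ms -> 1 tick */
	#define MAX_TIMESLICE	((300 * HZ) / 1000)	/* 300 ms -> 30 ticks */
	#define MAX_PRIO	140
	#define MAX_USER_PRIO	40

	/* static_prio runs from 100 (nice -20) to 139 (nice +19) */
	#define NICE_TO_PRIO(nice)	(120 + (nice))

	static unsigned int base_timeslice(int static_prio)
	{
		return MIN_TIMESLICE + (MAX_TIMESLICE - MIN_TIMESLICE) *
			(MAX_PRIO - 1 - static_prio) / (MAX_USER_PRIO - 1);
	}

	int main(void)
	{
		int nice;

		for (nice = -20; nice <= 19; nice++)
			printf("nice %3d -> %2u ticks\n",
			       nice, base_timeslice(NICE_TO_PRIO(nice)));
		return 0;
	}

Under those assumptions, nice -20 gets 30 ticks (300 ms), nice 0 gets
15 ticks (150 ms), and nice +19 gets the MIN_TIMESLICE floor of 1 tick
(10 ms).

2) double_rq_lock(), moved up in the file above, is the classic
ABBA-deadlock avoidance idiom: whenever two runqueue locks must be
held at once, every path acquires them in one global order (ascending
address), so two CPUs can never each hold one lock while spinning on
the other. A rough pthreads rendition (hypothetical names; uintptr_t
comparison instead of the kernel's raw pointer compare):

	/* Address-ordered acquisition of two locks, mirroring what
	 * double_rq_lock()/double_rq_unlock() do with runqueue
	 * spinlocks.  Illustrative sketch only. */
	#include <pthread.h>
	#include <stdint.h>

	static void double_lock(pthread_mutex_t *a, pthread_mutex_t *b)
	{
		if (a == b) {
			pthread_mutex_lock(a);	/* same queue: take it once */
		} else if ((uintptr_t)a < (uintptr_t)b) {
			pthread_mutex_lock(a);	/* lower address first... */
			pthread_mutex_lock(b);
		} else {
			pthread_mutex_lock(b);	/* ...so no two threads can */
			pthread_mutex_lock(a);	/* hold one each and spin */
		}
	}

	static void double_unlock(pthread_mutex_t *a, pthread_mutex_t *b)
	{
		pthread_mutex_unlock(a);	/* release order is harmless */
		if (a != b)
			pthread_mutex_unlock(b);
	}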