diff -urN linux-2.4.19-pre7-ac2/kernel/sched.c linux/kernel/sched.c
--- linux-2.4.19-pre7-ac2/kernel/sched.c	Fri Apr 19 21:04:28 2002
+++ linux/kernel/sched.c	Fri Apr 19 21:04:40 2002
@@ -1225,17 +1225,11 @@
 
 asmlinkage long sys_sched_yield(void)
 {
-	task_t *prev = current, *next;
-	runqueue_t *rq = this_rq();
+	runqueue_t *rq;
 	prio_array_t *array;
-	list_t *queue;
 
-	if (unlikely(prev->state != TASK_RUNNING)) {
-		schedule();
-		return 0;
-	}
-	release_kernel_lock(prev, smp_processor_id());
-	prev->sleep_timestamp = jiffies;
+	rq = this_rq();
+
 	/*
 	 * Decrease the yielding task's priority by one, to avoid
 	 * livelocks. This priority loss is temporary, it's recovered
@@ -1261,27 +1255,9 @@
 		list_add_tail(&current->run_list, array->queue + current->prio);
 		__set_bit(current->prio, array->bitmap);
 	}
-	/*
-	 * Context-switch manually. This is equivalent to
-	 * calling schedule(), but faster, because yield()
-	 * knows lots of things that can be optimized away
-	 * from the generic scheduler path:
-	 */
-	queue = array->queue + sched_find_first_bit(array->bitmap);
-	next = list_entry(queue->next, task_t, run_list);
-	prefetch(next);
-
-	prev->need_resched = 0;
-	if (likely(prev != next)) {
-		rq->nr_switches++;
-		rq->curr = next;
-		context_switch(prev, next);
-		barrier();
-		rq = this_rq();
-	}
-	spin_unlock_irq(&rq->lock);
+	spin_unlock(&rq->lock);
 
-	reacquire_kernel_lock(current);
+	schedule();
 
 	return 0;
 }
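
The patch drops the hand-rolled context switch in sys_sched_yield(): the task is still
requeued at a temporarily lowered priority, but the runqueue lock is then released and
the generic schedule() path is used instead.  Below is a minimal userspace sketch (not
part of the patch, and the spin() helper is made up for illustration) that exercises the
syscall: two CPU-bound processes run side by side, one calling sched_yield() on every
iteration.  On a single CPU, a working sys_sched_yield() should leave the yielding
process with noticeably less accumulated CPU time than the plain spinner; on SMP the
two processes may simply run on different CPUs, so the effect is less visible.

/* sched_yield() demo -- userspace sketch, not kernel code */
#include <stdio.h>
#include <unistd.h>
#include <sched.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/resource.h>

/* Busy-loop for 'seconds' of wall-clock time, optionally yielding
 * on every iteration, then report how much CPU time we actually got. */
static void spin(int yield, int seconds)
{
	struct timeval start, now;
	struct rusage ru;
	long total_us;

	gettimeofday(&start, NULL);
	do {
		if (yield)
			sched_yield();	/* enters sys_sched_yield() */
		gettimeofday(&now, NULL);
	} while (now.tv_sec - start.tv_sec < seconds);

	getrusage(RUSAGE_SELF, &ru);
	total_us = (ru.ru_utime.tv_sec + ru.ru_stime.tv_sec) * 1000000L
			+ ru.ru_utime.tv_usec + ru.ru_stime.tv_usec;
	printf("%s: %.3f s of CPU time\n",
	       yield ? "yielding" : "spinning", total_us / 1e6);
}

int main(void)
{
	pid_t pid = fork();

	if (pid < 0) {
		perror("fork");
		return 1;
	}
	if (pid == 0) {
		spin(1, 5);	/* child yields on every iteration */
		return 0;
	}
	spin(0, 5);		/* parent just burns CPU */
	wait(NULL);
	return 0;
}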