 drivers/block/as-iosched.c |   73 ++++++++++++++++++++++++++++++---------------
 include/linux/sched.h      |    2 -
 kernel/exit.c              |    2 -
 3 files changed, 52 insertions(+), 25 deletions(-)

diff -puN drivers/block/as-iosched.c~as-atomicity-fix drivers/block/as-iosched.c
--- 25/drivers/block/as-iosched.c~as-atomicity-fix	2003-03-05 22:17:44.000000000 -0800
+++ 25-akpm/drivers/block/as-iosched.c	2003-03-05 22:17:44.000000000 -0800
@@ -94,7 +94,7 @@ struct as_io_context {
 	atomic_t refcount;
 	pid_t pid;
 	unsigned long state;
-	unsigned long nr_queued;	/* queued reads & sync writes */
+	atomic_t nr_queued;	/* queued reads & sync writes */
 	unsigned long nr_dispatched;	/* number of requests gone to the driver */
 };
 
@@ -186,30 +186,45 @@ static void put_as_io_context(struct as_
 		return;
 	BUG_ON(atomic_read(&aic->refcount) == 0);
+	*paic = NULL;
 	if (atomic_dec_and_test(&aic->refcount)) {
 		atomic_dec(&nr_as_io_requests);
 		kfree(aic);
-		*paic = NULL;
 	}
 }
 
 /* Called by the exitting task */
-void exit_as_io_context(struct as_io_context *aic)
+void exit_as_io_context(void)
 {
-	clear_bit(AS_TASK_RUNNING, &aic->state);
-	put_as_io_context(&aic);
+	unsigned long flags;
+	struct as_io_context *aic;
+
+	local_irq_save(flags);
+	aic = current->as_io_context;
+	if (aic) {
+		clear_bit(AS_TASK_RUNNING, &aic->state);
+		put_as_io_context(&aic);
+		current->as_io_context = NULL;
+	}
+	local_irq_restore(flags);
 }
 
 /*
- * Called from process context, by the task which is submitting I/O.  If the
- * task has no IO context then create one and initialise it.  If it does have
- * a context, take a ref on it.
+ * If the current task has no IO context then create one and initialise it.
+ * If it does have a context, take a ref on it.
+ *
+ * This is always called in the context of the task which submitted the I/O.
+ * But weird things happen, so we disable local interrupts to ensure exclusive
+ * access to *current.
  */
 static struct as_io_context *get_as_io_context(void)
 {
 	struct task_struct *tsk = current;
-	struct as_io_context *ret = tsk->as_io_context;
+	unsigned long flags;
+	struct as_io_context *ret;
 
+	local_irq_save(flags);
+	ret = tsk->as_io_context;
 	if (ret == NULL) {
 		ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
 		if (ret) {
@@ -217,11 +232,12 @@ static struct as_io_context *get_as_io_c
 			atomic_set(&ret->refcount, 1);
 			ret->pid = tsk->pid;
 			ret->state = 1 << AS_TASK_RUNNING;
-			ret->nr_queued = 0;
+			atomic_set(&ret->nr_queued, 0);
 			ret->nr_dispatched = 0;
 			tsk->as_io_context = ret;
 		}
 	}
+	local_irq_restore(flags);
 	atomic_inc(&ret->refcount);
 	return ret;
 }
@@ -453,16 +469,21 @@ static void as_antic_waitnext(struct as_
 
 static void as_complete_arq(struct as_data *ad, struct as_rq *arq)
 {
+	if (!arq->as_io_context)
+		return;
+
 	set_bit(AS_REQ_FINISHED, &arq->as_io_context->state);
 	if (ad->as_io_context == arq->as_io_context) {
 		ad->antic_start = jiffies;
-		if (ad->antic_status == ANTIC_WAIT_REQ)
-			/* We were waiting on this request, now anticipate the
-			 * next one */
+		if (ad->antic_status == ANTIC_WAIT_REQ) {
+			/*
+			 * We were waiting on this request, now anticipate
+			 * the next one
+			 */
 			as_antic_waitnext(ad);
+		}
 	}
-
 	put_as_io_context(&arq->as_io_context);
 }
 
@@ -476,7 +497,8 @@ static void as_add_request(struct as_dat
 	const int data_dir = rq_data_dir(arq->request);
 
 	arq->as_io_context = get_as_io_context();
-	arq->as_io_context->nr_queued++;
+	if (arq->as_io_context)
+		atomic_inc(&arq->as_io_context->nr_queued);
 
 	as_add_arq_rb(ad, arq);
 
@@ -505,8 +527,10 @@ static void as_remove_queued_request(req
 	const int data_dir = rq_data_dir(arq->request);
 	struct as_data *ad = q->elevator.elevator_data;
 
-	BUG_ON(arq->as_io_context->nr_queued == 0);
-	arq->as_io_context->nr_queued--;
+	if (arq->as_io_context) {
+		BUG_ON(!atomic_read(&arq->as_io_context->nr_queued));
+		atomic_dec(&arq->as_io_context->nr_queued);
+	}
 
 	/*
 	 * Update the "next_arq" cache if we are about to remove its
@@ -544,9 +568,10 @@ static void as_remove_dispatched_request
 
 	if (arq) {
 		BUG_ON(ON_RB(&arq->rb_node));
-		BUG_ON(arq->as_io_context->nr_dispatched == 0);
-		arq->as_io_context->nr_dispatched--;
-
+		if (arq->as_io_context) {
+			BUG_ON(arq->as_io_context->nr_dispatched == 0);
+			arq->as_io_context->nr_dispatched--;
+		}
 		as_complete_arq(ad, arq);
 	}
 }
@@ -712,7 +737,8 @@ static void as_move_to_dispatch(struct a
 	 */
 	as_remove_queued_request(ad->q, arq->request);
 	list_add_tail(&arq->request->queuelist, ad->dispatch);
-	arq->as_io_context->nr_dispatched++;
+	if (arq->as_io_context)
+		arq->as_io_context->nr_dispatched++;
 }
 
 #define list_entry_fifo(ptr)	list_entry((ptr), struct as_rq, fifo)
@@ -795,7 +821,8 @@ static void as_antic_waitreq(struct as_d
 	if (ad->antic_status == ANTIC_OFF) {
 		ant_stats.anticipate_starts++;
 
-		if (test_bit(AS_REQ_FINISHED, &ad->as_io_context->state))
+		if (ad->as_io_context && test_bit(AS_REQ_FINISHED,
+					&ad->as_io_context->state))
 			as_antic_waitnext(ad);
 		else
 			ad->antic_status = ANTIC_WAIT_REQ;
@@ -912,7 +939,7 @@ static int as_can_break_anticipation(str
 		return 1;
 	}
 
-	if (aic && aic->nr_queued > 0) {
+	if (aic && atomic_read(&aic->nr_queued) > 0) {
 		ant_stats.queued_request++;
 		return 1;
 	}
diff -puN kernel/exit.c~as-atomicity-fix kernel/exit.c
--- 25/kernel/exit.c~as-atomicity-fix	2003-03-05 22:17:44.000000000 -0800
+++ 25-akpm/kernel/exit.c	2003-03-05 22:17:44.000000000 -0800
@@ -695,7 +695,7 @@ NORET_TYPE void do_exit(long code)
 	if (unlikely(tsk->pid == 1))
 		panic("Attempted to kill init!");
 	if (tsk->as_io_context)
-		exit_as_io_context(tsk->as_io_context);
+		exit_as_io_context();
 	tsk->flags |= PF_EXITING;
 	del_timer_sync(&tsk->real_timer);
 
diff -puN include/linux/sched.h~as-atomicity-fix include/linux/sched.h
--- 25/include/linux/sched.h~as-atomicity-fix	2003-03-05 22:17:44.000000000 -0800
+++ 25-akpm/include/linux/sched.h	2003-03-05 22:17:44.000000000 -0800
@@ -316,7 +316,7 @@ struct k_itimer {
 
 struct as_io_context;		/* Anticipatory scheduler */
-void exit_as_io_context(struct as_io_context *);
+void exit_as_io_context(void);
 
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
_
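As an aside, the shape of the change is easier to see outside the kernel.  Below is a
minimal userspace sketch (plain C11, not kernel code) of the pattern the patch moves
to: a refcounted per-task IO context whose queue counter is atomic and whose users
tolerate a NULL context when the GFP_ATOMIC allocation fails.  The names io_ctx,
io_ctx_get() and io_ctx_put() are illustrative only and do not exist in the kernel.

/*
 * Userspace model of the as_io_context lifetime rules after this patch:
 * NULL-tolerant callers, atomic queue counter, pointer cleared on put.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct io_ctx {
	atomic_int refcount;
	atomic_int nr_queued;		/* queued reads & sync writes */
};

/* Create or take a reference; may return NULL, callers must cope. */
static struct io_ctx *io_ctx_get(struct io_ctx **slot)
{
	struct io_ctx *ctx = *slot;

	if (ctx == NULL) {
		ctx = malloc(sizeof(*ctx));
		if (ctx == NULL)
			return NULL;	/* callers check, as as_add_request() now does */
		atomic_init(&ctx->refcount, 1);
		atomic_init(&ctx->nr_queued, 0);
		*slot = ctx;
	}
	atomic_fetch_add(&ctx->refcount, 1);
	return ctx;
}

/* Drop a reference; clear the caller's pointer first, free on the last put. */
static void io_ctx_put(struct io_ctx **pctx)
{
	struct io_ctx *ctx = *pctx;

	if (ctx == NULL)
		return;
	*pctx = NULL;
	if (atomic_fetch_sub(&ctx->refcount, 1) == 1)
		free(ctx);
}

int main(void)
{
	struct io_ctx *task_slot = NULL;	/* stands in for current->as_io_context */
	struct io_ctx *req_ctx = io_ctx_get(&task_slot);

	if (req_ctx)				/* NULL-tolerant, like the patched callers */
		atomic_fetch_add(&req_ctx->nr_queued, 1);
	if (req_ctx)
		atomic_fetch_sub(&req_ctx->nr_queued, 1);
	io_ctx_put(&req_ctx);			/* request's reference */
	io_ctx_put(&task_slot);			/* task's reference, frees the context */
	return 0;
}

As in the patch, the caller's pointer is cleared before the refcount is dropped,
mirroring the move of *paic = NULL ahead of atomic_dec_and_test(), so a stale copy
is never left pointing at memory that the final put may have freed.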