diff -urNp --exclude CVS --exclude BitKeeper x-ref/drivers/block/ll_rw_blk.c x/drivers/block/ll_rw_blk.c --- x-ref/drivers/block/ll_rw_blk.c 2003-07-17 08:38:15.000000000 +0200 +++ x/drivers/block/ll_rw_blk.c 2003-07-17 08:39:10.000000000 +0200 @@ -396,9 +396,9 @@ void generic_unplug_device(void *data) request_queue_t *q = (request_queue_t *) data; unsigned long flags; - spin_lock_irqsave(&io_request_lock, flags); + spin_lock_irqsave(q->queue_lock, flags); __generic_unplug_device(q); - spin_unlock_irqrestore(&io_request_lock, flags); + spin_unlock_irqrestore(q->queue_lock, flags); } /** blk_grow_request_list @@ -422,7 +422,7 @@ int blk_grow_request_list(request_queue_ * this causes system hangs during boot. * As a temporary fix, make the function non-blocking. */ - spin_lock_irqsave(&io_request_lock, flags); + spin_lock_irqsave(q->queue_lock, flags); while (q->nr_requests < nr_requests) { struct request *rq; @@ -453,7 +453,7 @@ int blk_grow_request_list(request_queue_ BUG_ON(!q->batch_sectors); atomic_set(&q->nr_sectors, 0); - spin_unlock_irqrestore(&io_request_lock, flags); + spin_unlock_irqrestore(q->queue_lock, flags); return q->nr_requests; } @@ -482,8 +482,6 @@ static void blk_init_free_list(request_q blk_grow_request_list(q, nr_requests, max_queue_sectors); init_waitqueue_head(&q->wait_for_requests); - - spin_lock_init(&q->queue_lock); } static int __make_request(request_queue_t * q, int rw, struct buffer_head * bh); @@ -525,6 +523,7 @@ void blk_init_queue(request_queue_t * q, { INIT_LIST_HEAD(&q->queue_head); elevator_init(&q->elevator, ELEVATOR_LINUS); + q->queue_lock = &io_request_lock; blk_init_free_list(q); q->request_fn = rfn; q->back_merge_fn = ll_back_merge_fn; @@ -549,7 +548,6 @@ void blk_init_queue(request_queue_t * q, blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); } -#define blkdev_free_rq(list) list_entry((list)->next, struct request, queue); /* * Get a free request. io_request_lock must be held and interrupts * disabled on the way in. 
Returns NULL if there are no free requests. @@ -642,15 +640,15 @@ static struct request *__get_request_wai do { set_current_state(TASK_UNINTERRUPTIBLE); - spin_lock_irq(&io_request_lock); + spin_lock_irq(q->queue_lock); if (blk_oversized_queue(q) || q->rq.count == 0) { __generic_unplug_device(q); - spin_unlock_irq(&io_request_lock); + spin_unlock_irq(q->queue_lock); schedule(); - spin_lock_irq(&io_request_lock); + spin_lock_irq(q->queue_lock); } rq = get_request(q, rw); - spin_unlock_irq(&io_request_lock); + spin_unlock_irq(q->queue_lock); } while (rq == NULL); remove_wait_queue(&q->wait_for_requests, &wait); current->state = TASK_RUNNING; @@ -865,7 +863,7 @@ static inline void add_request(request_q drive_stat_acct(req->rq_dev, req->cmd, req->nr_sectors, 1); if (!q->plugged && q->head_active && insert_here == &q->queue_head) { - spin_unlock_irq(&io_request_lock); + spin_unlock_irq(q->queue_lock); BUG(); } @@ -1039,7 +1037,7 @@ static int __make_request(request_queue_ * Now we acquire the request spinlock, we have to be mega careful * not to schedule or do something nonatomic */ - spin_lock_irq(&io_request_lock); + spin_lock_irq(q->queue_lock); again: insert_here = head->prev; @@ -1119,7 +1117,7 @@ get_rq: */ if (rw_ahead) { if (q->rq.count < q->batch_requests || blk_oversized_queue_batch(q)) { - spin_unlock_irq(&io_request_lock); + spin_unlock_irq(q->queue_lock); goto end_io; } req = get_request(q, rw); @@ -1128,10 +1126,10 @@ get_rq: } else { req = get_request(q, rw); if (req == NULL) { - spin_unlock_irq(&io_request_lock); + spin_unlock_irq(q->queue_lock); freereq = __get_request_wait(q, rw); head = &q->queue_head; - spin_lock_irq(&io_request_lock); + spin_lock_irq(q->queue_lock); should_wake = 1; goto again; } @@ -1164,7 +1162,7 @@ out: get_request_wait_wakeup(q, rw); if (sync) __generic_unplug_device(q); - spin_unlock_irq(&io_request_lock); + spin_unlock_irq(q->queue_lock); return 0; end_io: bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state)); diff -urNp 
--exclude CVS --exclude BitKeeper x-ref/drivers/scsi/hosts.c x/drivers/scsi/hosts.c --- x-ref/drivers/scsi/hosts.c 2003-06-13 22:07:32.000000000 +0200 +++ x/drivers/scsi/hosts.c 2003-07-17 08:38:21.000000000 +0200 @@ -148,11 +148,12 @@ struct Scsi_Host * scsi_register(Scsi_Ho } } atomic_set(&retval->host_active,0); - retval->host_busy = 0; + atomic_set(&retval->host_busy,0); retval->host_failed = 0; if(j > 0xffff) panic("Too many extra bytes requested\n"); retval->extra_bytes = j; retval->loaded_as_module = 1; + retval->lock = &io_request_lock; if (flag_new) { shn = (Scsi_Host_Name *) kmalloc(sizeof(Scsi_Host_Name), GFP_ATOMIC); if (!shn) { diff -urNp --exclude CVS --exclude BitKeeper x-ref/drivers/scsi/hosts.h x/drivers/scsi/hosts.h --- x-ref/drivers/scsi/hosts.h 2003-07-17 08:38:15.000000000 +0200 +++ x/drivers/scsi/hosts.h 2003-07-17 08:38:21.000000000 +0200 @@ -337,9 +337,11 @@ struct Scsi_Host unsigned int eh_active:1; /* Indicates the eh thread is awake and active if this is true. */ wait_queue_head_t host_wait; +#define SCSI_HAS_HOST_LOCK + spinlock_t * lock; Scsi_Host_Template * hostt; atomic_t host_active; /* commands checked out */ - volatile unsigned short host_busy; /* commands actually active on low-level */ + atomic_t host_busy; /* commands actually active on low-level */ volatile unsigned short host_failed; /* commands that failed. 
*/ /* public: */ diff -urNp --exclude CVS --exclude BitKeeper x-ref/drivers/scsi/scsi.c x/drivers/scsi/scsi.c --- x-ref/drivers/scsi/scsi.c 2003-07-17 08:38:13.000000000 +0200 +++ x/drivers/scsi/scsi.c 2003-07-17 08:38:21.000000000 +0200 @@ -197,6 +197,8 @@ void scsi_initialize_queue(Scsi_Device blk_init_queue(q, scsi_request_fn); blk_queue_headactive(q, 0); + spin_lock_init(&SDpnt->device_lock); + q->queue_lock = &SDpnt->device_lock; blk_queue_throttle_sectors(q, 1); q->queuedata = (void *) SDpnt; } @@ -523,10 +525,10 @@ inline void __scsi_release_command(Scsi_ unsigned long flags; Scsi_Device * SDpnt; - spin_lock_irqsave(&device_request_lock, flags); - SDpnt = SCpnt->device; + spin_lock_irqsave(&device_request_lock, flags); + SCpnt->request.rq_status = RQ_INACTIVE; SCpnt->state = SCSI_STATE_UNUSED; SCpnt->owner = SCSI_OWNER_NOBODY; @@ -548,7 +550,7 @@ inline void __scsi_release_command(Scsi_ */ if (SCpnt->host->in_recovery && !SCpnt->host->eh_active - && SCpnt->host->host_busy == SCpnt->host->host_failed) { + && atomic_read(&SCpnt->host->host_busy) == SCpnt->host->host_failed) { SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n", atomic_read(&SCpnt->host->eh_wait->count))); up(SCpnt->host->eh_wait); @@ -629,8 +631,6 @@ int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt) unsigned long flags = 0; unsigned long timeout; - ASSERT_LOCK(&io_request_lock, 0); - #if DEBUG unsigned long *ret = 0; #ifdef __mips__ @@ -687,63 +687,47 @@ int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt) SCpnt->state = SCSI_STATE_QUEUED; SCpnt->owner = SCSI_OWNER_LOWLEVEL; + + /* + * This block must wait until the full support for 16 byte CDBs is + * added to the SCSI layer. 
+ */ + if (CDB_SIZE(SCpnt) > host->max_cmd_len) { + SCSI_LOG_MLQUEUE(3, printk("scsi_dispatch_cmd : command too long %d(Max %d).\n", CDB_SIZE(SCpnt), host->max_cmd_len)); + SCpnt->result = (DID_ABORT << 16); + scsi_done(SCpnt); + return 1; + } + if (host->can_queue) { SCSI_LOG_MLQUEUE(3, printk("queuecommand : routine at %p\n", host->hostt->queuecommand)); /* - * Use the old error handling code if we haven't converted the driver - * to use the new one yet. Note - only the new queuecommand variant - * passes a meaningful return value. + * Use the old error handling code if we haven't converted the + * driver to use the new one yet. Note - only the new + * queuecommand variant passes a meaningful return value. */ - if (host->hostt->use_new_eh_code) { - /* - * Before we queue this command, check if the command - * length exceeds what the host adapter can handle. - */ - if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) { - spin_lock_irqsave(&io_request_lock, flags); - rtn = host->hostt->queuecommand(SCpnt, scsi_done); - spin_unlock_irqrestore(&io_request_lock, flags); - if (rtn != 0) { - scsi_delete_timer(SCpnt); - scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY); - SCSI_LOG_MLQUEUE(3, printk("queuecommand : request rejected\n")); - } - } else { - SCSI_LOG_MLQUEUE(3, printk("queuecommand : command too long.\n")); - SCpnt->result = (DID_ABORT << 16); - spin_lock_irqsave(&io_request_lock, flags); - scsi_done(SCpnt); - spin_unlock_irqrestore(&io_request_lock, flags); - rtn = 1; - } - } else { - /* - * Before we queue this command, check if the command - * length exceeds what the host adapter can handle. 
- */ - if (CDB_SIZE(SCpnt) <= SCpnt->host->max_cmd_len) { - spin_lock_irqsave(&io_request_lock, flags); - host->hostt->queuecommand(SCpnt, scsi_old_done); - spin_unlock_irqrestore(&io_request_lock, flags); - } else { - SCSI_LOG_MLQUEUE(3, printk("queuecommand : command too long.\n")); - SCpnt->result = (DID_ABORT << 16); - spin_lock_irqsave(&io_request_lock, flags); - scsi_old_done(SCpnt); - spin_unlock_irqrestore(&io_request_lock, flags); - rtn = 1; - } + spin_lock_irqsave(host->lock, flags); + if (host->hostt->use_new_eh_code) + rtn = host->hostt->queuecommand(SCpnt, scsi_done); + else + rtn = host->hostt->queuecommand(SCpnt, scsi_old_done); + spin_unlock_irqrestore(host->lock, flags); + + if (host->hostt->use_new_eh_code && rtn != 0) { + scsi_delete_timer(SCpnt); + scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY); + SCSI_LOG_MLQUEUE(3, printk("queuecommand : request rejected\n")); } } else { int temp; SCSI_LOG_MLQUEUE(3, printk("command() : routine at %p\n", host->hostt->command)); - spin_lock_irqsave(&io_request_lock, flags); + spin_lock_irqsave(host->lock, flags); temp = host->hostt->command(SCpnt); + spin_unlock_irqrestore(host->lock, flags); SCpnt->result = temp; #ifdef DEBUG_DELAY - spin_unlock_irqrestore(&io_request_lock, flags); clock = jiffies + 4 * HZ; while (time_before(jiffies, clock)) { barrier(); @@ -751,14 +735,8 @@ int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt) } printk("done(host = %d, result = %04x) : routine at %p\n", host->host_no, temp, host->hostt->command); - spin_lock_irqsave(&io_request_lock, flags); #endif - if (host->hostt->use_new_eh_code) { - scsi_done(SCpnt); - } else { - scsi_old_done(SCpnt); - } - spin_unlock_irqrestore(&io_request_lock, flags); + scsi_done(SCpnt); } SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n")); return rtn; @@ -828,8 +806,6 @@ void scsi_do_req(Scsi_Request * SRpnt, c Scsi_Device * SDpnt = SRpnt->sr_device; struct Scsi_Host *host = SDpnt->host; - ASSERT_LOCK(&io_request_lock, 0); - SCSI_LOG_MLQUEUE(4, { 
int i; @@ -925,8 +901,6 @@ void scsi_init_cmd_from_req(Scsi_Cmnd * { struct Scsi_Host *host = SCpnt->host; - ASSERT_LOCK(&io_request_lock, 0); - SCpnt->owner = SCSI_OWNER_MIDLEVEL; SRpnt->sr_command = SCpnt; @@ -1015,8 +989,6 @@ void scsi_do_cmd(Scsi_Cmnd * SCpnt, cons { struct Scsi_Host *host = SCpnt->host; - ASSERT_LOCK(&io_request_lock, 0); - SCpnt->pid = scsi_pid++; SCpnt->owner = SCSI_OWNER_MIDLEVEL; @@ -1182,6 +1154,10 @@ void scsi_done(Scsi_Cmnd * SCpnt) * level drivers away from using io_request_lock. Technically they should * all use their own locking. I am adding a small spinlock to protect * this datastructure to make it safe for that day. (ERY) + * + * We do *NOT* hold the io_request_lock for certain at this point. + * Don't make any assumptions, and we also don't need any other lock + * besides the bh queue lock. (DL) */ if (!scsi_bh_queue_head) { scsi_bh_queue_head = SCpnt; @@ -1223,6 +1199,7 @@ void scsi_done(Scsi_Cmnd * SCpnt) */ void scsi_bottom_half_handler(void) { + struct Scsi_Host *host; Scsi_Cmnd *SCpnt; Scsi_Cmnd *SCnext; unsigned long flags; @@ -1242,12 +1219,13 @@ void scsi_bottom_half_handler(void) for (; SCpnt; SCpnt = SCnext) { SCnext = SCpnt->bh_next; + host = SCpnt->host; switch (scsi_decide_disposition(SCpnt)) { case SUCCESS: /* * Add to BH queue. */ - SCSI_LOG_MLCOMPLETE(3, printk("Command finished %d %d 0x%x\n", SCpnt->host->host_busy, + SCSI_LOG_MLCOMPLETE(3, printk("Command finished %d %d 0x%x\n", atomic_read(&SCpnt->host->host_busy), SCpnt->host->host_failed, SCpnt->result)); @@ -1260,7 +1238,7 @@ void scsi_bottom_half_handler(void) * keeping track of the number of tries, so we don't end up looping, * of course. 
*/ - SCSI_LOG_MLCOMPLETE(3, printk("Command needs retry %d %d 0x%x\n", SCpnt->host->host_busy, + SCSI_LOG_MLCOMPLETE(3, printk("Command needs retry %d %d 0x%x\n", atomic_read(&SCpnt->host->host_busy), SCpnt->host->host_failed, SCpnt->result)); scsi_retry_command(SCpnt); @@ -1286,7 +1264,7 @@ void scsi_bottom_half_handler(void) SCSI_LOG_MLCOMPLETE(3, printk("Command failed %p %x active=%d busy=%d failed=%d\n", SCpnt, SCpnt->result, atomic_read(&SCpnt->host->host_active), - SCpnt->host->host_busy, + atomic_read(&SCpnt->host->host_busy), SCpnt->host->host_failed)); /* @@ -1304,7 +1282,7 @@ void scsi_bottom_half_handler(void) * If the host is having troubles, then look to see if this was the last * command that might have failed. If so, wake up the error handler. */ - if (SCpnt->host->host_busy == SCpnt->host->host_failed) { + if (atomic_read(&SCpnt->host->host_busy) == SCpnt->host->host_failed) { SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n", atomic_read(&SCpnt->host->eh_wait->count))); up(SCpnt->host->eh_wait); @@ -1364,9 +1342,6 @@ void scsi_finish_command(Scsi_Cmnd * SCp struct Scsi_Host *host; Scsi_Device *device; Scsi_Request * SRpnt; - unsigned long flags; - - ASSERT_LOCK(&io_request_lock, 0); host = SCpnt->host; device = SCpnt->device; @@ -1378,10 +1353,8 @@ void scsi_finish_command(Scsi_Cmnd * SCp * one execution context, but the device and host structures are * shared. */ - spin_lock_irqsave(&io_request_lock, flags); - host->host_busy--; /* Indicate that we are free */ - device->device_busy--; /* Decrement device usage counter. */ - spin_unlock_irqrestore(&io_request_lock, flags); + atomic_dec(&host->host_busy); /* Indicate that we are free */ + atomic_dec(&device->device_busy);/* Decrement device usage counter. 
*/ /* * Clear the flags which say that the device/host is no longer @@ -1445,15 +1418,16 @@ void scsi_release_commandblocks(Scsi_Dev { Scsi_Cmnd *SCpnt, *SCnext; unsigned long flags; + request_queue_t *q = &SDpnt->request_queue; - spin_lock_irqsave(&device_request_lock, flags); + spin_lock_irqsave(q->queue_lock, flags); for (SCpnt = SDpnt->device_queue; SCpnt; SCpnt = SCnext) { SDpnt->device_queue = SCnext = SCpnt->next; kfree((char *) SCpnt); } SDpnt->has_cmdblocks = 0; SDpnt->queue_depth = 0; - spin_unlock_irqrestore(&device_request_lock, flags); + spin_unlock_irqrestore(q->queue_lock, flags); } /* @@ -1475,8 +1449,9 @@ void scsi_build_commandblocks(Scsi_Devic struct Scsi_Host *host = SDpnt->host; int j; Scsi_Cmnd *SCpnt; + request_queue_t *q = &SDpnt->request_queue; - spin_lock_irqsave(&device_request_lock, flags); + spin_lock_irqsave(q->queue_lock, flags); if (SDpnt->queue_depth == 0) { @@ -1523,7 +1498,7 @@ void scsi_build_commandblocks(Scsi_Devic } else { SDpnt->has_cmdblocks = 1; } - spin_unlock_irqrestore(&device_request_lock, flags); + spin_unlock_irqrestore(q->queue_lock, flags); } void __init scsi_host_no_insert(char *str, int n) @@ -1911,7 +1886,11 @@ static int scsi_register_host(Scsi_Host_ All lame drivers are going to fail due to the following spinlock. For the time beeing let's use it only for drivers using the new scsi code. NOTE: the detect routine could - redefine the value tpnt->use_new_eh_code. (DB, 13 May 1998) */ + redefine the value tpnt->use_new_eh_code. (DB, 13 May 1998) + Since we now allow drivers to specify their own per-host locks, + this attempt at locking is horribly broken. Instead, call into + the detect routine unlocked and let the driver grab/release + the driver specific lock after it's allocated. 
(DL, 11 Dec 2001) */ if (tpnt->use_new_eh_code) { spin_lock_irqsave(&io_request_lock, flags); @@ -2035,6 +2014,7 @@ static int scsi_register_host(Scsi_Host_ (scsi_memory_upper_value - scsi_init_memory_start) / 1024); #endif + if (out_of_space) { scsi_unregister_host(tpnt); /* easiest way to clean up?? */ return 1; @@ -2485,7 +2465,7 @@ static void scsi_dump_status(int level) for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) { printk(KERN_INFO " %d %d %d : %d %d\n", shpnt->host_failed, - shpnt->host_busy, + atomic_read(&shpnt->host_busy), atomic_read(&shpnt->host_active), shpnt->host_blocked, shpnt->host_self_blocked); @@ -2711,10 +2691,10 @@ Scsi_Device * scsi_get_host_dev(struct S SDpnt->type = -1; SDpnt->queue_depth = 1; - scsi_build_commandblocks(SDpnt); - scsi_initialize_queue(SDpnt, SHpnt); + scsi_build_commandblocks(SDpnt); + SDpnt->online = TRUE; /* @@ -2841,9 +2821,9 @@ scsi_reset_provider(Scsi_Device *dev, in } else { unsigned long flags; - spin_lock_irqsave(&io_request_lock, flags); + spin_lock_irqsave(dev->host->lock, flags); rtn = scsi_old_reset(SCpnt, flag); - spin_unlock_irqrestore(&io_request_lock, flags); + spin_unlock_irqrestore(dev->host->lock, flags); } scsi_delete_timer(SCpnt); diff -urNp --exclude CVS --exclude BitKeeper x-ref/drivers/scsi/scsi.h x/drivers/scsi/scsi.h --- x-ref/drivers/scsi/scsi.h 2003-07-17 08:38:13.000000000 +0200 +++ x/drivers/scsi/scsi.h 2003-07-17 08:38:21.000000000 +0200 @@ -554,10 +554,11 @@ struct scsi_device { struct Scsi_Host *host; request_queue_t request_queue; atomic_t device_active; /* commands checked out for device */ - volatile unsigned short device_busy; /* commands actually active on low-level */ + atomic_t device_busy; /* commands actually active on low-level */ int (*scsi_init_io_fn) (Scsi_Cmnd *); /* Used to initialize new request */ Scsi_Cmnd *device_queue; /* queue of SCSI Command structures */ + spinlock_t device_lock; /* public: */ unsigned int id, lun, channel; diff -urNp --exclude CVS --exclude
BitKeeper x-ref/drivers/scsi/scsi_error.c x/drivers/scsi/scsi_error.c --- x-ref/drivers/scsi/scsi_error.c 2002-11-29 02:23:07.000000000 +0100 +++ x/drivers/scsi/scsi_error.c 2003-07-17 08:38:21.000000000 +0200 @@ -223,7 +223,7 @@ void scsi_times_out(Scsi_Cmnd * SCpnt) SCSI_LOG_TIMEOUT(3, printk("Command timed out active=%d busy=%d failed=%d\n", atomic_read(&SCpnt->host->host_active), - SCpnt->host->host_busy, + atomic_read(&SCpnt->host->host_busy), SCpnt->host->host_failed)); /* @@ -234,7 +234,7 @@ void scsi_times_out(Scsi_Cmnd * SCpnt) panic("Error handler thread not present at %p %p %s %d", SCpnt, SCpnt->host, __FILE__, __LINE__); } - if (SCpnt->host->host_busy == SCpnt->host->host_failed) { + if (atomic_read(&SCpnt->host->host_busy) == SCpnt->host->host_failed) { up(SCpnt->host->eh_wait); } } @@ -423,7 +423,7 @@ STATIC int scsi_request_sense(Scsi_Cmnd unsigned char scsi_result0[256], *scsi_result = NULL; int saved_result; - ASSERT_LOCK(&io_request_lock, 0); + ASSERT_LOCK(SCpnt->host->lock, 0); memcpy((void *) SCpnt->cmnd, (void *) generic_sense, sizeof(generic_sense)); @@ -585,10 +585,10 @@ STATIC void scsi_send_eh_cmnd(Scsi_Cmnd unsigned long flags; struct Scsi_Host *host; - ASSERT_LOCK(&io_request_lock, 0); - host = SCpnt->host; + ASSERT_LOCK(host->lock, 0); + retry: /* * We will use a queued command if possible, otherwise we will emulate the @@ -609,9 +609,9 @@ STATIC void scsi_send_eh_cmnd(Scsi_Cmnd SCpnt->host->eh_action = &sem; SCpnt->request.rq_status = RQ_SCSI_BUSY; - spin_lock_irqsave(&io_request_lock, flags); + spin_lock_irqsave(host->lock, flags); host->hostt->queuecommand(SCpnt, scsi_eh_done); - spin_unlock_irqrestore(&io_request_lock, flags); + spin_unlock_irqrestore(host->lock, flags); down(&sem); @@ -634,10 +634,10 @@ STATIC void scsi_send_eh_cmnd(Scsi_Cmnd * abort a timed out command or not. Not sure how * we should treat them differently anyways. 
*/ - spin_lock_irqsave(&io_request_lock, flags); + spin_lock_irqsave(host->lock, flags); if (SCpnt->host->hostt->eh_abort_handler) SCpnt->host->hostt->eh_abort_handler(SCpnt); - spin_unlock_irqrestore(&io_request_lock, flags); + spin_unlock_irqrestore(host->lock, flags); SCpnt->request.rq_status = RQ_SCSI_DONE; SCpnt->owner = SCSI_OWNER_ERROR_HANDLER; @@ -654,9 +654,9 @@ STATIC void scsi_send_eh_cmnd(Scsi_Cmnd * protection here, since we would end up waiting in the actual low * level driver, we don't know how to wake it up. */ - spin_lock_irqsave(&io_request_lock, flags); + spin_lock_irqsave(host->lock, flags); temp = host->hostt->command(SCpnt); - spin_unlock_irqrestore(&io_request_lock, flags); + spin_unlock_irqrestore(host->lock, flags); SCpnt->result = temp; /* Fall through to code below to examine status. */ @@ -761,12 +761,13 @@ STATIC int scsi_try_to_abort_command(Scs { int rtn; unsigned long flags; + struct Scsi_Host *host = SCpnt->host; SCpnt->eh_state = FAILED; /* Until we come up with something better */ - if (SCpnt->host->hostt->eh_abort_handler == NULL) { + if (host->hostt->eh_abort_handler == NULL) return FAILED; - } + /* * scsi_done was called just after the command timed out and before * we had a chance to process it. 
(DB) @@ -776,9 +777,9 @@ STATIC int scsi_try_to_abort_command(Scs SCpnt->owner = SCSI_OWNER_LOWLEVEL; - spin_lock_irqsave(&io_request_lock, flags); - rtn = SCpnt->host->hostt->eh_abort_handler(SCpnt); - spin_unlock_irqrestore(&io_request_lock, flags); + spin_lock_irqsave(host->lock, flags); + rtn = host->hostt->eh_abort_handler(SCpnt); + spin_unlock_irqrestore(host->lock, flags); return rtn; } @@ -798,19 +799,20 @@ STATIC int scsi_try_to_abort_command(Scs */ STATIC int scsi_try_bus_device_reset(Scsi_Cmnd * SCpnt, int timeout) { + struct Scsi_Host *host = SCpnt->host; unsigned long flags; int rtn; SCpnt->eh_state = FAILED; /* Until we come up with something better */ - if (SCpnt->host->hostt->eh_device_reset_handler == NULL) { + if (host->hostt->eh_device_reset_handler == NULL) return FAILED; - } + SCpnt->owner = SCSI_OWNER_LOWLEVEL; - spin_lock_irqsave(&io_request_lock, flags); - rtn = SCpnt->host->hostt->eh_device_reset_handler(SCpnt); - spin_unlock_irqrestore(&io_request_lock, flags); + spin_lock_irqsave(host->lock, flags); + rtn = host->hostt->eh_device_reset_handler(SCpnt); + spin_unlock_irqrestore(host->lock, flags); if (rtn == SUCCESS) SCpnt->eh_state = SUCCESS; @@ -830,6 +832,7 @@ STATIC int scsi_try_bus_device_reset(Scs */ STATIC int scsi_try_bus_reset(Scsi_Cmnd * SCpnt) { + struct Scsi_Host *host = SCpnt->host; unsigned long flags; int rtn; @@ -837,13 +840,12 @@ STATIC int scsi_try_bus_reset(Scsi_Cmnd SCpnt->owner = SCSI_OWNER_LOWLEVEL; SCpnt->serial_number_at_timeout = SCpnt->serial_number; - if (SCpnt->host->hostt->eh_bus_reset_handler == NULL) { + if (host->hostt->eh_bus_reset_handler == NULL) return FAILED; - } - spin_lock_irqsave(&io_request_lock, flags); - rtn = SCpnt->host->hostt->eh_bus_reset_handler(SCpnt); - spin_unlock_irqrestore(&io_request_lock, flags); + spin_lock_irqsave(host->lock, flags); + rtn = host->hostt->eh_bus_reset_handler(SCpnt); + spin_unlock_irqrestore(host->lock, flags); if (rtn == SUCCESS) SCpnt->eh_state = SUCCESS; @@ -855,7 
+857,7 @@ STATIC int scsi_try_bus_reset(Scsi_Cmnd scsi_sleep(BUS_RESET_SETTLE_TIME); if (SCpnt->eh_state == SUCCESS) { Scsi_Device *SDloop; - for (SDloop = SCpnt->host->host_queue; SDloop; SDloop = SDloop->next) { + for (SDloop = host->host_queue; SDloop; SDloop = SDloop->next) { if (SCpnt->channel == SDloop->channel) { SDloop->was_reset = 1; SDloop->expecting_cc_ua = 1; @@ -877,6 +879,7 @@ STATIC int scsi_try_bus_reset(Scsi_Cmnd */ STATIC int scsi_try_host_reset(Scsi_Cmnd * SCpnt) { + struct Scsi_Host *host = SCpnt->host; unsigned long flags; int rtn; @@ -884,12 +887,12 @@ STATIC int scsi_try_host_reset(Scsi_Cmnd SCpnt->owner = SCSI_OWNER_LOWLEVEL; SCpnt->serial_number_at_timeout = SCpnt->serial_number; - if (SCpnt->host->hostt->eh_host_reset_handler == NULL) { + if (host->hostt->eh_host_reset_handler == NULL) return FAILED; - } - spin_lock_irqsave(&io_request_lock, flags); - rtn = SCpnt->host->hostt->eh_host_reset_handler(SCpnt); - spin_unlock_irqrestore(&io_request_lock, flags); + + spin_lock_irqsave(host->lock, flags); + rtn = host->hostt->eh_host_reset_handler(SCpnt); + spin_unlock_irqrestore(host->lock, flags); if (rtn == SUCCESS) SCpnt->eh_state = SUCCESS; @@ -901,7 +904,7 @@ STATIC int scsi_try_host_reset(Scsi_Cmnd scsi_sleep(HOST_RESET_SETTLE_TIME); if (SCpnt->eh_state == SUCCESS) { Scsi_Device *SDloop; - for (SDloop = SCpnt->host->host_queue; SDloop; SDloop = SDloop->next) { + for (SDloop = host->host_queue; SDloop; SDloop = SDloop->next) { SDloop->was_reset = 1; SDloop->expecting_cc_ua = 1; } @@ -1238,7 +1241,7 @@ STATIC void scsi_restart_operations(stru Scsi_Device *SDpnt; unsigned long flags; - ASSERT_LOCK(&io_request_lock, 0); + ASSERT_LOCK(host->lock, 0); /* * Next free up anything directly waiting upon the host. This will be @@ -1255,19 +1258,23 @@ STATIC void scsi_restart_operations(stru * now that error recovery is done, we will need to ensure that these * requests are started. 
*/ - spin_lock_irqsave(&io_request_lock, flags); + spin_lock_irqsave(host->lock, flags); for (SDpnt = host->host_queue; SDpnt; SDpnt = SDpnt->next) { request_queue_t *q; - if ((host->can_queue > 0 && (host->host_busy >= host->can_queue)) + if ((host->can_queue > 0 && (atomic_read(&host->host_busy) >= host->can_queue)) || (host->host_blocked) || (host->host_self_blocked) || (SDpnt->device_blocked)) { break; } q = &SDpnt->request_queue; + spin_lock(q->queue_lock); + spin_unlock(host->lock); q->request_fn(q); + spin_lock(host->lock); + spin_unlock(q->queue_lock); } - spin_unlock_irqrestore(&io_request_lock, flags); + spin_unlock_irqrestore(host->lock, flags); } /* @@ -1314,7 +1321,7 @@ STATIC int scsi_unjam_host(struct Scsi_H Scsi_Cmnd *SCdone; int timed_out; - ASSERT_LOCK(&io_request_lock, 0); + ASSERT_LOCK(host->lock, 0); SCdone = NULL; diff -urNp --exclude CVS --exclude BitKeeper x-ref/drivers/scsi/scsi_lib.c x/drivers/scsi/scsi_lib.c --- x-ref/drivers/scsi/scsi_lib.c 2003-07-15 02:05:49.000000000 +0200 +++ x/drivers/scsi/scsi_lib.c 2003-07-17 08:38:21.000000000 +0200 @@ -70,7 +70,7 @@ static void __scsi_insert_special(reques { unsigned long flags; - ASSERT_LOCK(&io_request_lock, 0); + ASSERT_LOCK(q->queue_lock, 0); rq->cmd = SPECIAL; rq->special = data; @@ -84,15 +84,15 @@ static void __scsi_insert_special(reques * head of the queue for things like a QUEUE_FULL message from a * device, or a host that is unable to accept a particular command. 
*/ - spin_lock_irqsave(&io_request_lock, flags); + spin_lock_irqsave(q->queue_lock, flags); if (at_head) list_add(&rq->queue, &q->queue_head); else list_add_tail(&rq->queue, &q->queue_head); q->request_fn(q); - spin_unlock_irqrestore(&io_request_lock, flags); + spin_unlock_irqrestore(q->queue_lock, flags); } @@ -167,7 +167,7 @@ int scsi_insert_special_req(Scsi_Request */ int scsi_init_cmd_errh(Scsi_Cmnd * SCpnt) { - ASSERT_LOCK(&io_request_lock, 0); + ASSERT_LOCK(q->queue_lock, 0); SCpnt->owner = SCSI_OWNER_MIDLEVEL; SCpnt->reset_chain = NULL; @@ -246,13 +246,12 @@ int scsi_init_cmd_errh(Scsi_Cmnd * SCpnt void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt) { int all_clear; - unsigned long flags; Scsi_Device *SDpnt; struct Scsi_Host *SHpnt; + unsigned long flags; - ASSERT_LOCK(&io_request_lock, 0); + ASSERT_LOCK(q->queue_lock, 0); - spin_lock_irqsave(&io_request_lock, flags); if (SCpnt != NULL) { /* @@ -262,13 +261,17 @@ void scsi_queue_next_request(request_que * the bad sector. */ SCpnt->request.special = (void *) SCpnt; + spin_lock_irqsave(q->queue_lock, flags); list_add(&SCpnt->request.queue, &q->queue_head); + spin_unlock_irqrestore(q->queue_lock, flags); } /* * Just hit the requeue function for the queue. 
*/ + spin_lock_irqsave(q->queue_lock, flags); q->request_fn(q); + spin_unlock_irqrestore(q->queue_lock, flags); SDpnt = (Scsi_Device *) q->queuedata; SHpnt = SDpnt->host; @@ -282,21 +285,24 @@ void scsi_queue_next_request(request_que */ if (SDpnt->single_lun && list_empty(&q->queue_head) - && SDpnt->device_busy == 0) { + && atomic_read(&SDpnt->device_busy) == 0) { request_queue_t *q; for (SDpnt = SHpnt->host_queue; SDpnt; SDpnt = SDpnt->next) { if (((SHpnt->can_queue > 0) - && (SHpnt->host_busy >= SHpnt->can_queue)) + && (atomic_read(&SHpnt->host_busy) + >= SHpnt->can_queue)) || (SHpnt->host_blocked) || (SHpnt->host_self_blocked) || (SDpnt->device_blocked)) { break; } q = &SDpnt->request_queue; + spin_lock_irqsave(q->queue_lock, flags); q->request_fn(q); + spin_unlock_irqrestore(q->queue_lock, flags); } } @@ -312,7 +318,7 @@ void scsi_queue_next_request(request_que if (SHpnt->some_device_starved) { for (SDpnt = SHpnt->host_queue; SDpnt; SDpnt = SDpnt->next) { request_queue_t *q; - if ((SHpnt->can_queue > 0 && (SHpnt->host_busy >= SHpnt->can_queue)) + if ((SHpnt->can_queue > 0 && (atomic_read(&SHpnt->host_busy) >= SHpnt->can_queue)) || (SHpnt->host_blocked) || (SHpnt->host_self_blocked)) { break; @@ -321,14 +327,15 @@ void scsi_queue_next_request(request_que continue; } q = &SDpnt->request_queue; + spin_lock_irqsave(q->queue_lock, flags); q->request_fn(q); + spin_unlock_irqrestore(q->queue_lock, flags); all_clear = 0; } if (SDpnt == NULL && all_clear) { SHpnt->some_device_starved = 0; } } - spin_unlock_irqrestore(&io_request_lock, flags); } /* @@ -366,7 +373,7 @@ static Scsi_Cmnd *__scsi_end_request(Scs unsigned long flags; int nsect; - ASSERT_LOCK(&io_request_lock, 0); + ASSERT_LOCK(q->queue_lock, 0); req = &SCpnt->request; req->errors = 0; @@ -432,7 +439,7 @@ static Scsi_Cmnd *__scsi_end_request(Scs * This will goose the queue request function at the end, so we don't * need to worry about launching another command. 
*/ - __scsi_release_command(SCpnt); + scsi_release_command(SCpnt); if (frequeue) scsi_queue_next_request(q, NULL); @@ -484,8 +491,6 @@ Scsi_Cmnd *scsi_end_request(Scsi_Cmnd * */ static void scsi_release_buffers(Scsi_Cmnd * SCpnt) { - ASSERT_LOCK(&io_request_lock, 0); - /* * Free up any indirection buffers we allocated for DMA purposes. */ @@ -558,7 +563,7 @@ void scsi_io_completion(Scsi_Cmnd * SCpn * would be used if we just wanted to retry, for example. * */ - ASSERT_LOCK(&io_request_lock, 0); + ASSERT_LOCK(q->queue_lock, 0); /* * Free up any indirection buffers we allocated for DMA purposes. @@ -786,7 +791,7 @@ struct Scsi_Device_Template *scsi_get_re kdev_t dev = req->rq_dev; int major = MAJOR(dev); - ASSERT_LOCK(&io_request_lock, 1); + ASSERT_LOCK(q->queue_lock, 1); for (spnt = scsi_devicelist; spnt; spnt = spnt->next) { /* @@ -840,17 +845,11 @@ void scsi_request_fn(request_queue_t * q struct request *req; Scsi_Cmnd *SCpnt; Scsi_Request *SRpnt; - Scsi_Device *SDpnt; - struct Scsi_Host *SHpnt; + Scsi_Device *SDpnt = q->queuedata; + struct Scsi_Host *SHpnt = SDpnt->host; struct Scsi_Device_Template *STpnt; - ASSERT_LOCK(&io_request_lock, 1); - - SDpnt = (Scsi_Device *) q->queuedata; - if (!SDpnt) { - panic("Missing device"); - } - SHpnt = SDpnt->host; + ASSERT_LOCK(q->queue_lock, 1); /* * To start with, we keep looping until the queue is empty, or until @@ -863,15 +862,29 @@ void scsi_request_fn(request_queue_t * q * we need to check to see if the queue is plugged or not. */ if (SHpnt->in_recovery || q->plugged) - return; + break; /* * If the device cannot accept another request, then quit. */ - if (SDpnt->device_blocked) { + if (SDpnt->device_blocked) break; - } - if ((SHpnt->can_queue > 0 && (SHpnt->host_busy >= SHpnt->can_queue)) + + /* + * If we couldn't find a request that could be queued, then we + * can quit. 
+ */ + if (list_empty(&q->queue_head)) + break; + + /* + * at this point, q->queue_lock is already held and interrupts + * are disabled, so spin_lock() is fine + */ + spin_lock(SHpnt->lock); + + if ((SHpnt->can_queue > 0 + && (atomic_read(&SHpnt->host_busy) >= SHpnt->can_queue)) || (SHpnt->host_blocked) || (SHpnt->host_self_blocked)) { /* @@ -882,15 +895,22 @@ void scsi_request_fn(request_queue_t * q * little help getting it started again * once the host isn't quite so busy. */ - if (SDpnt->device_busy == 0) { + if (atomic_read(&SDpnt->device_busy) == 0) { SDpnt->starved = 1; SHpnt->some_device_starved = 1; } + + spin_unlock(SHpnt->lock); break; - } else { - SDpnt->starved = 0; } + /* + * clear starved, and bump busy count + */ + SDpnt->starved = 0; + atomic_inc(&SHpnt->host_busy); + spin_unlock(SHpnt->lock); + /* * FIXME(eric) * I am not sure where the best place to do this is. We need @@ -911,21 +931,15 @@ void scsi_request_fn(request_queue_t * q */ SDpnt->was_reset = 0; if (SDpnt->removable && !in_interrupt()) { - spin_unlock_irq(&io_request_lock); + atomic_dec(&SHpnt->host_busy); + spin_unlock_irq(q->queue_lock); scsi_ioctl(SDpnt, SCSI_IOCTL_DOORLOCK, 0); - spin_lock_irq(&io_request_lock); + spin_lock_irq(q->queue_lock); continue; } } /* - * If we couldn't find a request that could be queued, then we - * can also quit. - */ - if (list_empty(&q->queue_head)) - break; - - /* * Loop through all of the requests in this queue, and find * one that is queueable. */ @@ -947,9 +961,11 @@ void scsi_request_fn(request_queue_t * q SRpnt = (Scsi_Request *) req->special; if( SRpnt->sr_magic == SCSI_REQ_MAGIC ) { - SCpnt = scsi_allocate_device(SRpnt->sr_device, + SCpnt = scsi_allocate_device(SRpnt->sr_device, FALSE, FALSE); - if( !SCpnt ) { + + if (!SCpnt) { + atomic_dec(&SHpnt->host_busy); break; } scsi_init_cmd_from_req(SCpnt, SRpnt); @@ -964,7 +980,7 @@ void scsi_request_fn(request_queue_t * q /* * Now try and find a command block that we can use. 
*/ - if( req->special != NULL ) { + if (req->special) { SCpnt = (Scsi_Cmnd *) req->special; /* * We need to recount the number of @@ -984,46 +1000,14 @@ void scsi_request_fn(request_queue_t * q * loop. Otherwise loop around and try another request. */ if (!SCpnt) { + atomic_dec(&SHpnt->host_busy); break; } } - /* - * Now bump the usage count for both the host and the - * device. - */ - SHpnt->host_busy++; - SDpnt->device_busy++; - - /* - * Finally, before we release the lock, we copy the - * request to the command block, and remove the - * request from the request list. Note that we always - * operate on the queue head - there is absolutely no - * reason to search the list, because all of the commands - * in this queue are for the same device. - */ - blkdev_dequeue_request(req); - - if (req != &SCpnt->request && req != &SRpnt->sr_request ) { + if (req != &SCpnt->request && req != &SRpnt->sr_request) memcpy(&SCpnt->request, req, sizeof(struct request)); - /* - * We have copied the data out of the request block - - * it is now in a field in SCpnt. Release the request - * block. - */ - blkdev_release_request(req); - } - /* - * Now it is finally safe to release the lock. We are - * not going to noodle the request list until this - * request has been queued and we loop back to queue - * another. - */ - req = NULL; - spin_unlock_irq(&io_request_lock); - if (SCpnt->request.cmd != SPECIAL) { /* * This will do a couple of things: @@ -1037,31 +1021,19 @@ void scsi_request_fn(request_queue_t * q * some kinds of consistency checking may cause the * request to be rejected immediately. */ - if (STpnt == NULL) { + if (STpnt == NULL) STpnt = scsi_get_request_dev(req); - } + /* * This sets up the scatter-gather table (allocating if * required). Hosts that need bounce buffers will also - * get those allocated here. + * get those allocated here. This call will fail if + * there isn't enough scsi DMA memory available. 
*/ if (!SDpnt->scsi_init_io_fn(SCpnt)) { - /* - * probably we ran out of sgtable memory, or - * __init_io() wanted to revert to a single - * segment request. this would require bouncing - * on highmem i/o, so mark the device as - * starved and continue later instead - */ - spin_lock_irq(&io_request_lock); - SHpnt->host_busy--; - SDpnt->device_busy--; - if (SDpnt->device_busy == 0) { - SDpnt->starved = 1; - SHpnt->some_device_starved = 1; - } - SCpnt->request.special = SCpnt; - list_add(&SCpnt->request.queue, &q->queue_head); + if (req->special != SCpnt) + __scsi_release_command(SCpnt); + atomic_dec(&SHpnt->host_busy); break; } @@ -1069,16 +1041,20 @@ void scsi_request_fn(request_queue_t * q * Initialize the actual SCSI command for this request. */ if (!STpnt->init_command(SCpnt)) { + blkdev_dequeue_request(req); + if (req != &SCpnt->request && + req != &SRpnt->sr_request ) { + blkdev_release_request(req); + } + req = NULL; + spin_unlock_irq(q->queue_lock); scsi_release_buffers(SCpnt); - SCpnt = __scsi_end_request(SCpnt, 0, + SCpnt = __scsi_end_request(SCpnt, 0, SCpnt->request.nr_sectors, 0, 0); - if( SCpnt != NULL ) - { + if (SCpnt) panic("Should not have leftover blocks\n"); - } - spin_lock_irq(&io_request_lock); - SHpnt->host_busy--; - SDpnt->device_busy--; + atomic_dec(&SHpnt->host_busy); + spin_lock_irq(q->queue_lock); continue; } } @@ -1089,15 +1065,37 @@ void scsi_request_fn(request_queue_t * q scsi_init_cmd_errh(SCpnt); /* - * Dispatch the command to the low-level driver. + * Now bump the usage count for the device. */ - scsi_dispatch_cmd(SCpnt); + atomic_inc(&SDpnt->device_busy); /* - * Now we need to grab the lock again. We are about to mess - * with the request queue and try to find another command. + * Finally, before we release the lock, we copy the + * request to the command block, and remove the + * request from the request list. 
Note that we always + * operate on the queue head - there is absolutely no + * reason to search the list, because all of the commands + * in this queue are for the same device. */ - spin_lock_irq(&io_request_lock); + blkdev_dequeue_request(req); + + if (req != &SCpnt->request && req != &SRpnt->sr_request ) { + /* + * Above, we copied the data out of the request block - + * it is now in a field in SCpnt. Release the request + * block. + */ + blkdev_release_request(req); + } + + req = NULL; + + /* + * Dispatch the command to the low-level driver. + */ + spin_unlock_irq(q->queue_lock); + scsi_dispatch_cmd(SCpnt); + spin_lock_irq(q->queue_lock); } } diff -urNp --exclude CVS --exclude BitKeeper x-ref/drivers/scsi/scsi_obsolete.c x/drivers/scsi/scsi_obsolete.c --- x-ref/drivers/scsi/scsi_obsolete.c 2002-08-09 14:52:18.000000000 +0200 +++ x/drivers/scsi/scsi_obsolete.c 2003-07-17 08:38:21.000000000 +0200 @@ -147,7 +147,7 @@ void scsi_old_times_out(Scsi_Cmnd * SCpn { unsigned long flags; - spin_lock_irqsave(&io_request_lock, flags); + spin_lock_irqsave(SCpnt->host->lock, flags); /* Set the serial_number_at_timeout to the current serial_number */ SCpnt->serial_number_at_timeout = SCpnt->serial_number; @@ -202,14 +202,14 @@ void scsi_old_times_out(Scsi_Cmnd * SCpn break; } - spin_unlock_irqrestore(&io_request_lock, flags); + spin_unlock_irqrestore(SCpnt->host->lock, flags); } /* * From what I can find in scsi_obsolete.c, this function is only called * by scsi_old_done and scsi_reset. Both of these functions run with the - * io_request_lock already held, so we need do nothing here about grabbing + * host->lock already held, so we need do nothing here about grabbing * any locks. */ static void scsi_request_sense(Scsi_Cmnd * SCpnt) @@ -238,9 +238,9 @@ static void scsi_request_sense(Scsi_Cmnd * Ugly, ugly. The newer interfaces all assume that the lock * isn't held. Mustn't disappoint, or we deadlock the system. 
*/ - spin_unlock_irq(&io_request_lock); + spin_unlock_irq(SCpnt->host->lock); scsi_dispatch_cmd(SCpnt); - spin_lock_irq(&io_request_lock); + spin_lock_irq(SCpnt->host->lock); } @@ -661,9 +661,9 @@ void scsi_old_done(Scsi_Cmnd * SCpnt) * assume that the lock isn't held. Mustn't * disappoint, or we deadlock the system. */ - spin_unlock_irq(&io_request_lock); + spin_unlock_irq(host->lock); scsi_dispatch_cmd(SCpnt); - spin_lock_irq(&io_request_lock); + spin_lock_irq(host->lock); } break; default: @@ -675,8 +675,8 @@ void scsi_old_done(Scsi_Cmnd * SCpnt) #ifdef DEBUG printk("Calling done function - at address %p\n", SCpnt->done); #endif - host->host_busy--; /* Indicate that we are free */ - device->device_busy--; /* Decrement device usage counter. */ + atomic_dec(&host->host_busy); /* Indicate that we are free */ + atomic_dec(&device->device_busy);/* Decrement device usage counter. */ SCpnt->result = result | ((exit & 0xff) << 24); SCpnt->use_sg = SCpnt->old_use_sg; @@ -689,7 +689,7 @@ void scsi_old_done(Scsi_Cmnd * SCpnt) * use, the upper code is run from a bottom half handler, so * it isn't an issue. 
*/ - spin_unlock_irq(&io_request_lock); + spin_unlock_irq(host->lock); SRpnt = SCpnt->sc_request; if( SRpnt != NULL ) { SRpnt->sr_result = SRpnt->sr_command->result; @@ -701,7 +701,7 @@ void scsi_old_done(Scsi_Cmnd * SCpnt) } SCpnt->done(SCpnt); - spin_lock_irq(&io_request_lock); + spin_lock_irq(host->lock); } #undef CMD_FINISHED #undef REDO @@ -740,10 +740,10 @@ static int scsi_abort(Scsi_Cmnd * SCpnt, return 0; } if (SCpnt->internal_timeout & IN_ABORT) { - spin_unlock_irq(&io_request_lock); + spin_unlock_irq(host->lock); while (SCpnt->internal_timeout & IN_ABORT) barrier(); - spin_lock_irq(&io_request_lock); + spin_lock_irq(host->lock); } else { SCpnt->internal_timeout |= IN_ABORT; oldto = update_timeout(SCpnt, ABORT_TIMEOUT); @@ -756,7 +756,7 @@ static int scsi_abort(Scsi_Cmnd * SCpnt, " the bus was reset\n", SCpnt->channel, SCpnt->target, SCpnt->lun); } - if (!host->host_busy) { + if (atomic_read(&host->host_busy) == 0) { SCpnt->internal_timeout &= ~IN_ABORT; update_timeout(SCpnt, oldto); return 0; @@ -923,17 +923,17 @@ static int scsi_reset(Scsi_Cmnd * SCpnt, return 0; } if (SCpnt->internal_timeout & IN_RESET) { - spin_unlock_irq(&io_request_lock); + spin_unlock_irq(host->lock); while (SCpnt->internal_timeout & IN_RESET) barrier(); - spin_lock_irq(&io_request_lock); + spin_lock_irq(host->lock); } else { SCpnt->internal_timeout |= IN_RESET; update_timeout(SCpnt, RESET_TIMEOUT); if (reset_flags & SCSI_RESET_SYNCHRONOUS) SCpnt->flags |= SYNC_RESET; - if (host->host_busy) { + if (atomic_read(&host->host_busy) != 0) { for (SDpnt = host->host_queue; SDpnt; SDpnt = SDpnt->next) { SCpnt1 = SDpnt->device_queue; while (SCpnt1) { @@ -970,7 +970,7 @@ static int scsi_reset(Scsi_Cmnd * SCpnt, if (host->last_reset - jiffies > 20UL * HZ) host->last_reset = jiffies; } else { - host->host_busy++; + atomic_inc(&host->host_busy); host->last_reset = jiffies; host->resetting = 1; SCpnt->flags |= (WAS_RESET | IS_RESETTING); @@ -983,7 +983,7 @@ static int scsi_reset(Scsi_Cmnd * 
SCpnt, if (time_before(host->last_reset, jiffies) || (time_after(host->last_reset, jiffies + 20 * HZ))) host->last_reset = jiffies; - host->host_busy--; + atomic_dec(&host->host_busy); } if (reset_flags & SCSI_RESET_SYNCHRONOUS) SCpnt->flags &= ~SYNC_RESET; diff -urNp --exclude CVS --exclude BitKeeper x-ref/drivers/scsi/scsi_queue.c x/drivers/scsi/scsi_queue.c --- x-ref/drivers/scsi/scsi_queue.c 2001-02-22 03:45:06.000000000 +0100 +++ x/drivers/scsi/scsi_queue.c 2003-07-17 08:38:21.000000000 +0200 @@ -79,7 +79,6 @@ static const char RCSid[] = "$Header: /m int scsi_mlqueue_insert(Scsi_Cmnd * cmd, int reason) { struct Scsi_Host *host; - unsigned long flags; SCSI_LOG_MLQUEUE(1, printk("Inserting command %p into mlqueue\n", cmd)); @@ -103,7 +102,7 @@ int scsi_mlqueue_insert(Scsi_Cmnd * cmd, * If a host is inactive and cannot queue any commands, I don't see * how things could possibly work anyways. */ - if (host->host_busy == 0) { + if (atomic_read(&host->host_busy) == 0) { if (scsi_retry_command(cmd) == 0) { return 0; } @@ -118,7 +117,7 @@ int scsi_mlqueue_insert(Scsi_Cmnd * cmd, * If a host is inactive and cannot queue any commands, I don't see * how things could possibly work anyways. */ - if (cmd->device->device_busy == 0) { + if (atomic_read(&cmd->device->device_busy) == 0) { if (scsi_retry_command(cmd) == 0) { return 0; } @@ -137,10 +136,8 @@ int scsi_mlqueue_insert(Scsi_Cmnd * cmd, * Decrement the counters, since these commands are no longer * active on the host/device. */ - spin_lock_irqsave(&io_request_lock, flags); - cmd->host->host_busy--; - cmd->device->device_busy--; - spin_unlock_irqrestore(&io_request_lock, flags); + atomic_dec(&cmd->host->host_busy); + atomic_dec(&cmd->device->device_busy); /* * Insert this command at the head of the queue for it's device. 
diff -urNp --exclude CVS --exclude BitKeeper x-ref/drivers/scsi/scsi_scan.c x/drivers/scsi/scsi_scan.c --- x-ref/drivers/scsi/scsi_scan.c 2003-07-17 08:38:13.000000000 +0200 +++ x/drivers/scsi/scsi_scan.c 2003-07-17 08:38:21.000000000 +0200 @@ -341,13 +341,9 @@ void scan_scsis(struct Scsi_Host *shpnt, memset(SDpnt, 0, sizeof(Scsi_Device)); /* * Register the queue for the device. All I/O requests will - * come in through here. We also need to register a pointer to - * ourselves, since the queue handler won't know what device - * the queue actually represents. We could look it up, but it - * is pointless work. + * come in through here. */ scsi_initialize_queue(SDpnt, shpnt); - SDpnt->request_queue.queuedata = (void *) SDpnt; /* Make sure we have something that is valid for DMA purposes */ scsi_result = ((!shpnt->unchecked_isa_dma) ? &scsi_result0[0] : kmalloc(512, GFP_DMA)); @@ -698,7 +694,7 @@ static int scan_scsis_single(unsigned in } SDpnt->device_blocked = FALSE; - SDpnt->device_busy = 0; + atomic_set(&SDpnt->device_busy,0); SDpnt->single_lun = 0; SDpnt->soft_reset = (scsi_result[7] & 1) && ((scsi_result[3] & 7) == 2); diff -urNp --exclude CVS --exclude BitKeeper x-ref/drivers/scsi/sg.c x/drivers/scsi/sg.c --- x-ref/drivers/scsi/sg.c 2003-06-13 22:07:32.000000000 +0200 +++ x/drivers/scsi/sg.c 2003-07-17 08:38:21.000000000 +0200 @@ -3028,7 +3028,7 @@ static int sg_proc_host_info(char * buff for ( ; k < shp->host_no; ++k) PRINT_PROC("-1\t-1\t-1\t-1\t-1\t-1\n"); PRINT_PROC("%u\t%hu\t%hd\t%hu\t%d\t%d\n", - shp->unique_id, shp->host_busy, shp->cmd_per_lun, + shp->unique_id, atomic_read(&shp->host_busy), shp->cmd_per_lun, shp->sg_tablesize, (int)shp->unchecked_isa_dma, (int)shp->hostt->emulated); } diff -urNp --exclude CVS --exclude BitKeeper x-ref/include/linux/blkdev.h x/include/linux/blkdev.h --- x-ref/include/linux/blkdev.h 2003-07-17 08:38:19.000000000 +0200 +++ x/include/linux/blkdev.h 2003-07-17 08:38:21.000000000 +0200 @@ -150,7 +150,7 @@ struct request_queue * 
Is meant to protect the queue in the future instead of * io_request_lock */ - spinlock_t queue_lock; + spinlock_t *queue_lock; /* * Tasks wait here for free read and write requests @@ -274,6 +274,7 @@ extern char * blkdev_varyio[MAX_BLKDEV]; #define blkdev_entry_prev_request(entry) blkdev_entry_to_request((entry)->prev) #define blkdev_next_request(req) blkdev_entry_to_request((req)->queue.next) #define blkdev_prev_request(req) blkdev_entry_to_request((req)->queue.prev) +#define blkdev_free_rq(list) list_entry((list)->next, struct request, queue) extern void drive_stat_acct (kdev_t dev, int rw, unsigned long nr_sectors, int new_io);