From: Go Taniguchi

Make it build (and work) again.

---

 25-akpm/drivers/scsi/Kconfig   |    2 
 25-akpm/drivers/scsi/dpt_i2o.c |  156 +++++++++++++++++++++++++++++++----------
 25-akpm/drivers/scsi/dpti.h    |    4 -
 3 files changed, 121 insertions(+), 41 deletions(-)

diff -puN drivers/scsi/dpt_i2o.c~dpt_i2o drivers/scsi/dpt_i2o.c
--- 25/drivers/scsi/dpt_i2o.c~dpt_i2o	2004-04-27 01:58:25.000000000 -0700
+++ 25-akpm/drivers/scsi/dpt_i2o.c	2004-04-27 01:58:25.000000000 -0700
@@ -20,17 +20,21 @@
  * (at your option) any later version.
  *
  *
  ***************************************************************************/
+/***************************************************************************
+ * Sat Dec 20 2003 Go Taniguchi
+ - Support 2.6 kernel and DMA-mapping
+ - ioctl fix for raid tools
+ - use schedule_timeout in long long loop
+ **************************************************************************/
 
-//#define DEBUG 1
-//#define UARTDELAY 1
+/*#define DEBUG 1 */
+/*#define UARTDELAY 1 */
 
-// On the real kernel ADDR32 should always be zero for 2.4. GFP_HIGH allocates
-// high pages. Keep the macro around because of the broken unmerged ia64 tree
+/* On the real kernel ADDR32 should always be zero for 2.4. GFP_HIGH allocates
+   high pages. Keep the macro around because of the broken unmerged ia64 tree */
 #define ADDR32 (0)
 
-#error Please convert me to Documentation/DMA-mapping.txt
-
 #include
 #include
@@ -53,6 +57,7 @@ MODULE_DESCRIPTION("Adaptec I2O RAID Dri
 #include	/* for printk */
 #include
 #include
+#include
 #include
 #include
@@ -86,7 +91,7 @@ static dpt_sig_S DPTI_sig = {
 #elif defined(__alpha__)
 	PROC_ALPHA ,
 #else
-	(-1),(-1)
+	(-1),(-1),
 #endif
 	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL, ADF_ALL_SC5, 0, 0,
 	DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
@@ -227,7 +232,7 @@ rebuild_sys_tab:
 	/* Active IOPs now in OPERATIONAL state */
 	PDEBUG("HBA's in OPERATIONAL state\n");
 
-	printk(KERN_INFO"dpti: If you have a lot of devices this could take a few minutes.\n");
+	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
 		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
 		if (adpt_i2o_lct_get(pHba) < 0){
@@ -270,6 +275,7 @@ static int adpt_release(struct Scsi_Host
 	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
 //	adpt_i2o_quiesce_hba(pHba);
 	adpt_i2o_delete_hba(pHba);
+	scsi_unregister(host);
 	return 0;
 }
@@ -340,6 +346,8 @@ static void adpt_inquiry(adpt_hba* pHba)
 	if (rcode != 0) {
 		sprintf(pHba->detail, "Adaptec I2O RAID");
 		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
+		if (rcode != -ETIME && rcode != -EINTR)
+			kfree(buf);
 	} else {
 		memset(pHba->detail, 0, sizeof(pHba->detail));
 		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
@@ -348,8 +356,8 @@ static void adpt_inquiry(adpt_hba* pHba)
 		memcpy(&(pHba->detail[40]), " FW: ", 4);
 		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
 		pHba->detail[48] = '\0';	/* precautionary */
+		kfree(buf);
 	}
-	kfree(buf);
 	adpt_i2o_status_get(pHba);
 	return ;
 }
@@ -479,7 +487,7 @@ static int adpt_bios_param(struct scsi_d
 		heads = 255;
 		sectors = 63;
 	}
-	cylinders = capacity / (heads * sectors);
+	cylinders = sector_div(capacity, heads * sectors);
 
 	// Special case if CDROM
 	if(sdev->type == 5) {  // CDROM
@@ -872,6 +880,9 @@ static int adpt_install_hba(Scsi_Host_Te
 		return -EINVAL;
 	}
 
 	pci_set_master(pDev);
+	if (pci_set_dma_mask(pDev, 0xffffffffffffffffULL) &&
+	    pci_set_dma_mask(pDev, 0xffffffffULL))
+		return -EINVAL;
 	base_addr0_phys = pci_resource_start(pDev,0);
 	hba_map0_area_size = pci_resource_len(pDev,0);
@@ -964,6 +975,7 @@ static int adpt_install_hba(Scsi_Host_Te
 
 	// Initializing the spinlocks
 	spin_lock_init(&pHba->state_lock);
+	spin_lock_init(&adpt_post_wait_lock);
 
 	if(raptorFlag == 0){
 		printk(KERN_INFO"Adaptec I2O RAID controller %d at %lx size=%x irq=%d\n",
@@ -1065,7 +1077,7 @@ static int adpt_init(void)
 {
 	int i;
 
-	printk(KERN_INFO"Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
+	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
 	for (i = 0; i < DPTI_MAX_HBA; i++) {
 		hbas[i] = NULL;
 	}
@@ -1153,12 +1165,22 @@ static int adpt_i2o_post_wait(adpt_hba*
 	timeout *= HZ;
 	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
 		set_current_state(TASK_INTERRUPTIBLE);
-		spin_unlock_irq(pHba->host->host_lock);
+		if(pHba->host)
+			spin_unlock_irq(pHba->host->host_lock);
 		if (!timeout)
 			schedule();
-		else
+		else{
+			timeout = schedule_timeout(timeout);
+			if (timeout == 0) {
+				// I/O issued, but cannot get result in
+				// specified time. Freeing resorces is
+				// dangerous.
+				status = -ETIME;
+			}
 			schedule_timeout(timeout*HZ);
-		spin_lock_irq(pHba->host->host_lock);
+		}
+		if(pHba->host)
+			spin_lock_irq(pHba->host->host_lock);
 	}
 	spin_lock_irq(&adpt_wq_i2o_post.lock);
 	__remove_wait_queue(&adpt_wq_i2o_post, &wait);
@@ -1210,6 +1232,8 @@ static s32 adpt_i2o_post_this(adpt_hba*
 			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
 			return -ETIMEDOUT;
 		}
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
 	} while(m == EMPTY_QUEUE);
 
 	msg = (u32*) (pHba->msg_addr_virt + m);
@@ -1284,6 +1308,8 @@ static s32 adpt_i2o_reset_hba(adpt_hba*
 			printk(KERN_WARNING"Timeout waiting for message!\n");
 			return -ETIMEDOUT;
 		}
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
 	} while (m == EMPTY_QUEUE);
 
 	status = (u8*)kmalloc(4, GFP_KERNEL|ADDR32);
@@ -1315,6 +1341,8 @@ static s32 adpt_i2o_reset_hba(adpt_hba*
 			return -ETIMEDOUT;
 		}
 		rmb();
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
 	}
 
 	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
@@ -1331,6 +1359,8 @@ static s32 adpt_i2o_reset_hba(adpt_hba*
 			printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
 			return -ETIMEDOUT;
 		}
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
 	} while (m == EMPTY_QUEUE);
 	// Flush the offset
 	adpt_send_nop(pHba, m);
@@ -1696,15 +1726,20 @@ static int adpt_i2o_passthru(adpt_hba* p
 	}
 
 	do {
-		spin_lock_irqsave(pHba->host->host_lock, flags);
+		if(pHba->host)
+			spin_lock_irqsave(pHba->host->host_lock, flags);
 		// This state stops any new commands from enterring the
 		// controller while processing the ioctl
 		// pHba->state |= DPTI_STATE_IOCTL;
 		// We can't set this now - The scsi subsystem sets host_blocked and
 		// the queue empties and stops.  We need a way to restart the queue
 		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
+		if (rcode != 0)
+			printk("adpt_i2o_passthru: post wait failed %d %p\n",
+			       rcode, reply);
 		// pHba->state &= ~DPTI_STATE_IOCTL;
-		spin_unlock_irqrestore(pHba->host->host_lock, flags);
+		if(pHba->host)
+			spin_unlock_irqrestore(pHba->host->host_lock, flags);
 	} while(rcode == -ETIMEDOUT);
 
 	if(rcode){
@@ -1765,10 +1800,12 @@ static int adpt_i2o_passthru(adpt_hba* p
 
 cleanup:
-	kfree (reply);
+	if (rcode != -ETIME && rcode != -EINTR)
+		kfree (reply);
 	while(sg_index) {
 		if(sg_list[--sg_index]) {
-			kfree((void*)(sg_list[sg_index]));
+			if (rcode != -ETIME && rcode != -EINTR)
+				kfree((void*)(sg_list[sg_index]));
 		}
 	}
 	return rcode;
@@ -1876,7 +1913,7 @@ static int adpt_ioctl(struct inode *inod
 	int minor;
 	int error = 0;
 	adpt_hba* pHba;
-	ulong flags;
+	ulong flags = 0;
 
 	minor = iminor(inode);
 	if (minor >= DPTI_MAX_HBA){
@@ -1942,9 +1979,11 @@ static int adpt_ioctl(struct inode *inod
 			break;
 		}
 	case I2ORESETCMD:
-		spin_lock_irqsave(pHba->host->host_lock, flags);
+		if(pHba->host)
+			spin_lock_irqsave(pHba->host->host_lock, flags);
 		adpt_hba_reset(pHba);
-		spin_unlock_irqrestore(pHba->host->host_lock, flags);
+		if(pHba->host)
+			spin_unlock_irqrestore(pHba->host->host_lock, flags);
 		break;
 	case I2ORESCANCMD:
 		adpt_rescan(pHba);
@@ -1957,7 +1996,7 @@ static int adpt_ioctl(struct inode *inod
 }
 
 
-static void adpt_isr(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t adpt_isr(int irq, void *dev_id, struct pt_regs *regs)
 {
 	Scsi_Cmnd* cmd;
 	adpt_hba* pHba = dev_id;
@@ -1966,12 +2005,15 @@ static void adpt_isr(int irq, void *dev_
 	u32 status=0;
 	u32 context;
 	ulong flags = 0;
+	int handled = 0;
 
-	if (pHba == NULL ){
+	if (pHba == NULL){
 		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
-		return;
+		return IRQ_NONE;
 	}
-	spin_lock_irqsave(pHba->host->host_lock, flags);
+	if(pHba->host)
+		spin_lock_irqsave(pHba->host->host_lock, flags);
+
 	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
 		m = readl(pHba->reply_port);
 		if(m == EMPTY_QUEUE){
@@ -2036,7 +2078,10 @@ static void adpt_isr(int irq, void *dev_
 		wmb();
 		rmb();
 	}
-out:	spin_unlock_irqrestore(pHba->host->host_lock, flags);
+	handled = 1;
+out:	if(pHba->host)
+		spin_unlock_irqrestore(pHba->host->host_lock, flags);
+	return IRQ_RETVAL(handled);
 }
 
 static s32 adpt_scsi_to_i2o(adpt_hba* pHba, Scsi_Cmnd* cmd, struct adpt_device* d)
@@ -2111,15 +2156,19 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
 	/* Now fill in the SGList and command */
 	if(cmd->use_sg) {
 		struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer;
+		int sg_count = pci_map_sg(pHba->pDev, sg, cmd->use_sg,
+				scsi_to_pci_dma_dir(cmd->sc_data_direction));
+
+		len = 0;
-		for(i = 0 ; i < cmd->use_sg; i++) {
-			*mptr++ = direction|0x10000000|sg->length;
-			len+=sg->length;
-			*mptr++ = virt_to_bus(sg->address);
+		for(i = 0 ; i < sg_count; i++) {
+			*mptr++ = direction|0x10000000|sg_dma_len(sg);
+			len+=sg_dma_len(sg);
+			*mptr++ = sg_dma_address(sg);
 			sg++;
 		}
 		/* Make this an end of list */
-		mptr[-2] = direction|0xD0000000|(sg-1)->length;
+		mptr[-2] = direction|0xD0000000|sg_dma_len(sg-1);
 		reqlen = mptr - msg;
 		*lenptr = len;
@@ -2133,7 +2182,10 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
 			reqlen = 12;
 		} else {
 			*mptr++ = 0xD0000000|direction|cmd->request_bufflen;
-			*mptr++ = virt_to_bus(cmd->request_buffer);
+			*mptr++ = pci_map_single(pHba->pDev,
+				cmd->request_buffer,
+				cmd->request_bufflen,
+				scsi_to_pci_dma_dir(cmd->sc_data_direction));
 		}
 	}
@@ -2306,15 +2358,17 @@ static s32 adpt_i2o_to_scsi(ulong reply,
 static s32 adpt_rescan(adpt_hba* pHba)
 {
 	s32 rcode;
-	ulong flags;
+	ulong flags = 0;
 
-	spin_lock_irqsave(pHba->host->host_lock, flags);
+	if(pHba->host)
+		spin_lock_irqsave(pHba->host->host_lock, flags);
 	if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
 		goto out;
 	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
 		goto out;
 	rcode = 0;
-out:	spin_unlock_irqrestore(pHba->host->host_lock, flags);
+out:	if(pHba->host)
+		spin_unlock_irqrestore(pHba->host->host_lock, flags);
 	return rcode;
 }
@@ -2596,6 +2650,8 @@ static s32 adpt_send_nop(adpt_hba*pHba,u
 			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
 			return 2;
 		}
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
 	}
 	msg = (u32*)(pHba->msg_addr_virt + m);
 	writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
@@ -2629,6 +2685,8 @@ static s32 adpt_i2o_init_outbound_q(adpt
 			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
 			return -ETIMEDOUT;
 		}
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
 	} while(m == EMPTY_QUEUE);
 
 	msg=(u32 *)(pHba->msg_addr_virt+m);
@@ -2664,9 +2722,10 @@ static s32 adpt_i2o_init_outbound_q(adpt
 		rmb();
 		if(time_after(jiffies,timeout)){
 			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
-			kfree((void*)status);
 			return -ETIMEDOUT;
 		}
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
 	} while (1);
 
 	// If the command was successful, fill the fifo with our reply
@@ -2744,6 +2803,8 @@ static s32 adpt_i2o_status_get(adpt_hba*
 					pHba->name);
 			return -ETIMEDOUT;
 		}
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
 	} while(m==EMPTY_QUEUE);
 
@@ -2770,6 +2831,8 @@ static s32 adpt_i2o_status_get(adpt_hba*
 			return -ETIMEDOUT;
 		}
 		rmb();
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
 	}
 
 	// Set up our number of outbound and inbound messages
@@ -3095,17 +3158,33 @@ static int adpt_i2o_query_scalar(adpt_hb
 			int group, int field, void *buf, int buflen)
 {
 	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
-	u8 resblk[8+buflen]; /* 8 bytes for header */
+	u8 *resblk;
+
 	int size;
 
+	/* 8 bytes for header */
+	resblk = kmalloc(sizeof(u8) * (8+buflen), GFP_KERNEL|ADDR32);
+	if (resblk == NULL) {
+		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
+		return -ENOMEM;
+	}
+
 	if (field == -1)		/* whole group */
 			opblk[4] = -1;
 
 	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
-		opblk, sizeof(opblk), resblk, sizeof(resblk));
+		opblk, sizeof(opblk), resblk, sizeof(u8)*(8+buflen));
+	if (size == -ETIME) {
+		printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
+		return -ETIME;
+	} else if (size == -EINTR) {
+		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
+		return -EINTR;
+	}
 
 	memcpy(buf, resblk+8, buflen);	/* cut off header */
 
+	kfree(resblk);
 	if (size < 0)
 		return size;
@@ -3139,6 +3218,7 @@ static int adpt_i2o_issue_params(int cmd
 	msg[8] = virt_to_bus(resblk);
 
 	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
+		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk);
 		return wait_status; /* -DetailedStatus */
 	}
diff -puN drivers/scsi/dpti.h~dpt_i2o drivers/scsi/dpti.h
--- 25/drivers/scsi/dpti.h~dpt_i2o	2004-04-27 01:58:25.000000000 -0700
+++ 25-akpm/drivers/scsi/dpti.h	2004-04-27 01:58:25.000000000 -0700
@@ -65,7 +65,7 @@ static int adpt_device_reset(Scsi_Cmnd*
 #include "dpt/dpti_i2o.h"
 #include "dpt/dpti_ioctl.h"
 
-#define DPT_I2O_VERSION "2.4 Build 5"
+#define DPT_I2O_VERSION "2.4 Build 5go"
 #define DPT_VERSION 2
 #define DPT_REVISION '4'
 #define DPT_SUBREVISION '5'
@@ -272,7 +272,7 @@ struct sg_simple_element {
 static void adpt_i2o_sys_shutdown(void);
 static int adpt_init(void);
 static int adpt_i2o_build_sys_table(void);
-static void adpt_isr(int irq, void *dev_id, struct pt_regs *regs);
+static irqreturn_t adpt_isr(int irq, void *dev_id, struct pt_regs *regs);
 #ifdef REBOOT_NOTIFIER
 static int adpt_reboot_event(struct notifier_block *n, ulong code, void *p);
 #endif
diff -puN drivers/scsi/Kconfig~dpt_i2o drivers/scsi/Kconfig
--- 25/drivers/scsi/Kconfig~dpt_i2o	2004-04-27 01:58:25.000000000 -0700
+++ 25-akpm/drivers/scsi/Kconfig	2004-04-27 01:58:25.000000000 -0700
@@ -363,7 +363,7 @@ source "drivers/scsi/aic7xxx/Kconfig.aic
 # All the I2O code and drivers do not seem to be 64bit safe.
 config SCSI_DPT_I2O
 	tristate "Adaptec I2O RAID support "
-	depends on !64BIT && SCSI && BROKEN
+	depends on !64BIT && SCSI
 	help
 	  This driver supports all of Adaptec's I2O based RAID controllers as
 	  well as the DPT SmartRaid V cards.  This is an Adaptec maintained
_
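
For reference, the DMA changes in the patch follow the usual Documentation/DMA-mapping.txt
pattern: negotiate a DMA mask at probe time, then hand the controller bus addresses obtained
from pci_map_sg()/pci_map_single() instead of virt_to_bus() values.  A minimal sketch of that
pattern, using the same old PCI DMA API the patch uses, is below; the helper names
example_setup_dma_mask() and example_fill_sg() are invented for illustration only -- in the
driver the equivalent code sits in adpt_install_hba() and adpt_scsi_to_i2o().

/* Illustrative sketch only -- not code from the patch. */
#include <linux/types.h>
#include <linux/pci.h>
#include <asm/scatterlist.h>

/* Negotiate the DMA mask once at probe time: prefer 64-bit addressing,
 * fall back to 32-bit, and fail the probe if neither is accepted. */
static int example_setup_dma_mask(struct pci_dev *pdev)
{
	if (pci_set_dma_mask(pdev, 0xffffffffffffffffULL) == 0)
		return 0;		/* 64-bit DMA is fine */
	if (pci_set_dma_mask(pdev, 0xffffffffULL) == 0)
		return 0;		/* 32-bit DMA only */
	return -EINVAL;			/* no usable DMA mask */
}

/* Map a scatterlist and emit (length, bus address) pairs for the HBA.
 * sg_dma_len()/sg_dma_address() replace sg->length and
 * virt_to_bus(sg->address); a 32-bit slot is enough here because the
 * driver is still !64BIT-only.  The caller must pci_unmap_sg() with the
 * same direction once the I/O completes. */
static int example_fill_sg(struct pci_dev *pdev, struct scatterlist *sg,
			   int nents, int direction, u32 *mptr)
{
	int i, sg_count;

	sg_count = pci_map_sg(pdev, sg, nents, direction);
	for (i = 0; i < sg_count; i++, sg++) {
		*mptr++ = sg_dma_len(sg);	/* byte count the device sees */
		*mptr++ = sg_dma_address(sg);	/* bus address, not virtual */
	}
	return sg_count;			/* entries actually mapped */
}

Note that the loop runs over the count returned by pci_map_sg() rather than over the original
nents, since the mapping layer may coalesce entries -- which is why the patch switches the
adpt_scsi_to_i2o() loop bound from cmd->use_sg to sg_count.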
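
The schedule_timeout additions all have one shape: every polling loop that used to spin on
readl() until the IOP posted a message frame now yields the CPU for a jiffy per pass.  A sketch
of that shape is below; the function name, register argument and EXAMPLE_EMPTY_QUEUE stand-in
for the driver's EMPTY_QUEUE sentinel are invented for the example.

/* Illustrative sketch only -- mirrors the set_current_state()/
 * schedule_timeout(1) additions, not code from the patch. */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <asm/io.h>

#define EXAMPLE_EMPTY_QUEUE 0xffffffff	/* stand-in for the driver's EMPTY_QUEUE */

/* Poll a mailbox register for a free message frame, sleeping one jiffy
 * per pass instead of busy-waiting, and give up once the deadline
 * passes so the caller can report the timeout. */
static u32 example_wait_for_frame(void *post_port, unsigned long deadline)
{
	u32 m;

	do {
		m = readl(post_port);
		if (m != EXAMPLE_EMPTY_QUEUE)
			break;				/* got a frame */
		if (time_after(jiffies, deadline))
			return EXAMPLE_EMPTY_QUEUE;	/* caller reports the timeout */
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);			/* yield for ~1 jiffy */
	} while (1);

	return m;
}

TASK_UNINTERRUPTIBLE matches what the patch uses; a wait that should be abortable by signals
would use TASK_INTERRUPTIBLE plus a signal check instead.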