From: Markus Lidel

generic:
 - split i2o_core into several files, grouped by function
 - I2O devices are now registered as devices and show up in sysfs
 - the various I2O OSMs (e.g. i2o_scsi) now register in the I2O core and
   also use the 2.6 driver mechanism
 - I2O messages are now built directly in the message frame instead of
   being created in local memory and copied over later
 - the context list for 64-bit pointer to 32-bit context conversion now
   uses a doubly linked list

PCI:
 - the driver now registers as a PCI device driver and uses the probe
   function to find the available controllers (needed for hotplugging)
 - converted DMA handling from pci_* to the generic dma_* functions

Block OSM:
 - use one request queue per I2O block device instead of one per controller
 - I2O block devices and queues are allocated dynamically, so there is no
   longer a limit on the number of block devices

SCSI OSM:
 - corrected a bug in the SCSI reply function which caused memory to be
   freed before the done function was called
 - one I2O controller registers as one SCSI host instead of one SCSI host
   per channel
 - no more ch,id,lun => tid mapping table

Config OSM:
 - added ioctl32 for passthru and getiops
 - removed ioctl_html

Documentation:
 - removed TODO entries from README
 - moved docs under Documentation/i2o

Signed-off-by: Andrew Morton
---

 /dev/null                                |11922 -------------------------------
 25-akpm/Documentation/i2o/README         |   63
 25-akpm/Documentation/i2o/ioctl          |  394 +
 25-akpm/drivers/message/i2o/Makefile     |    5
 25-akpm/drivers/message/i2o/block-osm.c  | 1444 +++
 25-akpm/drivers/message/i2o/block-osm.h  |   99
 25-akpm/drivers/message/i2o/config-osm.c | 1206 +++
 25-akpm/drivers/message/i2o/debug.c      |  576 +
 25-akpm/drivers/message/i2o/device.c     |  700 +
 25-akpm/drivers/message/i2o/driver.c     |  290
 25-akpm/drivers/message/i2o/exec-osm.c   |  532 +
 25-akpm/drivers/message/i2o/iop.c        | 1261 +++
 25-akpm/drivers/message/i2o/pci.c        |  541 +
 25-akpm/drivers/message/i2o/proc-osm.c   | 2123 +++++
 25-akpm/drivers/message/i2o/scsi-osm.c   | 1012 ++
 25-akpm/include/linux/i2o-dev.h          |   36
 25-akpm/include/linux/i2o.h              |  570 +
 17 files changed, 10657 insertions(+), 12117 deletions(-)

diff -puN /dev/null Documentation/i2o/ioctl
--- /dev/null	Thu Apr 11 07:25:15 2002
+++ 25-akpm/Documentation/i2o/ioctl	Tue Jul 27 14:06:32 2004
@@ -0,0 +1,394 @@
+
+Linux I2O User Space Interface
+rev 0.3 - 04/20/99
+
+=============================================================================
+Originally written by Deepak Saxena (deepak@plexity.net)
+Currently maintained by Deepak Saxena (deepak@plexity.net)
+=============================================================================
+
+I. Introduction
+
+The Linux I2O subsystem provides a set of ioctl() commands that can be
+utilized by user space applications to communicate with IOPs and devices
+on individual IOPs. This document defines the specific ioctl() commands
+that are available to the user and provides examples of their uses.
+
+This document assumes the reader is familiar with or has access to the
+I2O specification, as no I2O message parameters are outlined. For
+information on the specification, see http://www.i2osig.org
+
+This document and the I2O user space interface are currently maintained
+by Deepak Saxena. Please send all comments, errata, and bug fixes to
+deepak@csociety.purdue.edu
+
+II. IOP Access
+
+Access to the I2O subsystem is provided through the device file named
+/dev/i2o/ctl. This file is a character file with major number 10 and minor
+number 166. It can be created through the following command:
+
+   mknod /dev/i2o/ctl c 10 166
+
+III. 
Determining the IOP Count + + SYNOPSIS + + ioctl(fd, I2OGETIOPS, int *count); + + u8 count[MAX_I2O_CONTROLLERS]; + + DESCRIPTION + + This function returns the system's active IOP table. count should + point to a buffer containing MAX_I2O_CONTROLLERS entries. Upon + returning, each entry will contain a non-zero value if the given + IOP unit is active, and NULL if it is inactive or non-existent. + + RETURN VALUE. + + Returns 0 if no errors occur, and -1 otherwise. If an error occurs, + errno is set appropriately: + + EFAULT Invalid user space pointer was passed + +IV. Getting Hardware Resource Table + + SYNOPSIS + + ioctl(fd, I2OHRTGET, struct i2o_cmd_hrt *hrt); + + struct i2o_cmd_hrtlct + { + u32 iop; /* IOP unit number */ + void *resbuf; /* Buffer for result */ + u32 *reslen; /* Buffer length in bytes */ + }; + + DESCRIPTION + + This function returns the Hardware Resource Table of the IOP specified + by hrt->iop in the buffer pointed to by hrt->resbuf. The actual size of + the data is written into *(hrt->reslen). + + RETURNS + + This function returns 0 if no errors occur. If an error occurs, -1 + is returned and errno is set appropriately: + + EFAULT Invalid user space pointer was passed + ENXIO Invalid IOP number + ENOBUFS Buffer not large enough. If this occurs, the required + buffer length is written into *(hrt->reslen) + +V. Getting Logical Configuration Table + + SYNOPSIS + + ioctl(fd, I2OLCTGET, struct i2o_cmd_lct *lct); + + struct i2o_cmd_hrtlct + { + u32 iop; /* IOP unit number */ + void *resbuf; /* Buffer for result */ + u32 *reslen; /* Buffer length in bytes */ + }; + + DESCRIPTION + + This function returns the Logical Configuration Table of the IOP specified + by lct->iop in the buffer pointed to by lct->resbuf. The actual size of + the data is written into *(lct->reslen). + + RETURNS + + This function returns 0 if no errors occur. If an error occurs, -1 + is returned and errno is set appropriately: + + EFAULT Invalid user space pointer was passed + ENXIO Invalid IOP number + ENOBUFS Buffer not large enough. If this occurs, the required + buffer length is written into *(lct->reslen) + +VI. Settting Parameters + + SYNOPSIS + + ioctl(fd, I2OPARMSET, struct i2o_parm_setget *ops); + + struct i2o_cmd_psetget + { + u32 iop; /* IOP unit number */ + u32 tid; /* Target device TID */ + void *opbuf; /* Operation List buffer */ + u32 oplen; /* Operation List buffer length in bytes */ + void *resbuf; /* Result List buffer */ + u32 *reslen; /* Result List buffer length in bytes */ + }; + + DESCRIPTION + + This function posts a UtilParamsSet message to the device identified + by ops->iop and ops->tid. The operation list for the message is + sent through the ops->opbuf buffer, and the result list is written + into the buffer pointed to by ops->resbuf. The number of bytes + written is placed into *(ops->reslen). + + RETURNS + + The return value is the size in bytes of the data written into + ops->resbuf if no errors occur. If an error occurs, -1 is returned + and errno is set appropriatly: + + EFAULT Invalid user space pointer was passed + ENXIO Invalid IOP number + ENOBUFS Buffer not large enough. If this occurs, the required + buffer length is written into *(ops->reslen) + ETIMEDOUT Timeout waiting for reply message + ENOMEM Kernel memory allocation error + + A return value of 0 does not mean that the value was actually + changed properly on the IOP. The user should check the result + list to determine the specific status of the transaction. + +VII. 
Getting Parameters + + SYNOPSIS + + ioctl(fd, I2OPARMGET, struct i2o_parm_setget *ops); + + struct i2o_parm_setget + { + u32 iop; /* IOP unit number */ + u32 tid; /* Target device TID */ + void *opbuf; /* Operation List buffer */ + u32 oplen; /* Operation List buffer length in bytes */ + void *resbuf; /* Result List buffer */ + u32 *reslen; /* Result List buffer length in bytes */ + }; + + DESCRIPTION + + This function posts a UtilParamsGet message to the device identified + by ops->iop and ops->tid. The operation list for the message is + sent through the ops->opbuf buffer, and the result list is written + into the buffer pointed to by ops->resbuf. The actual size of data + written is placed into *(ops->reslen). + + RETURNS + + EFAULT Invalid user space pointer was passed + ENXIO Invalid IOP number + ENOBUFS Buffer not large enough. If this occurs, the required + buffer length is written into *(ops->reslen) + ETIMEDOUT Timeout waiting for reply message + ENOMEM Kernel memory allocation error + + A return value of 0 does not mean that the value was actually + properly retreived. The user should check the result list + to determine the specific status of the transaction. + +VIII. Downloading Software + + SYNOPSIS + + ioctl(fd, I2OSWDL, struct i2o_sw_xfer *sw); + + struct i2o_sw_xfer + { + u32 iop; /* IOP unit number */ + u8 flags; /* DownloadFlags field */ + u8 sw_type; /* Software type */ + u32 sw_id; /* Software ID */ + void *buf; /* Pointer to software buffer */ + u32 *swlen; /* Length of software buffer */ + u32 *maxfrag; /* Number of fragments */ + u32 *curfrag; /* Current fragment number */ + }; + + DESCRIPTION + + This function downloads a software fragment pointed by sw->buf + to the iop identified by sw->iop. The DownloadFlags, SwID, SwType + and SwSize fields of the ExecSwDownload message are filled in with + the values of sw->flags, sw->sw_id, sw->sw_type and *(sw->swlen). + + The fragments _must_ be sent in order and be 8K in size. The last + fragment _may_ be shorter, however. The kernel will compute its + size based on information in the sw->swlen field. + + Please note that SW transfers can take a long time. + + RETURNS + + This function returns 0 no errors occur. If an error occurs, -1 + is returned and errno is set appropriatly: + + EFAULT Invalid user space pointer was passed + ENXIO Invalid IOP number + ETIMEDOUT Timeout waiting for reply message + ENOMEM Kernel memory allocation error + +IX. Uploading Software + + SYNOPSIS + + ioctl(fd, I2OSWUL, struct i2o_sw_xfer *sw); + + struct i2o_sw_xfer + { + u32 iop; /* IOP unit number */ + u8 flags; /* UploadFlags */ + u8 sw_type; /* Software type */ + u32 sw_id; /* Software ID */ + void *buf; /* Pointer to software buffer */ + u32 *swlen; /* Length of software buffer */ + u32 *maxfrag; /* Number of fragments */ + u32 *curfrag; /* Current fragment number */ + }; + + DESCRIPTION + + This function uploads a software fragment from the IOP identified + by sw->iop, sw->sw_type, sw->sw_id and optionally sw->swlen fields. + The UploadFlags, SwID, SwType and SwSize fields of the ExecSwUpload + message are filled in with the values of sw->flags, sw->sw_id, + sw->sw_type and *(sw->swlen). + + The fragments _must_ be requested in order and be 8K in size. The + user is responsible for allocating memory pointed by sw->buf. The + last fragment _may_ be shorter. + + Please note that SW transfers can take a long time. + + RETURNS + + This function returns 0 if no errors occur. 
If an error occurs, -1 + is returned and errno is set appropriatly: + + EFAULT Invalid user space pointer was passed + ENXIO Invalid IOP number + ETIMEDOUT Timeout waiting for reply message + ENOMEM Kernel memory allocation error + +X. Removing Software + + SYNOPSIS + + ioctl(fd, I2OSWDEL, struct i2o_sw_xfer *sw); + + struct i2o_sw_xfer + { + u32 iop; /* IOP unit number */ + u8 flags; /* RemoveFlags */ + u8 sw_type; /* Software type */ + u32 sw_id; /* Software ID */ + void *buf; /* Unused */ + u32 *swlen; /* Length of the software data */ + u32 *maxfrag; /* Unused */ + u32 *curfrag; /* Unused */ + }; + + DESCRIPTION + + This function removes software from the IOP identified by sw->iop. + The RemoveFlags, SwID, SwType and SwSize fields of the ExecSwRemove message + are filled in with the values of sw->flags, sw->sw_id, sw->sw_type and + *(sw->swlen). Give zero in *(sw->len) if the value is unknown. IOP uses + *(sw->swlen) value to verify correct identication of the module to remove. + The actual size of the module is written into *(sw->swlen). + + RETURNS + + This function returns 0 if no errors occur. If an error occurs, -1 + is returned and errno is set appropriatly: + + EFAULT Invalid user space pointer was passed + ENXIO Invalid IOP number + ETIMEDOUT Timeout waiting for reply message + ENOMEM Kernel memory allocation error + +X. Validating Configuration + + SYNOPSIS + + ioctl(fd, I2OVALIDATE, int *iop); + u32 iop; + + DESCRIPTION + + This function posts an ExecConfigValidate message to the controller + identified by iop. This message indicates that the current + configuration is accepted. The iop changes the status of suspect drivers + to valid and may delete old drivers from its store. + + RETURNS + + This function returns 0 if no erro occur. If an error occurs, -1 is + returned and errno is set appropriatly: + + ETIMEDOUT Timeout waiting for reply message + ENXIO Invalid IOP number + +XI. Configuration Dialog + + SYNOPSIS + + ioctl(fd, I2OHTML, struct i2o_html *htquery); + struct i2o_html + { + u32 iop; /* IOP unit number */ + u32 tid; /* Target device ID */ + u32 page; /* HTML page */ + void *resbuf; /* Buffer for reply HTML page */ + u32 *reslen; /* Length in bytes of reply buffer */ + void *qbuf; /* Pointer to HTTP query string */ + u32 qlen; /* Length in bytes of query string buffer */ + }; + + DESCRIPTION + + This function posts an UtilConfigDialog message to the device identified + by htquery->iop and htquery->tid. The requested HTML page number is + provided by the htquery->page field, and the resultant data is stored + in the buffer pointed to by htquery->resbuf. If there is an HTTP query + string that is to be sent to the device, it should be sent in the buffer + pointed to by htquery->qbuf. If there is no query string, this field + should be set to NULL. The actual size of the reply received is written + into *(htquery->reslen). + + RETURNS + + This function returns 0 if no error occur. If an error occurs, -1 + is returned and errno is set appropriatly: + + EFAULT Invalid user space pointer was passed + ENXIO Invalid IOP number + ENOBUFS Buffer not large enough. If this occurs, the required + buffer length is written into *(ops->reslen) + ETIMEDOUT Timeout waiting for reply message + ENOMEM Kernel memory allocation error + +XII. Events + + In the process of determining this. Current idea is to have use + the select() interface to allow user apps to periodically poll + the /dev/i2o/ctl device for events. 
When select() notifies the user + that an event is available, the user would call read() to retrieve + a list of all the events that are pending for the specific device. + +============================================================================= +Revision History +============================================================================= + +Rev 0.1 - 04/01/99 +- Initial revision + +Rev 0.2 - 04/06/99 +- Changed return values to match UNIX ioctl() standard. Only return values + are 0 and -1. All errors are reported through errno. +- Added summary of proposed possible event interfaces + +Rev 0.3 - 04/20/99 +- Changed all ioctls() to use pointers to user data instead of actual data +- Updated error values to match the code diff -puN /dev/null Documentation/i2o/README --- /dev/null Thu Apr 11 07:25:15 2002 +++ 25-akpm/Documentation/i2o/README Tue Jul 27 14:06:32 2004 @@ -0,0 +1,63 @@ + + Linux I2O Support (c) Copyright 1999 Red Hat Software + and others. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License + as published by the Free Software Foundation; either version + 2 of the License, or (at your option) any later version. + +AUTHORS (so far) + +Alan Cox, Building Number Three Ltd. + Core code, SCSI and Block OSMs + +Steve Ralston, LSI Logic Corp. + Debugging SCSI and Block OSM + +Deepak Saxena, Intel Corp. + Various core/block extensions + /proc interface, bug fixes + Ioctl interfaces for control + Debugging LAN OSM + +Philip Rumpf + Fixed assorted dumb SMP locking bugs + +Juha Sievanen, University of Helsinki Finland + LAN OSM code + /proc interface to LAN class + Bug fixes + Core code extensions + +Auvo Häkkinen, University of Helsinki Finland + LAN OSM code + /Proc interface to LAN class + Bug fixes + Core code extensions + +Taneli Vähäkangas, University of Helsinki Finland + Fixes to i2o_config + +CREDITS + + This work was made possible by + +Red Hat Software + Funding for the Building #3 part of the project + +Symbios Logic (Now LSI) + Host adapters, hints, known to work platforms when I hit + compatibility problems + +BoxHill Corporation + Loan of initial FibreChannel disk array used for development work. + +European Comission + Funding the work done by the University of Helsinki + +SysKonnect + Loan of FDDI and Gigabit Ethernet cards + +ASUSTeK + Loan of I2O motherboard diff -puN /dev/null drivers/message/i2o/block-osm.c --- /dev/null Thu Apr 11 07:25:15 2002 +++ 25-akpm/drivers/message/i2o/block-osm.c Tue Jul 27 14:06:32 2004 @@ -0,0 +1,1444 @@ +/* + * Block OSM + * + * Copyright (C) 1999-2002 Red Hat Software + * + * Written by Alan Cox, Building Number Three Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * For the purpose of avoiding doubt the preferred form of the work + * for making modifications shall be a standards compliant form such + * gzipped tar and not one requiring a proprietary or patent encumbered + * tool to unpack. 
+ * + * Fixes/additions: + * Steve Ralston: + * Multiple device handling error fixes, + * Added a queue depth. + * Alan Cox: + * FC920 has an rmw bug. Dont or in the end marker. + * Removed queue walk, fixed for 64bitness. + * Rewrote much of the code over time + * Added indirect block lists + * Handle 64K limits on many controllers + * Don't use indirects on the Promise (breaks) + * Heavily chop down the queue depths + * Deepak Saxena: + * Independent queues per IOP + * Support for dynamic device creation/deletion + * Code cleanup + * Support for larger I/Os through merge* functions + * (taken from DAC960 driver) + * Boji T Kannanthanam: + * Set the I2O Block devices to be detected in increasing + * order of TIDs during boot. + * Search and set the I2O block device that we boot off + * from as the first device to be claimed (as /dev/i2o/hda) + * Properly attach/detach I2O gendisk structure from the + * system gendisk list. The I2O block devices now appear in + * /proc/partitions. + * Markus Lidel : + * Minor bugfixes for 2.6. + */ + +#include +#include + +#include + +#include +#include +#include + +#include "block-osm.h" + + +#undef DEBUG + +#ifdef DEBUG +#define DBG(x...) printk(x) +#else +#define DBG(x...) +#endif + +/* Internal used functions */ +static struct i2o_block_device *i2o_block_device_alloc(void); +static void i2o_block_device_free(struct i2o_block_device *); + +static int i2o_block_probe(struct device *); +static int i2o_block_remove(struct device *); + +static int i2o_block_device_flush(struct i2o_device *); +static int i2o_block_device_mount(struct i2o_device *, u32); +static int i2o_block_device_lock(struct i2o_device *, u32); +static int i2o_block_device_unlock(struct i2o_device *, u32); +static int i2o_block_device_power(struct i2o_block_device *, u8); + +static inline struct i2o_block_request *i2o_block_request_alloc(void); +static inline void i2o_block_request_free(struct i2o_block_request *); + +static inline int i2o_block_sglist_alloc(struct i2o_block_request *); +static inline void i2o_block_sglist_free(struct i2o_block_request *); + +static int i2o_block_prep_req_fn(struct request_queue *, struct request *); +static void i2o_block_delayed_request_fn(void *); +static void i2o_block_request_fn(struct request_queue *); +static int i2o_block_transfer(struct request *); +static int i2o_block_reply(struct i2o_controller *, u32, struct i2o_message *); +static void i2o_block_event(struct i2o_event *); + +static void i2o_block_biosparam(unsigned long, unsigned short *, + unsigned char *, unsigned char *); + +static int i2o_block_open(struct inode *, struct file *); +static int i2o_block_release(struct inode *, struct file *); +static int i2o_block_ioctl(struct inode *, struct file *, unsigned int, + unsigned long); +static int i2o_block_media_changed(struct gendisk *); + +static int __init i2o_block_init(void); +static void __exit i2o_block_exit(void); + +/* global Block OSM request mempool */ +static struct i2o_block_mempool i2o_blk_req_pool; + +/* I2O Block device operations definition */ +static struct block_device_operations i2o_block_fops = +{ + .owner = THIS_MODULE, + .open = i2o_block_open, + .release = i2o_block_release, + .ioctl = i2o_block_ioctl, + .media_changed = i2o_block_media_changed +}; + +/* Block OSM class handling definition */ +static struct i2o_class_id i2o_block_class_id[] = { + { I2O_CLASS_RANDOM_BLOCK_STORAGE }, + { I2O_CLASS_END } +}; + +/* Block OSM driver struct */ +static struct i2o_driver i2o_block_driver = { + .name = "block-osm", + .event = 
i2o_block_event, + .reply = i2o_block_reply, + .classes = i2o_block_class_id, + .driver.probe = i2o_block_probe, + .driver.remove = i2o_block_remove +}; + + +/** + * i2o_block_device_alloc - Allocate memory for a I2O Block device + * + * Allocate memory for the i2o_block_device struct, gendisk and request + * queue and initialize them as far as no additional information is needed. + * + * Returns a pointer to the allocated I2O Block device on succes or a + * negative error code on failure. + */ +static struct i2o_block_device *i2o_block_device_alloc(void) +{ + struct i2o_block_device *dev; + struct gendisk *gd; + struct request_queue *queue; + int rc; + + dev = kmalloc(sizeof(*dev), GFP_KERNEL); + if(!dev) { + printk(KERN_ERR "block-osm: Insufficient memory to allocate " + "I2O Block disk.\n"); + rc = -ENOMEM; + goto exit; + } + memset(dev, 0, sizeof(*dev)); + + INIT_LIST_HEAD(&dev->open_queue); + spin_lock_init(&dev->lock); + dev->rcache = CACHE_PREFETCH; + dev->wcache = CACHE_WRITEBACK; + + /* allocate a gendisk with 16 partitions */ + gd = alloc_disk(16); + if(!gd) { + printk(KERN_ERR "block-osm: Insufficient memory to allocate " + "gendisk.\n"); + rc = -ENOMEM; + goto cleanup_dev; + } + + /* initialize the request queue */ + queue = blk_init_queue(i2o_block_request_fn, &dev->lock); + if(!queue) + { + printk(KERN_ERR "block-osm: Insufficient memory to allocate " + "request queue.\n"); + rc = -ENOMEM; + goto cleanup_queue; + } + + blk_queue_prep_rq(queue, i2o_block_prep_req_fn); + + gd->major = I2O_MAJOR; + gd->queue = queue; + gd->fops = &i2o_block_fops; + gd->private_data = dev; + + dev->gd = gd; + + return dev; + +cleanup_queue: + put_disk(gd); + +cleanup_dev: + kfree(dev); + +exit: + return ERR_PTR(rc); +}; + +/** + * i2o_block_device_free - free the memory of the I2O Block device + * @dev: I2O Block device, which should be cleaned up + * + * Frees the request queue, gendisk and the i2o_block_device structure. + */ +static void i2o_block_device_free(struct i2o_block_device *dev) +{ + blk_cleanup_queue(dev->gd->queue); + + put_disk(dev->gd); + + kfree(dev); +}; + +/** + * i2o_block_probe - verify if dev is a I2O Block device and install it + * @dev: device to verify if it is a I2O Block device + * + * We only verify if the user_tid of the device is 0xfff and then install + * the device. Otherwise it is used by some other device (e. g. RAID). + * + * Returns 0 on success or negative error code on failure. + */ +static int i2o_block_probe(struct device *dev) +{ + struct i2o_device *i2o_dev = to_i2o_device(dev); + struct i2o_block_device *i2o_blk_dev; + struct i2o_controller *c = i2o_dev->iop; + struct gendisk *gd; + struct request_queue *queue; + static int unit = 0; + int rc; + u64 size; + u32 blocksize; + u16 power; + u32 flags, status; + int segments; + + /* skip devices which are used by IOP */ + if(i2o_dev->lct_data.user_tid != 0xfff) { + DBG("skipping used device %03x\n", i2o_dev->lct_data.tid); + return -ENODEV; + } + + printk(KERN_INFO "block-osm: New device detected (TID: %03x)\n", + i2o_dev->lct_data.tid); + + if(i2o_device_claim(i2o_dev)) { + printk(KERN_WARNING "block-osm: Unable to claim device. 
" + "Installation aborted\n"); + rc = -EFAULT; + goto exit; + } + + i2o_blk_dev = i2o_block_device_alloc(); + if(IS_ERR(i2o_blk_dev)) + { + printk(KERN_ERR "block-osm: could not alloc a new I2O block" + "device"); + rc = PTR_ERR(i2o_blk_dev); + goto claim_release; + } + + i2o_blk_dev->i2o_dev = i2o_dev; + dev_set_drvdata(dev, i2o_blk_dev); + + /* setup gendisk */ + gd = i2o_blk_dev->gd; + gd->first_minor = unit<<4; + sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit); + + /* setup request queue */ + queue = gd->queue; + queue->queuedata = i2o_blk_dev; + + blk_queue_max_phys_segments(queue, I2O_MAX_SEGMENTS); + blk_queue_max_sectors(queue, I2O_MAX_SECTORS); + + if(c->short_req) + segments = 8; + else { + i2o_status_block *sb; + + sb = c->status_block.virt; + + segments = (sb->inbound_frame_size - + sizeof(struct i2o_message)/4 - 4) / 2; + } + + blk_queue_max_hw_segments(queue, segments); + + DBG("max sectors: %d\n", I2O_MAX_SECTORS); + DBG("phys segments: %d\n", I2O_MAX_SEGMENTS); + DBG("hw segments: %d\n", segments); + + /* + * Ask for the current media data. If that isn't supported + * then we ask for the device capacity data + */ + if(i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) != 0 + || i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) !=0 ) + { + i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4); + i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8); + } + DBG("blocksize: %d\n", blocksize); + + if(i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2)) + power = 0; + i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4); + i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4); + + set_capacity(gd, size>>9); + + i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff); + + add_disk(gd); + + unit ++; + + return 0; + +claim_release: + i2o_device_claim_release(i2o_dev); + +exit: + return rc; +}; + +/** + * i2o_block_remove - remove the I2O Block device from the system again + * @dev: I2O Block device which should be removed + * + * Remove gendisk from system and free all allocated memory. + * + * Always returns 0. + */ +static int i2o_block_remove(struct device *dev) +{ + struct i2o_device *i2o_dev = to_i2o_device(dev); + struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev); + + printk(KERN_INFO "block-osm: Device removed %s\n", + i2o_blk_dev->gd->disk_name); + + i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0); + + del_gendisk(i2o_blk_dev->gd); + + dev_set_drvdata(dev, NULL); + + i2o_device_claim_release(i2o_dev); + + i2o_block_device_free(i2o_blk_dev); + + return 0; +}; + +/** + * i2o_block_device flush - Flush all dirty data of I2O device dev + * @dev: I2O device which should be flushed + * + * Flushes all dirty data on device dev. + * + * Returns 0 on success or negative error code on failure. + */ +static int i2o_block_device_flush(struct i2o_device *dev) +{ + struct i2o_message *msg; + u32 m; + + m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m == I2O_QUEUE_EMPTY) + return -ETIMEDOUT; + + writel(FIVE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg->head[0]); + writel(I2O_CMD_BLOCK_CFLUSH<<24|HOST_TID<<12|dev->lct_data.tid, + &msg->head[1]); + writel(60<<16, &msg->body[0]); + DBG("Flushing...\n"); + + return i2o_msg_post_wait(dev->iop, m, 60); +}; + +/** + * i2o_block_device_mount - Mount (load) the media of device dev + * @dev: I2O device which should receive the mount request + * @media_id: Media Identifier + * + * Load a media into drive. Identifier should be set to -1, because the + * spec does not support any other value. 
+ * + * Returns 0 on success or negative error code on failure. + */ +static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id) +{ + struct i2o_message *msg; + u32 m; + + m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m == I2O_QUEUE_EMPTY) + return -ETIMEDOUT; + + writel(FIVE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg->head[0]); + writel(I2O_CMD_BLOCK_MMOUNT<<24|HOST_TID<<12|dev->lct_data.tid, + &msg->head[1]); + writel(-1, &msg->body[0]); + writel(0, &msg->body[1]); + DBG("Mounting...\n"); + + return i2o_msg_post_wait(dev->iop, m, 2); +}; + +/** + * i2o_block_device_lock - Locks the media of device dev + * @dev: I2O device which should receive the lock request + * @media_id: Media Identifier + * + * Lock media of device dev to prevent removal. The media identifier + * should be set to -1, because the spec does not support any other value. + * + * Returns 0 on success or negative error code on failure. + */ +static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id) +{ + struct i2o_message *msg; + u32 m; + + m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m == I2O_QUEUE_EMPTY) + return -ETIMEDOUT; + + writel(FIVE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg->head[0]); + writel(I2O_CMD_BLOCK_MLOCK<<24|HOST_TID<<12|dev->lct_data.tid, + &msg->head[1]); + writel(-1, &msg->body[0]); + DBG("Locking...\n"); + + return i2o_msg_post_wait(dev->iop, m, 2); +}; + +/** + * i2o_block_device_unlock - Unlocks the media of device dev + * @dev: I2O device which should receive the unlocked request + * @media_id: Media Identifier + * + * Unlocks the media in device dev. The media identifier should be set to + * -1, because the spec does not support any other value. + * + * Returns 0 on success or negative error code on failure. + */ +static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id) +{ + struct i2o_message *msg; + u32 m; + + m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m == I2O_QUEUE_EMPTY) + return -ETIMEDOUT; + + writel(FIVE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg->head[0]); + writel(I2O_CMD_BLOCK_MUNLOCK<<24|HOST_TID<<12|dev->lct_data.tid, + &msg->head[1]); + writel(media_id, &msg->body[0]); + DBG("Unlocking...\n"); + + return i2o_msg_post_wait(dev->iop, m, 2); +}; + +/** + * i2o_block_device_power - Power management for device dev + * @dev: I2O device which should receive the power management request + * @operation: Operation which should be send + * + * Send a power management request to the device dev. + * + * Returns 0 on success or negative error code on failure. + */ +static int i2o_block_device_power(struct i2o_block_device *dev, u8 op) +{ + struct i2o_device *i2o_dev = dev->i2o_dev; + struct i2o_controller *c = i2o_dev->iop; + struct i2o_message *msg; + u32 m; + int rc; + + m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m == I2O_QUEUE_EMPTY) + return -ETIMEDOUT; + + writel(FOUR_WORD_MSG_SIZE|SGL_OFFSET_0, &msg->head[0]); + writel(I2O_CMD_BLOCK_POWER<<24|HOST_TID<<12|i2o_dev->lct_data.tid, + &msg->head[1]); + writel(op << 24, &msg->body[0]); + DBG("Power...\n"); + + rc = i2o_msg_post_wait(c, m, 60); + if(!rc) + dev->power = op; + + return rc; +}; + +/** + * i2o_block_request_alloc - Allocate an I2O block request struct + * + * Allocates an I2O block request struct and initialize the list. + * + * Returns a i2o_block_request pointer on success or negative error code + * on failure. 
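+ * (The struct comes from the global i2o_blk_req_pool mempool and is
+ * allocated with GFP_ATOMIC, so callers must check the result with
+ * IS_ERR() rather than comparing against NULL.)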
+ */ +static inline struct i2o_block_request *i2o_block_request_alloc(void) +{ + struct i2o_block_request *ireq; + + ireq = mempool_alloc(i2o_blk_req_pool.pool, GFP_ATOMIC); + if(!ireq) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&ireq->queue); + + return ireq; +}; + +/** + * i2o_block_request_free - Frees a I2O block request + * @ireq: I2O block request which should be freed + * + * Fres the allocated memory (give it back to the request mempool). + */ +static inline void i2o_block_request_free(struct i2o_block_request *ireq) +{ + mempool_free(ireq, i2o_blk_req_pool.pool); +}; + +/** + * i2o_block_sglist_alloc - Allocate the SG list and map it + * @ireq: I2O block request + * + * Builds the SG list and map it into to be accessable by the controller. + * + * Returns the number of elements in the SG list or 0 on failure. + */ +static inline int i2o_block_sglist_alloc(struct i2o_block_request *ireq) +{ + struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev; + int nents; + + nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table); + + if(rq_data_dir(ireq->req) == READ) + ireq->sg_dma_direction = PCI_DMA_FROMDEVICE; + else + ireq->sg_dma_direction = PCI_DMA_TODEVICE; + + ireq->sg_nents = dma_map_sg(dev, ireq->sg_table, nents, + ireq->sg_dma_direction); + + return ireq->sg_nents; +}; + +/** + * i2o_block_sglist_free - Frees the SG list + * @ireq: I2O block request from which the SG should be freed + * + * Frees the SG list from the I2O block request. + */ +static inline void i2o_block_sglist_free(struct i2o_block_request *ireq) +{ + struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev; + + dma_unmap_sg(dev, ireq->sg_table,ireq->sg_nents,ireq->sg_dma_direction); +}; + +/** + * i2o_block_prep_req_fn - Allocates I2O block device specific struct + * @q: request queue for the request + * @req: the request to prepare + * + * Allocate the necessary i2o_block_request struct and connect it to + * the request. This is needed that we not loose the SG list later on. + * + * Returns BLKPREP_OK on success or BLKPREP_DEFER on failure. + */ +static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req) +{ + struct i2o_block_device *i2o_blk_dev = q->queuedata; + struct i2o_block_request *ireq; + + /* request is already processed by us, so return */ + if(req->flags & REQ_SPECIAL) { + DBG("REQ_SPECIAL already set!\n"); + req->flags |= REQ_DONTPREP; + return BLKPREP_OK; + } + + /* connect the i2o_block_request to the request */ + if(!req->special) { + ireq = i2o_block_request_alloc(); + if(unlikely(IS_ERR(ireq))) { + DBG("unable to allocate i2o_block_request!\n"); + return BLKPREP_DEFER; + } + + ireq->i2o_blk_dev = i2o_blk_dev; + req->special = ireq; + ireq->req = req; + } else + ireq = req->special; + + /* do not come back here */ + req->flags |= REQ_DONTPREP | REQ_SPECIAL; + + return BLKPREP_OK; +}; + +/** + * i2o_block_delayed_request_fn - delayed request queue function + * delayed_request: the delayed request with the queue to start + * + * If the request queue is stopped for a disk, and there is no open + * request, a new event is created, which calls this function to start + * the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never + * be started again. 
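+ * (This handler runs from the Block OSM event workqueue: it restarts the
+ * stopped queue under the queue lock and then frees the
+ * i2o_block_delayed_request that scheduled it.)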
+ */ +static void i2o_block_delayed_request_fn(void *delayed_request) +{ + struct i2o_block_delayed_request *dreq = delayed_request; + struct request_queue *q = dreq->queue; + unsigned long flags; + + spin_lock_irqsave(q->queue_lock, flags); + blk_start_queue(q); + spin_unlock_irqrestore(q->queue_lock, flags); + kfree(dreq); +}; + +/** + * i2o_block_request_fn - request queue handling function + * q: request queue from which the request could be fetched + * + * Takes the next request from the queue, transfers it and if no error + * occurs dequeue it from the queue. On arrival of the reply the message + * will be processed further. If an error occurs requeue the request. + */ +static void i2o_block_request_fn(struct request_queue *q) +{ + struct request *req; + + while(!blk_queue_plugged(q)) { + req = elv_next_request(q); + if(!req) + break; + + if(blk_fs_request(req)) { + struct i2o_block_delayed_request *dreq; + struct i2o_block_request *ireq = req->special; + unsigned int queue_depth; + + queue_depth = ireq->i2o_blk_dev->open_queue_depth; + + if(queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) + if(!i2o_block_transfer(req)) { + blkdev_dequeue_request(req); + continue; + } + + if(queue_depth) + break; + + /* stop the queue and retry later */ + dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC); + if(!dreq) + continue; + + dreq->queue = q; + INIT_WORK(&dreq->work, i2o_block_delayed_request_fn, + dreq); + + printk(KERN_INFO "block-osm: transfer error\n"); + if(!queue_delayed_work(i2o_block_driver.event_queue, + &dreq->work,I2O_BLOCK_RETRY_TIME)) + kfree(dreq); + else { + blk_stop_queue(q); + break; + } + } else + end_request(req, 0); + } +}; + +/** + * i2o_block_transfer - Transfer a request to/from the I2O controller + * @req: the request which should be transfered + * + * This function converts the request into a I2O message. The necessary + * DMA buffers are allocated and after everything is setup post the message + * to the I2O controller. No cleanup is done by this function. It is done + * on the interrupt side when the reply arrives. + * + * Return 0 on success or negative error code on failure. + */ +static int i2o_block_transfer(struct request *req) +{ + struct i2o_block_device *dev = req->rq_disk->private_data; + struct i2o_controller *c = dev->i2o_dev->iop; + int tid = dev->i2o_dev->lct_data.tid; + struct i2o_message *msg; + void *mptr; + struct i2o_block_request *ireq = req->special; + struct scatterlist *sg; + int sgnum; + int i; + u32 m; + u32 tcntxt; + u32 sg_flags; + int rc; + + m=i2o_msg_get(c, &msg); + if(m==I2O_QUEUE_EMPTY) { + rc = -EBUSY; + goto exit; + } + + tcntxt = i2o_cntxt_list_add(c, req); + if(!tcntxt) { + rc = -ENOMEM; + goto nop_msg; + } + + if((sgnum=i2o_block_sglist_alloc(ireq)) <= 0) { + rc = -ENOMEM; + goto context_remove; + } + + /* Build the message based on the request. 
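+    (icntxt carries the Block OSM driver context and tcntxt the context
+    list entry for this request; body[1] holds the transfer length in
+    bytes, body[2]/body[3] the 64-bit byte offset, body[0] the cache
+    control flags chosen below, and the scatter-gather elements follow
+    from body[4].)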
*/ + writel(i2o_block_driver.context, &msg->icntxt); + writel(tcntxt, &msg->tcntxt); + writel(req->nr_sectors << 9, &msg->body[1]); + + writel((((u64)req->sector) << 9) & 0xffffffff, &msg->body[2]); + writel(req->sector>>23, &msg->body[3]); + + mptr=&msg->body[4]; + + sg = ireq->sg_table; + + if(rq_data_dir(req) == READ) { + writel(I2O_CMD_BLOCK_READ<<24|HOST_TID<<12|tid, &msg->head[1]); + sg_flags = 0x10000000; + switch(dev->rcache) { + case CACHE_NULL: + writel(0, &msg->body[0]);break; + case CACHE_PREFETCH: + writel(0x201F0008, &msg->body[0]);break; + case CACHE_SMARTFETCH: + if(req->nr_sectors > 16) + writel(0x201F0008, &msg->body[0]); + else + writel(0x001F0000, &msg->body[0]); + break; + } + } else { + writel(I2O_CMD_BLOCK_WRITE<<24|HOST_TID<<12|tid, &msg->head[1]); + sg_flags = 0x14000000; + switch(dev->wcache) { + case CACHE_NULL: + writel(0, &msg->body[0]);break; + case CACHE_WRITETHROUGH: + writel(0x001F0008, &msg->body[0]);break; + case CACHE_WRITEBACK: + writel(0x001F0010, &msg->body[0]);break; + case CACHE_SMARTBACK: + if(req->nr_sectors > 16) + writel(0x001F0004, &msg->body[0]); + else + writel(0x001F0010, &msg->body[0]); + break; + case CACHE_SMARTTHROUGH: + if(req->nr_sectors > 16) + writel(0x001F0004, &msg->body[0]); + else + writel(0x001F0010, &msg->body[0]); + } + } + + for(i = sgnum; i > 0; i--) { + if(i == 1) + sg_flags |= 0x80000000; + writel(sg_flags|sg_dma_len(sg), mptr); + writel(sg_dma_address(sg), mptr+4); + mptr += 8; + sg++; + } + + writel(I2O_MESSAGE_SIZE(((unsigned long)mptr-(unsigned long)&msg->head[0])>>2) | SGL_OFFSET_8, &msg->head[0]); + + i2o_msg_post(c,m); + + list_add_tail(&ireq->queue, &dev->open_queue); + dev->open_queue_depth ++; + + return 0; + + +context_remove: + i2o_cntxt_list_remove(c, req); + +nop_msg: + i2o_msg_nop(c, m); + +exit: + return rc; +}; + +/** + * i2o_block_reply - Block OSM reply handler. + * @c: I2O controller from which the message arrives + * @m: message id of reply + * qmsg: the actuall I2O message reply + * + * This function gets all the message replies. + * + */ +static int i2o_block_reply(struct i2o_controller *c, u32 m, + struct i2o_message *msg) +{ + struct i2o_block_request *ireq; + struct request *req; + struct i2o_block_device *dev; + struct request_queue *q; + u8 st; + unsigned long flags; + + /* FAILed message */ + if(unlikely(readl(&msg->head[0]) & (1<<13))) { + struct i2o_message *pmsg; + u32 pm; + + printk(KERN_WARNING "FAIL"); + /* + * FAILed message from controller + * We increment the error count and abort it + * + * In theory this will never happen. 
The I2O block class + * specification states that block devices never return + * FAILs but instead use the REQ status field...but + * better be on the safe side since no one really follows + * the spec to the book :) + */ + pm = readl(&msg->body[3]); + pmsg = c->in_queue.virt + pm; + + req = i2o_cntxt_list_get(c, readl(&pmsg->tcntxt)); + if(unlikely(!req)) { + printk(KERN_ERR "block-osm: NULL reply received!\n"); + return -1; + } + + ireq = req->special; + dev = ireq->i2o_blk_dev; + q = dev->gd->queue; + + req->errors++; + + spin_lock_irqsave(q->queue_lock, flags); + + while(end_that_request_chunk(req, !req->errors, + readl(&pmsg->body[1]))); + end_that_request_last(req); + + dev->open_queue_depth --; + list_del(&ireq->queue); + blk_start_queue(q); + + spin_unlock_irqrestore(q->queue_lock, flags); + + /* Now flush the message by making it a NOP */ + i2o_msg_nop(c, pm); + + return -1; + } + + req = i2o_cntxt_list_get(c, readl(&msg->tcntxt)); + if(unlikely(!req)) { + printk(KERN_ERR "block-osm: NULL reply received!\n"); + return -1; + } + + ireq = req->special; + dev = ireq->i2o_blk_dev; + q = dev->gd->queue; + + if(unlikely(!dev->i2o_dev)) { + /* + * This is HACK, but Intel Integrated RAID allows user + * to delete a volume that is claimed, locked, and in use + * by the OS. We have to check for a reply from a + * non-existent device and flag it as an error or the system + * goes kaput... + */ + req->errors++; + printk(KERN_WARNING "I2O Block: Data transfer to deleted device!\n"); + spin_lock_irqsave(q->queue_lock, flags); + while(end_that_request_chunk(req, !req->errors, readl(&msg->body[1]))); + end_that_request_last(req); + + dev->open_queue_depth --; + list_del(&ireq->queue); + blk_start_queue(q); + + spin_unlock_irqrestore(q->queue_lock, flags); + return -1; + } + + /* + * Lets see what is cooking. We stuffed the + * request in the context. + */ + + st=readl(&msg->body[0])>>24; + + if(st!=0) { + int err; + char *bsa_errors[] = + { + "Success", + "Media Error", + "Failure communicating to device", + "Device Failure", + "Device is not ready", + "Media not present", + "Media is locked by another user", + "Media has failed", + "Failure communicating to device", + "Device bus failure", + "Device is locked by another user", + "Device is write protected", + "Device has reset", + "Volume has changed, waiting for acknowledgement" + }; + + err = readl(&msg->body[0])&0xffff; + + /* + * Device not ready means two things. One is that the + * the thing went offline (but not a removal media) + * + * The second is that you have a SuperTrak 100 and the + * firmware got constipated. Unlike standard i2o card + * setups the supertrak returns an error rather than + * blocking for the timeout in these cases. 
+ * + * Don't stick a supertrak100 into cache aggressive modes + */ + + + printk(KERN_ERR "\n/dev/%s error: %s", dev->gd->disk_name, + bsa_errors[readl(&msg->body[0])&0xffff]); + if(readl(&msg->body[0])&0x00ff0000) + printk(" - DDM attempted %d retries", (readl(&msg->body[0])>>16)&0x00ff); + printk(".\n"); + req->errors++; + } else + req->errors = 0; + + if(!end_that_request_chunk(req, !req->errors, readl(&msg->body[1]))) { + add_disk_randomness(req->rq_disk); + spin_lock_irqsave(q->queue_lock, flags); + + end_that_request_last(req); + + dev->open_queue_depth --; + list_del(&ireq->queue); + blk_start_queue(q); + + spin_unlock_irqrestore(q->queue_lock, flags); + + i2o_block_sglist_free(ireq); + i2o_block_request_free(ireq); + } else + printk(KERN_ERR "still remaining chunks\n"); + + return 1; +}; + +static void i2o_block_event(struct i2o_event *evt) +{ + printk(KERN_INFO "block-osm: event received\n"); +}; + +#if 0 +static int i2o_block_event(void *dummy) +{ + unsigned int evt; + unsigned long flags; + struct i2o_block_device *dev; + int unit; + //The only event that has data is the SCSI_SMART event. + struct i2o_reply { + u32 header[4]; + u32 evt_indicator; + u8 ASC; + u8 ASCQ; + u16 pad; + u8 data[16]; + } *evt_local; + + daemonize("i2oblock"); + allow_signal(SIGKILL); + + evt_running = 1; + + while(1) + { + if(down_interruptible(&i2ob_evt_sem)) + { + evt_running = 0; + printk("exiting..."); + break; + } + + /* + * Keep another CPU/interrupt from overwriting the + * message while we're reading it + * + * We stuffed the unit in the TxContext and grab the event mask + * None of the BSA we care about events have EventData + */ + spin_lock_irqsave(&i2ob_evt_lock, flags); + evt_local = (struct i2o_reply *)evt_msg; + spin_unlock_irqrestore(&i2ob_evt_lock, flags); + + unit = le32_to_cpu(evt_local->header[3]); + evt = le32_to_cpu(evt_local->evt_indicator); + + dev = &i2o_blk_dev[unit]; + switch(evt) + { + /* + * New volume loaded on same TID, so we just re-install. + * The TID/controller don't change as it is the same + * I2O device. It's just new media that we have to + * rescan. + */ + case I2O_EVT_IND_BSA_VOLUME_LOAD: + { + i2ob_install_device(dev->i2o_device->iop, + dev->i2o_device, unit); + add_disk(dev->gendisk); + break; + } + + /* + * No media, so set all parameters to 0 and set the media + * change flag. The I2O device is still valid, just doesn't + * have media, so we don't want to clear the controller or + * device pointer. + */ + case I2O_EVT_IND_BSA_VOLUME_UNLOAD: + { + struct gendisk *p = dev->gendisk; + blk_queue_max_sectors(dev->gendisk->queue, 0); + del_gendisk(p); + put_disk(p); + dev->gendisk = NULL; + dev->media_change_flag = 1; + break; + } + + case I2O_EVT_IND_BSA_VOLUME_UNLOAD_REQ: + printk(KERN_WARNING "%s: Attempt to eject locked media\n", + dev->i2o_device->dev_name); + break; + + /* + * The capacity has changed and we are going to be + * updating the max_sectors and other information + * about this disk. We try a revalidate first. If + * the block device is in use, we don't want to + * do that as there may be I/Os bound for the disk + * at the moment. In that case we read the size + * from the device and update the information ourselves + * and the user can later force a partition table + * update through an ioctl. 
+ */ + case I2O_EVT_IND_BSA_CAPACITY_CHANGE: + { + u64 size; + + if(i2ob_query_device(dev, 0x0004, 0, &size, 8) !=0 ) + i2ob_query_device(dev, 0x0000, 4, &size, 8); + + spin_lock_irqsave(dev->req_queue->queue_lock, flags); + set_capacity(dev->gendisk, size>>9); + spin_unlock_irqrestore(dev->req_queue->queue_lock, flags); + break; + } + + /* + * We got a SCSI SMART event, we just log the relevant + * information and let the user decide what they want + * to do with the information. + */ + case I2O_EVT_IND_BSA_SCSI_SMART: + { + char buf[16]; + printk(KERN_INFO "I2O Block: %s received a SCSI SMART Event\n",dev->i2o_device->dev_name); + evt_local->data[16]='\0'; + sprintf(buf,"%s",&evt_local->data[0]); + printk(KERN_INFO " Disk Serial#:%s\n",buf); + printk(KERN_INFO " ASC 0x%02x \n",evt_local->ASC); + printk(KERN_INFO " ASCQ 0x%02x \n",evt_local->ASCQ); + break; + } + + /* + * Non event + */ + + case 0: + break; + + /* + * An event we didn't ask for. Call the card manufacturer + * and tell them to fix their firmware :) + */ + + case 0x20: + /* + * If a promise card reports 0x20 event then the brown stuff + * hit the fan big time. The card seems to recover but loses + * the pending writes. Deeply ungood except for testing fsck + */ + if(dev->i2o_device->iop->promise) + panic("I2O controller firmware failed. Reboot and force a filesystem check.\n"); + default: + printk(KERN_INFO "%s: Received event 0x%X we didn't register for\n" + KERN_INFO " Blame the I2O card manufacturer 8)\n", + dev->i2o_device->dev_name, evt); + break; + } + }; + + complete_and_exit(&i2ob_thread_dead,0); + return 0; +} +#endif + +/* + * SCSI-CAM for ioctl geometry mapping + * Duplicated with SCSI - this should be moved into somewhere common + * perhaps genhd ? + * + * LBA -> CHS mapping table taken from: + * + * "Incorporating the I2O Architecture into BIOS for Intel Architecture + * Platforms" + * + * This is an I2O document that is only available to I2O members, + * not developers. + * + * From my understanding, this is how all the I2O cards do this + * + * Disk Size | Sectors | Heads | Cylinders + * ---------------+---------+-------+------------------- + * 1 < X <= 528M | 63 | 16 | X/(63 * 16 * 512) + * 528M < X <= 1G | 63 | 32 | X/(63 * 32 * 512) + * 1 < X <528M | 63 | 16 | X/(63 * 16 * 512) + * 1 < X <528M | 63 | 16 | X/(63 * 16 * 512) + * + */ +#define BLOCK_SIZE_528M 1081344 +#define BLOCK_SIZE_1G 2097152 +#define BLOCK_SIZE_21G 4403200 +#define BLOCK_SIZE_42G 8806400 +#define BLOCK_SIZE_84G 17612800 + +static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls, + unsigned char *hds, unsigned char *secs) +{ + unsigned long heads, sectors, cylinders; + + sectors = 63L; /* Maximize sectors per track */ + if(capacity <= BLOCK_SIZE_528M) + heads = 16; + else if(capacity <= BLOCK_SIZE_1G) + heads = 32; + else if(capacity <= BLOCK_SIZE_21G) + heads = 64; + else if(capacity <= BLOCK_SIZE_42G) + heads = 128; + else + heads = 255; + + cylinders = (unsigned long)capacity / (heads * sectors); + + *cyls = (unsigned short) cylinders; /* Stuff return values */ + *secs = (unsigned char) sectors; + *hds = (unsigned char) heads; +} + + +/** + * i2o_block_open - Open the block device + * + * Power up the device, mount and lock the media. This function is called, + * if the block device is opened for access. + * + * Returns 0 on success or negative error code on failure. 
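+ * (Power-up (operation 0x02) is only requested when the stored power state
+ * is above 0x1f; the mount and lock requests are always issued with a media
+ * identifier of -1.)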
+ */ +static int i2o_block_open(struct inode *inode, struct file *file) +{ + struct i2o_block_device *dev = inode->i_bdev->bd_disk->private_data; + + if(!dev->i2o_dev) + return -ENODEV; + + if(dev->power > 0x1f) + i2o_block_device_power(dev, 0x02); + + i2o_block_device_mount(dev->i2o_dev, -1); + + i2o_block_device_lock(dev->i2o_dev, -1); + + DBG("Ready.\n"); + + return 0; +}; + +/** + * i2o_block_release - Release the I2O block device + * + * Unlock and unmount the media, and power down the device. Gets called if + * the block device is closed. + * + * Returns 0 on success or negative error code on failure. + */ +static int i2o_block_release(struct inode *inode, struct file *file) +{ + struct gendisk *disk = inode->i_bdev->bd_disk; + struct i2o_block_device *dev = disk->private_data; + u8 operation; + + /* + * This is to deail with the case of an application + * opening a device and then the device dissapears while + * it's in use, and then the application tries to release + * it. ex: Unmounting a deleted RAID volume at reboot. + * If we send messages, it will just cause FAILs since + * the TID no longer exists. + */ + if(!dev->i2o_dev) + return 0; + + i2o_block_device_flush(dev->i2o_dev); + + i2o_block_device_unlock(dev->i2o_dev, -1); + + if(dev->flags & (1<<3|1<<4)) /* Removable */ + operation = 0x21; + else + operation = 0x24; + + i2o_block_device_power(dev, operation); + + return 0; +} + +/** + * i2o_block_ioctl - Issue device specific ioctl calls. + * @cmd: ioctl command + * @arg: arg + * + * Handles ioctl request for the block device. + * + * Return 0 on success or negative error on failure. + */ +static int i2o_block_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct gendisk *disk = inode->i_bdev->bd_disk; + struct i2o_block_device *dev = disk->private_data; + void __user *argp = (void __user *)arg; + + /* Anyone capable of this syscall can do *real bad* things */ + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + switch (cmd) { + case HDIO_GETGEO: + { + struct hd_geometry g; + i2o_block_biosparam(get_capacity(disk), + &g.cylinders, &g.heads, &g.sectors); + g.start = get_start_sect(inode->i_bdev); + return copy_to_user(argp, &g, sizeof(g))?-EFAULT:0; + } + + case BLKI2OGRSTRAT: + return put_user(dev->rcache, (int __user *)arg); + case BLKI2OGWSTRAT: + return put_user(dev->wcache, (int __user *)arg); + case BLKI2OSRSTRAT: + if(arg<0||arg>CACHE_SMARTFETCH) + return -EINVAL; + dev->rcache = arg; + break; + case BLKI2OSWSTRAT: + if(arg!=0 && (argCACHE_SMARTBACK)) + return -EINVAL; + dev->wcache = arg; + break; + } + return -ENOTTY; +}; + +/** + * i2o_block_media_changed - Have we seen a media change? + * @disk: gendisk which should be verified + * + * Verifies if the media has changed. + * + * Returns 1 if the media was changed or 0 otherwise. + */ +static int i2o_block_media_changed(struct gendisk *disk) +{ + struct i2o_block_device *p = disk->private_data; + + if(p->media_change_flag) { + p->media_change_flag=0; + return 1; + } + return 0; +} + +/** + * i2o_block_init - Block OSM initialization function + * + * Allocate the slab and mempool for request structs, registers i2o_block + * block device and finally register the Block OSM in the I2O core. + * + * Returns 0 on success or negative error code on failure. 
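+ * (On failure, each completed step is unwound in reverse order through the
+ * goto labels below; i2o_block_exit() performs the equivalent teardown on
+ * module unload.)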
+ */ +static int __init i2o_block_init(void) +{ + int rc; + int size; + + printk(KERN_INFO "I2O Block Storage OSM v0.9\n"); + printk(KERN_INFO " (c) Copyright 1999-2001 Red Hat Software.\n"); + + /* Allocate request mempool and slab */ + size = sizeof(struct i2o_block_request); + i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0, + SLAB_HWCACHE_ALIGN, NULL, NULL); + if (!i2o_blk_req_pool.slab) { + printk(KERN_ERR "block-osm: can't init request slab\n"); + rc = -ENOMEM; + goto exit; + } + + i2o_blk_req_pool.pool = mempool_create(I2O_REQ_MEMPOOL_SIZE, + mempool_alloc_slab, mempool_free_slab, i2o_blk_req_pool.slab); + if (!i2o_blk_req_pool.pool) { + printk(KERN_ERR "block-osm: can't init request mempool\n"); + rc = -ENOMEM; + goto free_slab; + } + + /* Register the block device interfaces */ + rc = register_blkdev(I2O_MAJOR, "i2o_block"); + if(rc) { + printk(KERN_ERR "block-osm: unable to register block device\n"); + goto free_mempool; + } + +#ifdef MODULE + printk(KERN_INFO "block-osm: registered device at major %d\n", + I2O_MAJOR); +#endif + + /* Register Block OSM into I2O core */ + rc = i2o_driver_register(&i2o_block_driver); + if(rc) { + printk(KERN_ERR "block-osm: Could not register Block driver\n"); + goto unregister_blkdev; + } + + return 0; + +unregister_blkdev: + unregister_blkdev(I2O_MAJOR, "i2o_block"); + +free_mempool: + mempool_destroy(i2o_blk_req_pool.pool); + +free_slab: + kmem_cache_destroy(i2o_blk_req_pool.slab); + +exit: + return rc; +}; + +/** + * i2o_block_exit - Block OSM exit function + * + * Unregisters Block OSM from I2O core, unregisters i2o_block block device + * and frees the mempool and slab. + */ +static void __exit i2o_block_exit(void) +{ + /* Unregister I2O Block OSM from I2O core */ + i2o_driver_unregister(&i2o_block_driver); + + /* Unregister block device */ + unregister_blkdev(I2O_MAJOR, "i2o_block"); + + /* Free request mempool and slab */ + mempool_destroy(i2o_blk_req_pool.pool); + kmem_cache_destroy(i2o_blk_req_pool.slab); +}; + + +MODULE_AUTHOR("Red Hat"); +MODULE_DESCRIPTION("I2O Block Device OSM"); +MODULE_LICENSE("GPL"); + + +module_init(i2o_block_init); +module_exit(i2o_block_exit); diff -puN /dev/null drivers/message/i2o/block-osm.h --- /dev/null Thu Apr 11 07:25:15 2002 +++ 25-akpm/drivers/message/i2o/block-osm.h Tue Jul 27 14:06:32 2004 @@ -0,0 +1,99 @@ +/* + * Block OSM structures/API + * + * Copyright (C) 1999-2002 Red Hat Software + * + * Written by Alan Cox, Building Number Three Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * For the purpose of avoiding doubt the preferred form of the work + * for making modifications shall be a standards compliant form such + * gzipped tar and not one requiring a proprietary or patent encumbered + * tool to unpack. + * + * Fixes/additions: + * Steve Ralston: + * Multiple device handling error fixes, + * Added a queue depth. + * Alan Cox: + * FC920 has an rmw bug. Dont or in the end marker. + * Removed queue walk, fixed for 64bitness. 
+ * Rewrote much of the code over time + * Added indirect block lists + * Handle 64K limits on many controllers + * Don't use indirects on the Promise (breaks) + * Heavily chop down the queue depths + * Deepak Saxena: + * Independent queues per IOP + * Support for dynamic device creation/deletion + * Code cleanup + * Support for larger I/Os through merge* functions + * (taken from DAC960 driver) + * Boji T Kannanthanam: + * Set the I2O Block devices to be detected in increasing + * order of TIDs during boot. + * Search and set the I2O block device that we boot off + * from as the first device to be claimed (as /dev/i2o/hda) + * Properly attach/detach I2O gendisk structure from the + * system gendisk list. The I2O block devices now appear in + * /proc/partitions. + * Markus Lidel : + * Minor bugfixes for 2.6. + */ + +#ifndef I2O_BLOCK_OSM_H +#define I2O_BLOCK_OSM_H + +#define I2O_BLOCK_RETRY_TIME HZ/4 +#define I2O_BLOCK_MAX_OPEN_REQUESTS 50 + +/* I2O Block OSM mempool struct */ +struct i2o_block_mempool { + kmem_cache_t *slab; + mempool_t *pool; +}; + +/* I2O Block device descriptor */ +struct i2o_block_device { + struct i2o_device *i2o_dev; /* pointer to I2O device */ + struct gendisk *gd; + spinlock_t lock; /* queue lock */ + struct list_head open_queue; /* list of transfered, but unfinished + requests */ + unsigned int open_queue_depth; /* number of requests in the queue */ + + int rcache; /* read cache flags */ + int wcache; /* write cache flags */ + int flags; + int power; /* power state */ + int media_change_flag; /* media changed flag */ +}; + +/* I2O Block device request */ +struct i2o_block_request +{ + struct list_head queue; + struct request *req; /* corresponding request */ + struct i2o_block_device *i2o_blk_dev; /* I2O block device */ + int sg_dma_direction; /* direction of DMA buffer read/write */ + int sg_nents; /* number of SG elements */ + struct scatterlist sg_table[I2O_MAX_SEGMENTS]; /* SG table */ +}; + +/* I2O Block device delayed request */ +struct i2o_block_delayed_request +{ + struct work_struct work; + struct request_queue *queue; +}; + +#endif diff -puN /dev/null drivers/message/i2o/config-osm.c --- /dev/null Thu Apr 11 07:25:15 2002 +++ 25-akpm/drivers/message/i2o/config-osm.c Tue Jul 27 14:06:32 2004 @@ -0,0 +1,1206 @@ +/* + * I2O Configuration Interface Driver + * + * (C) Copyright 1999-2002 Red Hat + * + * Written by Alan Cox, Building Number Three Ltd + * + * Fixes/additions: + * Deepak Saxena (04/20/1999): + * Added basic ioctl() support + * Deepak Saxena (06/07/1999): + * Added software download ioctl (still testing) + * Auvo Häkkinen (09/10/1999): + * Changes to i2o_cfg_reply(), ioctl_parms() + * Added ioct_validate() + * Taneli Vähäkangas (09/30/1999): + * Fixed ioctl_swdl() + * Taneli Vähäkangas (10/04/1999): + * Changed ioctl_swdl(), implemented ioctl_swul() and ioctl_swdel() + * Deepak Saxena (11/18/1999): + * Added event managmenet support + * Alan Cox : + * 2.4 rewrite ported to 2.5 + * Markus Lidel : + * Added pass-thru support for Adaptec's raidutils + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + + +#undef DEBUG +#define DEBUG 1 + +#ifdef DEBUG +#define DBG(x...) 
printk(x) +#else +#define DBG(x...) +#endif + + +extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int); + +static spinlock_t i2o_config_lock = SPIN_LOCK_UNLOCKED; +struct wait_queue *i2o_wait_queue; + +#define MODINC(x,y) ((x) = ((x) + 1) % (y)) + +struct sg_simple_element { + u32 flag_count; + u32 addr_bus; +}; + +struct i2o_cfg_info +{ + struct file* fp; + struct fasync_struct *fasync; + struct i2o_evt_info event_q[I2O_EVT_Q_LEN]; + u16 q_in; // Queue head index + u16 q_out; // Queue tail index + u16 q_len; // Queue length + u16 q_lost; // Number of lost events + ulong q_id; // Event queue ID...used as tx_context + struct i2o_cfg_info *next; +}; +static struct i2o_cfg_info *open_files = NULL; +static ulong i2o_cfg_info_id = 0; + +static int i2o_cfg_getiops(unsigned long); +static int i2o_cfg_gethrt(unsigned long); +static int i2o_cfg_getlct(unsigned long); +static int i2o_cfg_parms(unsigned long, unsigned int); +static int i2o_cfg_swdl(unsigned long); +static int i2o_cfg_swul(unsigned long); +static int i2o_cfg_swdel(unsigned long); +static int i2o_cfg_validate(unsigned long); +static int i2o_cfg_evt_reg(unsigned long, struct file *); +static int i2o_cfg_evt_get(unsigned long, struct file *); +#if BITS_PER_LONG != 64 +static int i2o_cfg_passthru(unsigned long); +#endif +static int cfg_fasync(int, struct file*, int); + +#if 0 +/* + * This is the callback for any message we have posted. The message itself + * will be returned to the message pool when we return from the IRQ + * + * This runs in irq context so be short and sweet. + */ +static void i2o_cfg_reply(struct i2o_handler *h, struct i2o_controller *c, struct i2o_message *m) +{ + u32 *msg = (u32 *)m; + + if (msg[0] & MSG_FAIL) { + u32 *preserved_msg = (u32*)(c->msg_virt + msg[7]); + + printk(KERN_ERR "i2o_config: IOP failed to process the msg.\n"); + + /* Release the preserved msg frame by resubmitting it as a NOP */ + + preserved_msg[0] = THREE_WORD_MSG_SIZE | SGL_OFFSET_0; + preserved_msg[1] = I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0; + preserved_msg[2] = 0; + i2o_post_message(c, msg[7]); + } + + if (msg[4] >> 24) // ReqStatus != SUCCESS + i2o_report_status(KERN_INFO,"i2o_config", msg); + + if(m->function == I2O_CMD_UTIL_EVT_REGISTER) + { + struct i2o_cfg_info *inf; + + for(inf = open_files; inf; inf = inf->next) + if(inf->q_id == i2o_cntxt_list_get(c, msg[3])) + break; + + // + // If this is the case, it means that we're getting + // events for a file descriptor that's been close()'d + // w/o the user unregistering for events first. + // The code currently assumes that the user will + // take care of unregistering for events before closing + // a file. + // + // TODO: + // Should we track event registartion and deregister + // for events when a file is close()'d so this doesn't + // happen? That would get rid of the search through + // the linked list since file->private_data could point + // directly to the i2o_config_info data structure...but + // it would mean having all sorts of tables to track + // what each file is registered for...I think the + // current method is simpler. 
- DS + // + if(!inf) + return; + + inf->event_q[inf->q_in].id.iop = c->unit; + inf->event_q[inf->q_in].id.tid = m->target_tid; + inf->event_q[inf->q_in].id.evt_mask = msg[4]; + + // + // Data size = msg size - reply header + // + inf->event_q[inf->q_in].data_size = (m->size - 5) * 4; + if(inf->event_q[inf->q_in].data_size) + memcpy(inf->event_q[inf->q_in].evt_data, + (unsigned char *)(msg + 5), + inf->event_q[inf->q_in].data_size); + + spin_lock(&i2o_config_lock); + MODINC(inf->q_in, I2O_EVT_Q_LEN); + if(inf->q_len == I2O_EVT_Q_LEN) + { + MODINC(inf->q_out, I2O_EVT_Q_LEN); + inf->q_lost++; + } + else + { + // Keep I2OEVTGET on another CPU from touching this + inf->q_len++; + } + spin_unlock(&i2o_config_lock); + + +// printk(KERN_INFO "File %p w/id %d has %d events\n", +// inf->fp, inf->q_id, inf->q_len); + + kill_fasync(&inf->fasync, SIGIO, POLL_IN); + } + + return; +} +#endif + +/* + * Each of these describes an i2o message handler. They are + * multiplexed by the i2o_core code + */ + +struct i2o_driver i2o_config_driver = { + .name = "Config-OSM" +}; + +/* + * IOCTL Handler + */ +static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, + unsigned long arg) +{ + int ret; + + switch(cmd) + { + case I2OGETIOPS: + ret = i2o_cfg_getiops(arg); + break; + + case I2OHRTGET: + ret = i2o_cfg_gethrt(arg); + break; + + case I2OLCTGET: + ret = i2o_cfg_getlct(arg); + break; + + case I2OPARMSET: + ret = i2o_cfg_parms(arg, I2OPARMSET); + break; + + case I2OPARMGET: + ret = i2o_cfg_parms(arg, I2OPARMGET); + break; + + case I2OSWDL: + ret = i2o_cfg_swdl(arg); + break; + + case I2OSWUL: + ret = i2o_cfg_swul(arg); + break; + + case I2OSWDEL: + ret = i2o_cfg_swdel(arg); + break; + + case I2OVALIDATE: + ret = i2o_cfg_validate(arg); + break; + + case I2OEVTREG: + ret = i2o_cfg_evt_reg(arg, fp); + break; + + case I2OEVTGET: + ret = i2o_cfg_evt_get(arg, fp); + break; + +#if BITS_PER_LONG != 64 + case I2OPASSTHRU: + ret = i2o_cfg_passthru(arg); + break; +#endif + + default: + DBG("i2o_config: unknown ioctl called!\n"); + ret = -EINVAL; + } + + return ret; +} + +static int i2o_cfg_getiops(unsigned long arg) +{ + struct i2o_controller *c; + u8 __user *user_iop_table = (void __user *)arg; + u8 tmp[MAX_I2O_CONTROLLERS]; + + memset(tmp, 0, MAX_I2O_CONTROLLERS); + + if(!access_ok(VERIFY_WRITE, user_iop_table, MAX_I2O_CONTROLLERS)) + return -EFAULT; + + list_for_each_entry(c, &i2o_controllers, list) + tmp[c->unit] = 1; + + __copy_to_user(user_iop_table, tmp, MAX_I2O_CONTROLLERS); + + return 0; +}; + +static int i2o_cfg_gethrt(unsigned long arg) +{ + struct i2o_controller *c; + struct i2o_cmd_hrtlct __user *cmd = (struct i2o_cmd_hrtlct __user *)arg; + struct i2o_cmd_hrtlct kcmd; + i2o_hrt *hrt; + int len; + u32 reslen; + int ret = 0; + + if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct))) + return -EFAULT; + + if(get_user(reslen, kcmd.reslen) < 0) + return -EFAULT; + + if(kcmd.resbuf == NULL) + return -EFAULT; + + c = i2o_find_iop(kcmd.iop); + if(!c) + return -ENXIO; + + hrt = (i2o_hrt *)c->hrt.virt; + + len = 8 + ((hrt->entry_len * hrt->num_entries) << 2); + + /* We did a get user...so assuming mem is ok...is this bad? 
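+	   (the earlier get_user() only shows kcmd.reslen was readable;
+	   put_user() below can still fault, e.g. on a read-only mapping,
+	   and its return value is ignored)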
*/ + put_user(len, kcmd.reslen); + if(len > reslen) + ret = -ENOBUFS; + if(copy_to_user(kcmd.resbuf, (void*)hrt, len)) + ret = -EFAULT; + + return ret; +}; + +static int i2o_cfg_getlct(unsigned long arg) +{ + struct i2o_controller *c; + struct i2o_cmd_hrtlct __user *cmd = (struct i2o_cmd_hrtlct __user *)arg; + struct i2o_cmd_hrtlct kcmd; + i2o_lct *lct; + int len; + int ret = 0; + u32 reslen; + + if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct))) + return -EFAULT; + + if(get_user(reslen, kcmd.reslen) < 0) + return -EFAULT; + + if(kcmd.resbuf == NULL) + return -EFAULT; + + c = i2o_find_iop(kcmd.iop); + if(!c) + return -ENXIO; + + lct = (i2o_lct *)c->lct; + + len = (unsigned int)lct->table_size << 2; + put_user(len, kcmd.reslen); + if(len > reslen) + ret = -ENOBUFS; + else if(copy_to_user(kcmd.resbuf, lct, len)) + ret = -EFAULT; + + return ret; +}; + +static int i2o_cfg_parms(unsigned long arg, unsigned int type) +{ + int ret = 0; + struct i2o_controller *c; + struct i2o_device *dev; + struct i2o_cmd_psetget __user *cmd =(struct i2o_cmd_psetget __user*)arg; + struct i2o_cmd_psetget kcmd; + u32 reslen; + u8 *ops; + u8 *res; + int len = 0; + + u32 i2o_cmd = (type == I2OPARMGET ? + I2O_CMD_UTIL_PARAMS_GET : + I2O_CMD_UTIL_PARAMS_SET); + + if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_psetget))) + return -EFAULT; + + if(get_user(reslen, kcmd.reslen)) + return -EFAULT; + + c = i2o_find_iop(kcmd.iop); + if(!c) + return -ENXIO; + + dev = i2o_iop_find_device(c, kcmd.tid); + if(!dev) + return -ENXIO; + + ops = (u8*)kmalloc(kcmd.oplen, GFP_KERNEL); + if(!ops) + return -ENOMEM; + + if(copy_from_user(ops, kcmd.opbuf, kcmd.oplen)) { + kfree(ops); + return -EFAULT; + } + + /* + * It's possible to have a _very_ large table + * and that the user asks for all of it at once... 
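+	 * so a fixed 64 KiB bounce buffer is used for the result list.  If
+	 * the result is larger than the caller's buffer the required length
+	 * is still written back through reslen and -ENOBUFS is returned.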
+ */ + res = (u8*)kmalloc(65536, GFP_KERNEL); + if(!res) { + kfree(ops); + return -ENOMEM; + } + + len = i2o_parm_issue(dev, i2o_cmd, ops, kcmd.oplen, res, 65536); + kfree(ops); + + if (len < 0) { + kfree(res); + return -EAGAIN; + } + + put_user(len, kcmd.reslen); + if(len > reslen) + ret = -ENOBUFS; + else if(copy_to_user(kcmd.resbuf, res, len)) + ret = -EFAULT; + + kfree(res); + + return ret; +}; + +static int i2o_cfg_swdl(unsigned long arg) +{ + struct i2o_sw_xfer kxfer; + struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; + unsigned char maxfrag = 0, curfrag = 1; + struct i2o_dma buffer; + struct i2o_message *msg; + u32 m; + unsigned int status = 0, swlen = 0, fragsize = 8192; + struct i2o_controller *c; + + if(copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer))) + return -EFAULT; + + if(get_user(swlen, kxfer.swlen) < 0) + return -EFAULT; + + if(get_user(maxfrag, kxfer.maxfrag) < 0) + return -EFAULT; + + if(get_user(curfrag, kxfer.curfrag) < 0) + return -EFAULT; + + if(curfrag==maxfrag) + fragsize = swlen-(maxfrag-1)*8192; + + if(!kxfer.buf || !access_ok(VERIFY_READ, kxfer.buf, fragsize)) + return -EFAULT; + + c = i2o_find_iop(kxfer.iop); + if(!c) + return -ENXIO; + + m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m == I2O_QUEUE_EMPTY) + return -EBUSY; + + if(i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) { + i2o_msg_nop(c, m); + return -ENOMEM; + } + + __copy_from_user(buffer.virt, kxfer.buf, fragsize); + + writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->head[0]); + writel(I2O_CMD_SW_DOWNLOAD<<24|HOST_TID<<12|ADAPTER_TID, &msg->head[1]); + writel(i2o_config_driver.context, &msg->head[2]); + writel(0, &msg->head[3]); + writel((((u32)kxfer.flags)<<24) | (((u32)kxfer.sw_type)<<16) | + (((u32)maxfrag)<<8) | (((u32)curfrag)), &msg->body[0]); + writel(swlen, &msg->body[1]); + writel(kxfer.sw_id, &msg->body[2]); + writel(0xD0000000 | fragsize, &msg->body[3]); + writel(buffer.phys, &msg->body[4]); + +// printk("i2o_config: swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); + status = i2o_msg_post_wait_mem(c, m, 60, &buffer); + + if(status != -ETIMEDOUT) + i2o_dma_free(&c->pdev->dev, &buffer); + + if (status != I2O_POST_WAIT_OK) { + // it fails if you try and send frags out of order + // and for some yet unknown reasons too + printk(KERN_INFO "i2o_config: swdl failed, DetailedStatus = %d\n", status); + return status; + } + + return 0; +}; + +static int i2o_cfg_swul(unsigned long arg) +{ + struct i2o_sw_xfer kxfer; + struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; + unsigned char maxfrag = 0, curfrag = 1; + struct i2o_dma buffer; + struct i2o_message *msg; + u32 m; + unsigned int status = 0, swlen = 0, fragsize = 8192; + struct i2o_controller *c; + + if(copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer))) + return -EFAULT; + + if(get_user(swlen, kxfer.swlen) < 0) + return -EFAULT; + + if(get_user(maxfrag, kxfer.maxfrag) < 0) + return -EFAULT; + + if(get_user(curfrag, kxfer.curfrag) < 0) + return -EFAULT; + + if(curfrag==maxfrag) fragsize = swlen-(maxfrag-1)*8192; + + if(!kxfer.buf || !access_ok(VERIFY_WRITE, kxfer.buf, fragsize)) + return -EFAULT; + + c = i2o_find_iop(kxfer.iop); + if(!c) + return -ENXIO; + + m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m == I2O_QUEUE_EMPTY) + return -EBUSY; + + if(i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) { + i2o_msg_nop(c, m); + return -ENOMEM; + } + + writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->head[0]); + 
writel(I2O_CMD_SW_UPLOAD<<24|HOST_TID<<12|ADAPTER_TID, &msg->head[1]); + writel(i2o_config_driver.context, &msg->head[2]); + writel(0, &msg->head[3]); + writel((u32)kxfer.flags<<24|(u32)kxfer.sw_type<<16|(u32)maxfrag<<8|(u32)curfrag, &msg->body[0]); + writel(swlen, &msg->body[1]); + writel(kxfer.sw_id, &msg->body[2]); + writel(0xD0000000 | fragsize, &msg->body[3]); + writel(buffer.phys, &msg->body[4]); + +// printk("i2o_config: swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); + status = i2o_msg_post_wait_mem(c, m, 60, &buffer); + + if (status != I2O_POST_WAIT_OK) { + if(status != -ETIMEDOUT) + i2o_dma_free(&c->pdev->dev, &buffer); + + printk(KERN_INFO "i2o_config: swul failed, DetailedStatus = %d\n", status); + return status; + } + + __copy_to_user(kxfer.buf, buffer.virt, fragsize); + i2o_dma_free(&c->pdev->dev, &buffer); + + return 0; +}; + +static int i2o_cfg_swdel(unsigned long arg) +{ + struct i2o_controller *c; + struct i2o_sw_xfer kxfer; + struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user*)arg; + struct i2o_message *msg; + u32 m; + unsigned int swlen; + int token; + + if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer))) + return -EFAULT; + + if (get_user(swlen, kxfer.swlen) < 0) + return -EFAULT; + + c = i2o_find_iop(kxfer.iop); + if (!c) + return -ENXIO; + + m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m == I2O_QUEUE_EMPTY) + return -EBUSY; + + writel(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->head[0]); + writel(I2O_CMD_SW_REMOVE<<24|HOST_TID<<12|ADAPTER_TID, &msg->head[1]); + writel(i2o_config_driver.context, &msg->head[2]); + writel(0, &msg->head[3]); + writel((u32)kxfer.flags<<24 | (u32)kxfer.sw_type<<16, &msg->body[0]); + writel(swlen, &msg->body[1]); + writel(kxfer.sw_id, &msg->body[2]); + + token = i2o_msg_post_wait(c, m, 10); + + if (token != I2O_POST_WAIT_OK) { + printk(KERN_INFO "i2o_config: swdel failed, DetailedStatus = %d\n", token); + return -ETIMEDOUT; + } + + return 0; +}; + +static int i2o_cfg_validate(unsigned long arg) +{ + int token; + int iop = (int)arg; + struct i2o_message *msg; + u32 m; + struct i2o_controller *c; + + c=i2o_find_iop(iop); + if(!c) + return -ENXIO; + + m=i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m == I2O_QUEUE_EMPTY) + return -EBUSY; + + writel(FOUR_WORD_MSG_SIZE|SGL_OFFSET_0, &msg->head[0]); + writel(I2O_CMD_CONFIG_VALIDATE<<24|HOST_TID<<12|iop, &msg->head[1]); + writel(i2o_config_driver.context, &msg->head[2]); + writel(0, &msg->head[3]); + + token = i2o_msg_post_wait(c, m, 10); + + if(token != I2O_POST_WAIT_OK) { + printk(KERN_INFO "Can't validate configuration, ErrorStatus = " + "%d\n", token); + return -ETIMEDOUT; + } + + return 0; +}; + +static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp) +{ + struct i2o_message *msg; + u32 m; + struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg; + struct i2o_evt_id kdesc; + struct i2o_controller *c; + struct i2o_device *d; + + if (copy_from_user(&kdesc, pdesc, sizeof(struct i2o_evt_id))) + return -EFAULT; + + /* IOP exists? */ + c = i2o_find_iop(kdesc.iop); + if(!c) + return -ENXIO; + + /* Device exists? 
*/ + d = i2o_iop_find_device(c, kdesc.tid); + if(!d) + return -ENODEV; + + m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m == I2O_QUEUE_EMPTY) + return -EBUSY; + + writel(FOUR_WORD_MSG_SIZE|SGL_OFFSET_0, &msg->head[0]); + writel(I2O_CMD_UTIL_EVT_REGISTER<<24|HOST_TID<<12|kdesc.tid, + &msg->head[1]); + writel(i2o_config_driver.context, &msg->head[2]); + writel(i2o_cntxt_list_add(c, fp->private_data), &msg->head[3]); + writel(kdesc.evt_mask, &msg->body[0]); + + i2o_msg_post(c, m); + + return 0; +} + +static int i2o_cfg_evt_get(unsigned long arg, struct file *fp) +{ + struct i2o_cfg_info *p = NULL; + struct i2o_evt_get __user *uget = (struct i2o_evt_get __user *)arg; + struct i2o_evt_get kget; + unsigned long flags; + + for(p = open_files; p; p = p->next) + if(p->q_id == (ulong)fp->private_data) + break; + + if(!p->q_len) + return -ENOENT; + + memcpy(&kget.info, &p->event_q[p->q_out], sizeof(struct i2o_evt_info)); + MODINC(p->q_out, I2O_EVT_Q_LEN); + spin_lock_irqsave(&i2o_config_lock, flags); + p->q_len--; + kget.pending = p->q_len; + kget.lost = p->q_lost; + spin_unlock_irqrestore(&i2o_config_lock, flags); + + if(copy_to_user(uget, &kget, sizeof(struct i2o_evt_get))) + return -EFAULT; + return 0; +} + +#if BITS_PER_LONG == 64 +static int i2o_cfg_passthru32(unsigned fd, unsigned cmnd, unsigned long arg, struct file *file) +{ + struct i2o_cmd_passthru32 *cmd = (struct i2o_cmd_passthru32 *)arg; + struct i2o_controller *c; + u32 *user_msg = (u32*)(u64)cmd->msg; + u32 *reply = NULL; + u32 *user_reply = NULL; + u32 size = 0; + u32 reply_size = 0; + u32 rcode = 0; + struct i2o_dma sg_list[SG_TABLESIZE]; + u32 sg_offset = 0; + u32 sg_count = 0; + u32 i = 0; + i2o_status_block *sb; + struct i2o_message *msg; + u32 m; + + c = i2o_find_iop(cmd->iop); + if(!c) { + DBG("controller %d not found\n", cmd->iop); + return -ENXIO; + } + + m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + + sb = c->status_block.virt; + + if(get_user(size, &user_msg[0])) { + printk(KERN_WARNING "unable to get size!\n"); + return -EFAULT; + } + size = size>>16; + + if(size > sb->inbound_frame_size) { + DBG("size of message > inbound_frame_size"); + return -EFAULT; + } + + user_reply = &user_msg[size]; + + size <<= 2; // Convert to bytes + + /* Copy in the user's I2O command */ + if(copy_from_user(msg, user_msg, size)) { + printk(KERN_WARNING "unable to copy user message\n"); + return -EFAULT; + } + i2o_dump_message(msg); + + if(get_user(reply_size, &user_reply[0])<0) + return -EFAULT; + + reply_size >>= 16; + reply_size <<= 2; + + reply = kmalloc(reply_size, GFP_KERNEL); + if(!reply) { + printk(KERN_WARNING"%s: Could not allocate reply buffer\n",c->name); + return -ENOMEM; + } + memset(reply, 0, reply_size); + + sg_offset = (msg->head[0]>>4)&0x0f; + + writel(i2o_config_driver.context, &msg->icntxt); + writel(i2o_cntxt_list_add(c, reply), &msg->tcntxt); + + memset(sg_list,0, sizeof(sg_list[0])*SG_TABLESIZE); + if(sg_offset) { + struct sg_simple_element *sg; + + if(sg_offset * 4 >= size) { + rcode = -EFAULT; + goto cleanup; + } + // TODO 64bit fix + sg = (struct sg_simple_element*) ((&msg->head[0])+sg_offset); + sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element); + if (sg_count > SG_TABLESIZE) { + printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", c->name,sg_count); + kfree (reply); + return -EINVAL; + } + + for(i = 0; i < sg_count; i++) { + int sg_size; + struct i2o_dma *p; + + if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) { + printk(KERN_DEBUG"%s:Bad SG 
element %d - not simple (%x)\n",c->name,i, sg[i].flag_count); + rcode = -EINVAL; + goto cleanup; + } + sg_size = sg[i].flag_count & 0xffffff; + p = &(sg_list[i]); + /* Allocate memory for the transfer */ + if(i2o_dma_alloc(&c->pdev->dev, p, sg_size, PCI_DMA_BIDIRECTIONAL)) { + printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n", c->name,sg_size,i,sg_count); + rcode = -ENOMEM; + goto cleanup; + } + /* Copy in the user's SG buffer if necessary */ + if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) { + // TODO 64bit fix + if (copy_from_user(p->virt,(void*)(u64)sg[i].addr_bus, sg_size)) { + printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",c->name,i); + rcode = -EFAULT; + goto cleanup; + } + } + //TODO 64bit fix + sg[i].addr_bus = (u32)p->phys; + } + } + + rcode = i2o_msg_post_wait(c, m, 60); + if(rcode) + goto cleanup; + + if(sg_offset) { + u32 msg[128]; + /* Copy back the Scatter Gather buffers back to user space */ + u32 j; + // TODO 64bit fix + struct sg_simple_element* sg; + int sg_size; + printk(KERN_INFO "sg_offset\n"); + + // re-acquire the original message to handle correctly the sg copy operation + memset(&msg, 0, MSG_FRAME_SIZE*4); + // get user msg size in u32s + if (get_user(size, &user_msg[0])) { + rcode = -EFAULT; + goto cleanup; + } + size = size>>16; + size *= 4; + /* Copy in the user's I2O command */ + if (copy_from_user (msg, user_msg, size)) { + rcode = -EFAULT; + goto cleanup; + } + sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element); + + // TODO 64bit fix + sg = (struct sg_simple_element*)(msg + sg_offset); + for (j = 0; j < sg_count; j++) { + /* Copy out the SG list to user's buffer if necessary */ + if (!(sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) { + sg_size = sg[j].flag_count & 0xffffff; + // TODO 64bit fix + if (copy_to_user((void __user *)(u64)sg[j].addr_bus,sg_list[j].virt, sg_size)) { + printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",c->name, sg_list[j].virt, sg[j].addr_bus); + rcode = -EFAULT; + goto cleanup; + } + } + } + } + + /* Copy back the reply to user space */ + if (reply_size) { + // we wrote our own values for context - now restore the user supplied ones + printk(KERN_INFO "reply_size\n"); + if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) { + printk(KERN_WARNING"%s: Could not copy message context FROM user\n",c->name); + rcode = -EFAULT; + } + if(copy_to_user(user_reply, reply, reply_size)) { + printk(KERN_WARNING"%s: Could not copy reply TO user\n",c->name); + rcode = -EFAULT; + } + } + +cleanup: + kfree(reply); + printk(KERN_INFO "rcode: %d\n", rcode); + return rcode; +} + +#else + +static int i2o_cfg_passthru(unsigned long arg) +{ + struct i2o_cmd_passthru __user*cmd=(struct i2o_cmd_passthru __user*)arg; + struct i2o_controller *c; + u32 __user *user_msg = (u32 __user *)cmd->msg; + u32 *reply = NULL; + u32 __user *user_reply = NULL; + u32 size = 0; + u32 reply_size = 0; + u32 rcode = 0; + void *sg_list[SG_TABLESIZE]; + u32 sg_offset = 0; + u32 sg_count = 0; + int sg_index = 0; + u32 i = 0; + void *p = NULL; + i2o_status_block *sb; + struct i2o_message *msg; + u32 m; + + printk(KERN_INFO "iop: %d\n", cmd->iop); + c = i2o_find_iop(cmd->iop); + if(!c) { + DBG("controller %d not found\n", cmd->iop); + return -ENXIO; + } + + m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + + sb = c->status_block.virt; + + if(get_user(size, &user_msg[0])) + return -EFAULT; + size = size>>16; + + if(size > sb->inbound_frame_size) { + DBG("size of message > 
inbound_frame_size"); + return -EFAULT; + } + + user_reply = &user_msg[size]; + + size <<= 2; // Convert to bytes + + /* Copy in the user's I2O command */ + if(copy_from_user(msg, user_msg, size)) + return -EFAULT; + + + if(get_user(reply_size, &user_reply[0])<0) + return -EFAULT; + + reply_size >>= 16; + reply_size <<= 2; + + reply = kmalloc(reply_size, GFP_KERNEL); + if(!reply) { + printk(KERN_WARNING"%s: Could not allocate reply buffer\n",c->name); + return -ENOMEM; + } + memset(reply, 0, reply_size); + + sg_offset = (msg->head[0]>>4)&0x0f; + + writel(i2o_config_driver.context, &msg->icntxt); + writel(i2o_cntxt_list_add(c, reply), &msg->tcntxt); + + memset(sg_list,0, sizeof(sg_list[0])*SG_TABLESIZE); + if(sg_offset) { + struct sg_simple_element *sg; + + if(sg_offset * 4 >= size) { + rcode = -EFAULT; + goto cleanup; + } + // TODO 64bit fix + sg = (struct sg_simple_element*) ((&msg->head[0])+sg_offset); + sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element); + if (sg_count > SG_TABLESIZE) { + printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", c->name,sg_count); + kfree (reply); + return -EINVAL; + } + + for(i = 0; i < sg_count; i++) { + int sg_size; + + if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) { + printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",c->name,i, sg[i].flag_count); + rcode = -EINVAL; + goto cleanup; + } + sg_size = sg[i].flag_count & 0xffffff; + /* Allocate memory for the transfer */ + p = kmalloc(sg_size, GFP_KERNEL); + if (!p) { + printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n", c->name,sg_size,i,sg_count); + rcode = -ENOMEM; + goto cleanup; + } + sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame. 
+ /* Copy in the user's SG buffer if necessary */ + if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) { + // TODO 64bit fix + if(copy_from_user(p,(void __user *)sg[i].addr_bus,sg_size)) { + printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",c->name,i); + rcode = -EFAULT; + goto cleanup; + } + } + //TODO 64bit fix + sg[i].addr_bus = virt_to_bus(p); + } + } + + rcode = i2o_msg_post_wait(c, m, 60); + if(rcode) + goto cleanup; + + if(sg_offset) { + u32 msg[128]; + /* Copy back the Scatter Gather buffers back to user space */ + u32 j; + // TODO 64bit fix + struct sg_simple_element* sg; + int sg_size; + printk(KERN_INFO "sg_offset\n"); + + // re-acquire the original message to handle correctly the sg copy operation + memset(&msg, 0, MSG_FRAME_SIZE*4); + // get user msg size in u32s + if (get_user(size, &user_msg[0])) { + rcode = -EFAULT; + goto cleanup; + } + size = size>>16; + size *= 4; + /* Copy in the user's I2O command */ + if (copy_from_user (msg, user_msg, size)) { + rcode = -EFAULT; + goto cleanup; + } + sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element); + + // TODO 64bit fix + sg = (struct sg_simple_element*)(msg + sg_offset); + for (j = 0; j < sg_count; j++) { + /* Copy out the SG list to user's buffer if necessary */ + if (!(sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) { + sg_size = sg[j].flag_count & 0xffffff; + // TODO 64bit fix + if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) { + printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",c->name, sg_list[j], sg[j].addr_bus); + rcode = -EFAULT; + goto cleanup; + } + } + } + } + + /* Copy back the reply to user space */ + if (reply_size) { + // we wrote our own values for context - now restore the user supplied ones + printk(KERN_INFO "reply_size\n"); + if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) { + printk(KERN_WARNING"%s: Could not copy message context FROM user\n",c->name); + rcode = -EFAULT; + } + if(copy_to_user(user_reply, reply, reply_size)) { + printk(KERN_WARNING"%s: Could not copy reply TO user\n",c->name); + rcode = -EFAULT; + } + } + +cleanup: + kfree(reply); + return rcode; +} +#endif + +static int cfg_open(struct inode *inode, struct file *file) +{ + struct i2o_cfg_info *tmp = + (struct i2o_cfg_info *)kmalloc(sizeof(struct i2o_cfg_info), GFP_KERNEL); + unsigned long flags; + + if(!tmp) + return -ENOMEM; + + file->private_data = (void*)(i2o_cfg_info_id++); + tmp->fp = file; + tmp->fasync = NULL; + tmp->q_id = (ulong)file->private_data; + tmp->q_len = 0; + tmp->q_in = 0; + tmp->q_out = 0; + tmp->q_lost = 0; + tmp->next = open_files; + + spin_lock_irqsave(&i2o_config_lock, flags); + open_files = tmp; + spin_unlock_irqrestore(&i2o_config_lock, flags); + + return 0; +} + +static int cfg_release(struct inode *inode, struct file *file) +{ + ulong id = (ulong)file->private_data; + struct i2o_cfg_info *p1, *p2; + unsigned long flags; + + lock_kernel(); + p1 = p2 = NULL; + + spin_lock_irqsave(&i2o_config_lock, flags); + for(p1 = open_files; p1; ) + { + if(p1->q_id == id) + { + + if(p1->fasync) + cfg_fasync(-1, file, 0); + if(p2) + p2->next = p1->next; + else + open_files = p1->next; + + kfree(p1); + break; + } + p2 = p1; + p1 = p1->next; + } + spin_unlock_irqrestore(&i2o_config_lock, flags); + unlock_kernel(); + + return 0; +} + +static int cfg_fasync(int fd, struct file *fp, int on) +{ + ulong id = (ulong)fp->private_data; + struct i2o_cfg_info *p; + + for(p = open_files; p; p = p->next) + if(p->q_id == id) + break; + + if(!p) + return -EBADF; + + return 
fasync_helper(fd, fp, on, &p->fasync); +} + +static struct file_operations config_fops = +{ + .owner = THIS_MODULE, + .llseek = no_llseek, + .ioctl = i2o_cfg_ioctl, + .open = cfg_open, + .release = cfg_release, + .fasync = cfg_fasync, +}; + +static struct miscdevice i2o_miscdev = { + I2O_MINOR, + "i2octl", + &config_fops +}; + +static int __init i2o_config_init(void) +{ + printk(KERN_INFO "I2O configuration manager v 0.04.\n"); + printk(KERN_INFO " (C) Copyright 1999 Red Hat Software\n"); + + if(misc_register(&i2o_miscdev) < 0) { + printk(KERN_ERR "i2o_config: can't register device.\n"); + return -EBUSY; + } + /* + * Install our handler + */ + if(i2o_driver_register(&i2o_config_driver)) { + printk(KERN_ERR "i2o_config: handler register failed.\n"); + misc_deregister(&i2o_miscdev); + return -EBUSY; + } + +#if BITS_PER_LONG ==64 + register_ioctl32_conversion(I2OPASSTHRU32, i2o_cfg_passthru32); + register_ioctl32_conversion(I2OGETIOPS, (void *)sys_ioctl); +#endif + return 0; +} + +static void i2o_config_exit(void) +{ +#if BITS_PER_LONG ==64 + unregister_ioctl32_conversion(I2OPASSTHRU32); + unregister_ioctl32_conversion(I2OGETIOPS); +#endif + misc_deregister(&i2o_miscdev); + i2o_driver_unregister(&i2o_config_driver); +} + +MODULE_AUTHOR("Red Hat Software"); +MODULE_DESCRIPTION("I2O Configuration"); +MODULE_LICENSE("GPL"); + +module_init(i2o_config_init); +module_exit(i2o_config_exit); diff -puN /dev/null drivers/message/i2o/debug.c --- /dev/null Thu Apr 11 07:25:15 2002 +++ 25-akpm/drivers/message/i2o/debug.c Tue Jul 27 14:06:32 2004 @@ -0,0 +1,576 @@ +#define DRIVERDEBUG 1 + +#include +#include +#include +#include +#include + +static int verbose; +extern struct i2o_driver **i2o_drivers; +extern unsigned int i2o_max_drivers; +static void i2o_report_util_cmd(u8 cmd); +static void i2o_report_exec_cmd(u8 cmd); +void i2o_report_fail_status(u8 req_status, u32* msg); +void i2o_report_common_status(u8 req_status); +static void i2o_report_common_dsc(u16 detailed_status); + +#define DBG(x...) printk(x) + +void i2o_dump_status_block(i2o_status_block *sb) +{ + DBG("Organization ID: %d\n", sb->org_id); + DBG("IOP ID: %d\n", sb->iop_id); + DBG("Host Unit ID: %d\n", sb->host_unit_id); + DBG("Segment Number: %d\n", sb->segment_number); + DBG("I2O Version: %d\n", sb->i2o_version); + DBG("IOP State: %d\n", sb->iop_state); + DBG("Messanger Type: %d\n", sb->msg_type); + DBG("Inbound Frame Size: %d\n", sb->inbound_frame_size); + DBG("Init Code: %d\n", sb->init_code); + DBG("Max Inbound MFrames: %d\n", sb->max_inbound_frames); + DBG("Current Inbound MFrames: %d\n", sb->cur_inbound_frames); + DBG("Max Outbound MFrames: %d\n", sb->max_outbound_frames); + DBG("Product ID String: %s\n", sb->product_id); + DBG("Expected LCT Size: %d\n", sb->expected_lct_size); + DBG("IOP Capabilities: %d\n", sb->iop_capabilities); + DBG("Desired Private MemSize: %d\n", sb->desired_mem_size); + DBG("Current Private MemSize: %d\n", sb->current_mem_size); + DBG("Current Private MemBase: %d\n", sb->current_mem_base); + DBG("Desired Private IO Size: %d\n", sb->desired_io_size); + DBG("Current Private IO Size: %d\n", sb->current_io_size); + DBG("Current Private IO Base: %d\n", sb->current_io_base); +}; + +/* + * Used for error reporting/debugging purposes. + * Report Cmd name, Request status, Detailed Status. 
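+ * All three come straight from the reply frame: the command code from
+ * msg[1] bits 31:24, ReqStatus from msg[4] bits 31:24 and DetailedStatus
+ * from the low 16 bits of msg[4].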
+ */ +void i2o_report_status(const char *severity, const char *str, struct i2o_message *m) +{ + u32 *msg = (u32 *)m; + u8 cmd = (msg[1]>>24)&0xFF; + u8 req_status = (msg[4]>>24)&0xFF; + u16 detailed_status = msg[4]&0xFFFF; + //struct i2o_driver *h = i2o_drivers[msg[2] & (i2o_max_drivers-1)]; + + if (cmd == I2O_CMD_UTIL_EVT_REGISTER) + return; // No status in this reply + + printk("%s%s: ", severity, str); + + if (cmd < 0x1F) // Utility cmd + i2o_report_util_cmd(cmd); + + else if (cmd >= 0xA0 && cmd <= 0xEF) // Executive cmd + i2o_report_exec_cmd(cmd); + else + printk("Cmd = %0#2x, ", cmd); // Other cmds + + if (msg[0] & MSG_FAIL) { + i2o_report_fail_status(req_status, msg); + return; + } + + i2o_report_common_status(req_status); + + if (cmd < 0x1F || (cmd >= 0xA0 && cmd <= 0xEF)) + i2o_report_common_dsc(detailed_status); + else + printk(" / DetailedStatus = %0#4x.\n", detailed_status); +} + +/* Used to dump a message to syslog during debugging */ +void i2o_dump_message(struct i2o_message *m) +{ + u32 *msg = (u32 *)m; +#ifdef DRIVERDEBUG + int i; + printk(KERN_INFO "Dumping I2O message size %d @ %p\n", + msg[0]>>16&0xffff, msg); + for(i = 0; i < ((msg[0]>>16)&0xffff); i++) + printk(KERN_INFO " msg[%d] = %0#10x\n", i, msg[i]); +#endif +} + +/** + * i2o_report_controller_unit - print information about a tid + * @c: controller + * @d: device + * + * Dump an information block associated with a given unit (TID). The + * tables are read and a block of text is output to printk that is + * formatted intended for the user. + */ + +void i2o_report_controller_unit(struct i2o_controller *c, struct i2o_device *d) +{ + char buf[64]; + char str[22]; + int ret; + + if(verbose==0) + return; + + printk(KERN_INFO "Target ID %03x.\n", d->lct_data.tid); + if((ret=i2o_parm_field_get(d, 0xF100, 3, buf, 16))>=0) { + buf[16]=0; + printk(KERN_INFO " Vendor: %s\n", buf); + } + if((ret=i2o_parm_field_get(d, 0xF100, 4, buf, 16))>=0) { + buf[16]=0; + printk(KERN_INFO " Device: %s\n", buf); + } + if(i2o_parm_field_get(d, 0xF100, 5, buf, 16)>=0) { + buf[16]=0; + printk(KERN_INFO " Description: %s\n", buf); + } + if((ret=i2o_parm_field_get(d, 0xF100, 6, buf, 8))>=0) { + buf[8]=0; + printk(KERN_INFO " Rev: %s\n", buf); + } + + printk(KERN_INFO " Class: "); + //sprintf(str, "%-21s", i2o_get_class_name(d->lct_data.class_id)); + printk("%s\n", str); + + printk(KERN_INFO " Subclass: 0x%04X\n", d->lct_data.sub_class); + printk(KERN_INFO " Flags: "); + + if(d->lct_data.device_flags&(1<<0)) + printk("C"); // ConfigDialog requested + if(d->lct_data.device_flags&(1<<1)) + printk("U"); // Multi-user capable + if(!(d->lct_data.device_flags&(1<<4))) + printk("P"); // Peer service enabled! + if(!(d->lct_data.device_flags&(1<<5))) + printk("M"); // Mgmt service enabled! + printk("\n"); +} + +/* +MODULE_PARM(verbose, "i"); +MODULE_PARM_DESC(verbose, "Verbose diagnostics"); +*/ +/* + * Used for error reporting/debugging purposes. + * Following fail status are common to all classes. + * The preserved message must be handled in the reply handler. 
+ */ +void i2o_report_fail_status(u8 req_status, u32* msg) +{ + static char *FAIL_STATUS[] = { + "0x80", /* not used */ + "SERVICE_SUSPENDED", /* 0x81 */ + "SERVICE_TERMINATED", /* 0x82 */ + "CONGESTION", + "FAILURE", + "STATE_ERROR", + "TIME_OUT", + "ROUTING_FAILURE", + "INVALID_VERSION", + "INVALID_OFFSET", + "INVALID_MSG_FLAGS", + "FRAME_TOO_SMALL", + "FRAME_TOO_LARGE", + "INVALID_TARGET_ID", + "INVALID_INITIATOR_ID", + "INVALID_INITIATOR_CONTEX", /* 0x8F */ + "UNKNOWN_FAILURE" /* 0xFF */ + }; + + if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE) + printk("TRANSPORT_UNKNOWN_FAILURE (%0#2x)\n.", req_status); + else + printk("TRANSPORT_%s.\n", FAIL_STATUS[req_status & 0x0F]); + + /* Dump some details */ + + printk(KERN_ERR " InitiatorId = %d, TargetId = %d\n", + (msg[1] >> 12) & 0xFFF, msg[1] & 0xFFF); + printk(KERN_ERR " LowestVersion = 0x%02X, HighestVersion = 0x%02X\n", + (msg[4] >> 8) & 0xFF, msg[4] & 0xFF); + printk(KERN_ERR " FailingHostUnit = 0x%04X, FailingIOP = 0x%03X\n", + msg[5] >> 16, msg[5] & 0xFFF); + + printk(KERN_ERR " Severity: 0x%02X ", (msg[4] >> 16) & 0xFF); + if (msg[4] & (1<<16)) + printk("(FormatError), " + "this msg can never be delivered/processed.\n"); + if (msg[4] & (1<<17)) + printk("(PathError), " + "this msg can no longer be delivered/processed.\n"); + if (msg[4] & (1<<18)) + printk("(PathState), " + "the system state does not allow delivery.\n"); + if (msg[4] & (1<<19)) + printk("(Congestion), resources temporarily not available;" + "do not retry immediately.\n"); +} + +/* + * Used for error reporting/debugging purposes. + * Following reply status are common to all classes. + */ +void i2o_report_common_status(u8 req_status) +{ + static char *REPLY_STATUS[] = { + "SUCCESS", + "ABORT_DIRTY", + "ABORT_NO_DATA_TRANSFER", + "ABORT_PARTIAL_TRANSFER", + "ERROR_DIRTY", + "ERROR_NO_DATA_TRANSFER", + "ERROR_PARTIAL_TRANSFER", + "PROCESS_ABORT_DIRTY", + "PROCESS_ABORT_NO_DATA_TRANSFER", + "PROCESS_ABORT_PARTIAL_TRANSFER", + "TRANSACTION_ERROR", + "PROGRESS_REPORT" + }; + + if (req_status >= ARRAY_SIZE(REPLY_STATUS)) + printk("RequestStatus = %0#2x", req_status); + else + printk("%s", REPLY_STATUS[req_status]); +} + +/* + * Used for error reporting/debugging purposes. + * Following detailed status are valid for executive class, + * utility class, DDM class and for transaction error replies. 
+ */ +static void i2o_report_common_dsc(u16 detailed_status) +{ + static char *COMMON_DSC[] = { + "SUCCESS", + "0x01", // not used + "BAD_KEY", + "TCL_ERROR", + "REPLY_BUFFER_FULL", + "NO_SUCH_PAGE", + "INSUFFICIENT_RESOURCE_SOFT", + "INSUFFICIENT_RESOURCE_HARD", + "0x08", // not used + "CHAIN_BUFFER_TOO_LARGE", + "UNSUPPORTED_FUNCTION", + "DEVICE_LOCKED", + "DEVICE_RESET", + "INAPPROPRIATE_FUNCTION", + "INVALID_INITIATOR_ADDRESS", + "INVALID_MESSAGE_FLAGS", + "INVALID_OFFSET", + "INVALID_PARAMETER", + "INVALID_REQUEST", + "INVALID_TARGET_ADDRESS", + "MESSAGE_TOO_LARGE", + "MESSAGE_TOO_SMALL", + "MISSING_PARAMETER", + "TIMEOUT", + "UNKNOWN_ERROR", + "UNKNOWN_FUNCTION", + "UNSUPPORTED_VERSION", + "DEVICE_BUSY", + "DEVICE_NOT_AVAILABLE" + }; + + if (detailed_status > I2O_DSC_DEVICE_NOT_AVAILABLE) + printk(" / DetailedStatus = %0#4x.\n", detailed_status); + else + printk(" / %s.\n", COMMON_DSC[detailed_status]); +} + +/* + * Used for error reporting/debugging purposes + */ +static void i2o_report_util_cmd(u8 cmd) +{ + switch (cmd) { + case I2O_CMD_UTIL_NOP: + printk("UTIL_NOP, "); + break; + case I2O_CMD_UTIL_ABORT: + printk("UTIL_ABORT, "); + break; + case I2O_CMD_UTIL_CLAIM: + printk("UTIL_CLAIM, "); + break; + case I2O_CMD_UTIL_RELEASE: + printk("UTIL_CLAIM_RELEASE, "); + break; + case I2O_CMD_UTIL_CONFIG_DIALOG: + printk("UTIL_CONFIG_DIALOG, "); + break; + case I2O_CMD_UTIL_DEVICE_RESERVE: + printk("UTIL_DEVICE_RESERVE, "); + break; + case I2O_CMD_UTIL_DEVICE_RELEASE: + printk("UTIL_DEVICE_RELEASE, "); + break; + case I2O_CMD_UTIL_EVT_ACK: + printk("UTIL_EVENT_ACKNOWLEDGE, "); + break; + case I2O_CMD_UTIL_EVT_REGISTER: + printk("UTIL_EVENT_REGISTER, "); + break; + case I2O_CMD_UTIL_LOCK: + printk("UTIL_LOCK, "); + break; + case I2O_CMD_UTIL_LOCK_RELEASE: + printk("UTIL_LOCK_RELEASE, "); + break; + case I2O_CMD_UTIL_PARAMS_GET: + printk("UTIL_PARAMS_GET, "); + break; + case I2O_CMD_UTIL_PARAMS_SET: + printk("UTIL_PARAMS_SET, "); + break; + case I2O_CMD_UTIL_REPLY_FAULT_NOTIFY: + printk("UTIL_REPLY_FAULT_NOTIFY, "); + break; + default: + printk("Cmd = %0#2x, ",cmd); + } +} + +/* + * Used for error reporting/debugging purposes + */ +static void i2o_report_exec_cmd(u8 cmd) +{ + switch (cmd) { + case I2O_CMD_ADAPTER_ASSIGN: + printk("EXEC_ADAPTER_ASSIGN, "); + break; + case I2O_CMD_ADAPTER_READ: + printk("EXEC_ADAPTER_READ, "); + break; + case I2O_CMD_ADAPTER_RELEASE: + printk("EXEC_ADAPTER_RELEASE, "); + break; + case I2O_CMD_BIOS_INFO_SET: + printk("EXEC_BIOS_INFO_SET, "); + break; + case I2O_CMD_BOOT_DEVICE_SET: + printk("EXEC_BOOT_DEVICE_SET, "); + break; + case I2O_CMD_CONFIG_VALIDATE: + printk("EXEC_CONFIG_VALIDATE, "); + break; + case I2O_CMD_CONN_SETUP: + printk("EXEC_CONN_SETUP, "); + break; + case I2O_CMD_DDM_DESTROY: + printk("EXEC_DDM_DESTROY, "); + break; + case I2O_CMD_DDM_ENABLE: + printk("EXEC_DDM_ENABLE, "); + break; + case I2O_CMD_DDM_QUIESCE: + printk("EXEC_DDM_QUIESCE, "); + break; + case I2O_CMD_DDM_RESET: + printk("EXEC_DDM_RESET, "); + break; + case I2O_CMD_DDM_SUSPEND: + printk("EXEC_DDM_SUSPEND, "); + break; + case I2O_CMD_DEVICE_ASSIGN: + printk("EXEC_DEVICE_ASSIGN, "); + break; + case I2O_CMD_DEVICE_RELEASE: + printk("EXEC_DEVICE_RELEASE, "); + break; + case I2O_CMD_HRT_GET: + printk("EXEC_HRT_GET, "); + break; + case I2O_CMD_ADAPTER_CLEAR: + printk("EXEC_IOP_CLEAR, "); + break; + case I2O_CMD_ADAPTER_CONNECT: + printk("EXEC_IOP_CONNECT, "); + break; + case I2O_CMD_ADAPTER_RESET: + printk("EXEC_IOP_RESET, "); + break; + case I2O_CMD_LCT_NOTIFY: + 
printk("EXEC_LCT_NOTIFY, "); + break; + case I2O_CMD_OUTBOUND_INIT: + printk("EXEC_OUTBOUND_INIT, "); + break; + case I2O_CMD_PATH_ENABLE: + printk("EXEC_PATH_ENABLE, "); + break; + case I2O_CMD_PATH_QUIESCE: + printk("EXEC_PATH_QUIESCE, "); + break; + case I2O_CMD_PATH_RESET: + printk("EXEC_PATH_RESET, "); + break; + case I2O_CMD_STATIC_MF_CREATE: + printk("EXEC_STATIC_MF_CREATE, "); + break; + case I2O_CMD_STATIC_MF_RELEASE: + printk("EXEC_STATIC_MF_RELEASE, "); + break; + case I2O_CMD_STATUS_GET: + printk("EXEC_STATUS_GET, "); + break; + case I2O_CMD_SW_DOWNLOAD: + printk("EXEC_SW_DOWNLOAD, "); + break; + case I2O_CMD_SW_UPLOAD: + printk("EXEC_SW_UPLOAD, "); + break; + case I2O_CMD_SW_REMOVE: + printk("EXEC_SW_REMOVE, "); + break; + case I2O_CMD_SYS_ENABLE: + printk("EXEC_SYS_ENABLE, "); + break; + case I2O_CMD_SYS_MODIFY: + printk("EXEC_SYS_MODIFY, "); + break; + case I2O_CMD_SYS_QUIESCE: + printk("EXEC_SYS_QUIESCE, "); + break; + case I2O_CMD_SYS_TAB_SET: + printk("EXEC_SYS_TAB_SET, "); + break; + default: + printk("Cmd = %#02x, ",cmd); + } +} + +void i2o_debug_state(struct i2o_controller *c) +{ + printk(KERN_INFO "%s: State = ", c->name); + switch (((i2o_status_block *)c->status_block.virt)->iop_state) { + case 0x01: + printk("INIT\n"); + break; + case 0x02: + printk("RESET\n"); + break; + case 0x04: + printk("HOLD\n"); + break; + case 0x05: + printk("READY\n"); + break; + case 0x08: + printk("OPERATIONAL\n"); + break; + case 0x10: + printk("FAILED\n"); + break; + case 0x11: + printk("FAULTED\n"); + break; + default: + printk("%x (unknown !!)\n", ((i2o_status_block *)c->status_block.virt)->iop_state); + } +}; + +void i2o_systab_debug(struct i2o_sys_tbl *sys_tbl) +{ + u32 *table; + int count; + u32 size; + + table = (u32*)sys_tbl; + size = sizeof(struct i2o_sys_tbl) + sys_tbl->num_entries + * sizeof(struct i2o_sys_tbl_entry); + + for(count = 0; count < (size >>2); count++) + printk(KERN_INFO "sys_tbl[%d] = %0#10x\n", count, table[count]); +} + +void i2o_dump_hrt(struct i2o_controller *c) +{ + u32 *rows=(u32*)c->hrt.virt; + u8 *p=(u8 *)c->hrt.virt; + u8 *d; + int count; + int length; + int i; + int state; + + if(p[3]!=0) + { + printk(KERN_ERR "%s: HRT table for controller is too new a version.\n", + c->name); + return; + } + + count=p[0]|(p[1]<<8); + length = p[2]; + + printk(KERN_INFO "%s: HRT has %d entries of %d bytes each.\n", + c->name, count, length<<2); + + rows+=2; + + for(i=0;i>=12; + if(state&(1<<0)) + printk("H"); /* Hidden */ + if(state&(1<<2)) + { + printk("P"); /* Present */ + if(state&(1<<1)) + printk("C"); /* Controlled */ + } + if(state>9) + printk("*"); /* Hard */ + + printk("]:"); + + switch(p[3]&0xFFFF) + { + case 0: + /* Adapter private bus - easy */ + printk("Local bus %d: I/O at 0x%04X Mem 0x%08X", + p[2], d[1]<<8|d[0], *(u32 *)(d+4)); + break; + case 1: + /* ISA bus */ + printk("ISA %d: CSN %d I/O at 0x%04X Mem 0x%08X", + p[2], d[2], d[1]<<8|d[0], *(u32 *)(d+4)); + break; + + case 2: /* EISA bus */ + printk("EISA %d: Slot %d I/O at 0x%04X Mem 0x%08X", + p[2], d[3], d[1]<<8|d[0], *(u32 *)(d+4)); + break; + + case 3: /* MCA bus */ + printk("MCA %d: Slot %d I/O at 0x%04X Mem 0x%08X", + p[2], d[3], d[1]<<8|d[0], *(u32 *)(d+4)); + break; + + case 4: /* PCI bus */ + printk("PCI %d: Bus %d Device %d Function %d", + p[2], d[2], d[1], d[0]); + break; + + case 0x80: /* Other */ + default: + printk("Unsupported bus type."); + break; + } + printk("\n"); + rows+=length; + } +} + +EXPORT_SYMBOL(i2o_dump_status_block); +EXPORT_SYMBOL(i2o_dump_message); diff -puN /dev/null 
drivers/message/i2o/device.c --- /dev/null Thu Apr 11 07:25:15 2002 +++ 25-akpm/drivers/message/i2o/device.c Tue Jul 27 14:06:32 2004 @@ -0,0 +1,700 @@ +/* + * Functions to handle I2O devices + * + * Copyright (C) 2004 Markus Lidel + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * Fixes/additions: + * Markus Lidel + * initial version. + */ + +#include +#include + + +#undef DEBUG + +#ifdef DEBUG +#define DBG(x...) printk(x) +#else +#define DBG(x...) +#endif + + +/* Exec OSM functions */ +extern struct bus_type i2o_bus_type; + +/* Module internal used functions */ +int i2o_device_parse_lct(struct i2o_controller *); +void i2o_device_remove(struct i2o_device *); + +/* Internal used functions */ +static inline int i2o_device_issue_claim(struct i2o_device *, u32, u32); + +static struct i2o_device *i2o_device_add(struct i2o_controller *, + i2o_lct_entry *); +static void i2o_device_release(struct device *); +static struct i2o_device *i2o_device_alloc(void); + +/* I2O device class functions */ +static int i2o_device_class_add(struct class_device *); +static void i2o_device_class_release(struct class_device *); + +static ssize_t i2o_device_class_show_class_id(struct class_device *, char *); +static ssize_t i2o_device_class_show_tid(struct class_device *, char *); + +int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int); + + +/* I2O device class */ +static struct class i2o_device_class = { + .name = "i2o_device", + .release= i2o_device_class_release +}; + +/* I2O device class interface */ +static struct class_interface i2o_device_class_interface = { + .class = &i2o_device_class, + .add = i2o_device_class_add +}; + +/* I2O device class attributes */ +static CLASS_DEVICE_ATTR(class_id, S_IRUGO,i2o_device_class_show_class_id,NULL); +static CLASS_DEVICE_ATTR(tid, S_IRUGO, i2o_device_class_show_tid, NULL); + + +/** + * i2o_device_issue_claim - claim or release a device + * @dev: I2O device to claim or release + * @cmd: claim or release command + * @type: type of claim + * + * Issue I2O UTIL_CLAIM or UTIL_RELEASE messages. The message to be sent + * is set by cmd. dev is the I2O device which should be claim or + * released and the type is the claim type (see the I2O spec). + * + * Returs 0 on success or negative error code on failure. + */ +static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd, + u32 type) +{ + struct i2o_message *msg; + u32 m; + + m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m == I2O_QUEUE_EMPTY) + return -ETIMEDOUT; + + writel(FIVE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg->head[0]); + writel(cmd<<24|HOST_TID<<12|dev->lct_data.tid, &msg->head[1]); + writel(type, &msg->body[0]); + + return i2o_msg_post_wait(dev->iop, m, 60); +}; + +/** + * i2o_device_claim - claim a device for use by an OSM + * @dev: I2O device to claim + * @drv: I2O driver which wants to claim the device + * + * Do the leg work to assign a device to a given OSM. If the claim succeed + * the owner of the rimary. If the attempt fails a negative errno code + * is returned. On success zero is returned. 
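+ *
+ * A typical OSM claims the device before using it and drops the claim
+ * again when it is done with it, roughly:
+ *
+ *	if (i2o_device_claim(i2o_dev))
+ *		return -EBUSY;
+ *	... talk to the device ...
+ *	i2o_device_claim_release(i2o_dev);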
+ */ +int i2o_device_claim(struct i2o_device *dev) +{ + int rc = 0; + + down(&dev->lock); + + rc = i2o_device_issue_claim(dev, I2O_CMD_UTIL_CLAIM, I2O_CLAIM_PRIMARY); + if(!rc) + DBG("claim of device %d succeded\n", dev->lct_data.tid); + else + DBG("claim of device %d failed %d\n", dev->lct_data.tid, rc); + + up(&dev->lock); + + return rc; +}; + +/** + * i2o_device_claim_release - release a device that the OSM is using + * @dev: device to release + * @drv: driver which claimed the device + * + * Drop a claim by an OSM on a given I2O device. + * + * AC - some devices seem to want to refuse an unclaim until they have + * finished internal processing. It makes sense since you don't want a + * new device to go reconfiguring the entire system until you are done. + * Thus we are prepared to wait briefly. + * + * Returns 0 on success or negative error code on failure. + */ +int i2o_device_claim_release(struct i2o_device *dev) +{ + int tries; + int rc = 0; + + down(&dev->lock); + + /* + * If the controller takes a nonblocking approach to + * releases we have to sleep/poll for a few times. + */ + for(tries=0;tries<10;tries++) { + rc = i2o_device_issue_claim(dev, I2O_CMD_UTIL_RELEASE, + I2O_CLAIM_PRIMARY); + if(!rc) + break; + + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ); + } + + if(!rc) + DBG("claim release of device %d succeded\n", dev->lct_data.tid); + else + DBG("claim release of device %d failed %d\n", dev->lct_data.tid, + rc); + + up(&dev->lock); + + return rc; +}; + +/** + * i2o_device_alloc - Allocate a I2O device and initialize it + * + * Allocate the memory for a I2O device and initialize locks and lists + * + * Returns the allocated I2O device or a negative error code if the device + * could not be allocated. + */ +static struct i2o_device *i2o_device_alloc(void) +{ + struct i2o_device *dev; + + dev = kmalloc(sizeof(*dev), GFP_KERNEL); + if(!dev) + return ERR_PTR(-ENOMEM); + + memset(dev, 0, sizeof(*dev)); + + INIT_LIST_HEAD(&dev->list); + init_MUTEX(&dev->lock); + + dev->device.bus = &i2o_bus_type; + dev->device.release = &i2o_device_release; + dev->classdev.class = &i2o_device_class; + dev->classdev.dev = &dev->device; + + return dev; +}; + +/** + * i2o_device_add - allocate a new I2O device and add it to the IOP + * @iop: I2O controller where the device is on + * @entry: LCT entry of the I2O device + * + * Allocate a new I2O device and initialize it with the LCT entry. The + * device is appended to the device list of the controller. + * + * Returns a pointer to the I2O device on success or negative error code + * on failure. + */ +struct i2o_device *i2o_device_add(struct i2o_controller *c,i2o_lct_entry *entry) +{ + struct i2o_device *dev; + + dev = i2o_device_alloc(); + if(IS_ERR(dev)) { + printk(KERN_ERR "i2o: unable to allocate i2o device\n"); + return dev; + } + + dev->lct_data = *entry; + + snprintf(dev->device.bus_id, BUS_ID_SIZE, "%d:%03x", c->unit, + dev->lct_data.tid); + + snprintf(dev->classdev.class_id, BUS_ID_SIZE, "%d:%03x", c->unit, + dev->lct_data.tid); + + dev->iop=c; + dev->device.parent = &c->device; + + device_register(&dev->device); + + list_add_tail(&dev->list, &c->devices); + + class_device_register(&dev->classdev); + + DBG("I2O device %s added\n", dev->device.bus_id); + + return dev; +}; + +/** + * i2o_device_remove - remove an I2O device from the I2O core + * @dev: I2O device which should be released + * + * Is used on I2O controller removal or LCT modification, when the device + * is removed from the system. 
Note that the device could still hang + * around until the refcount reaches 0. + */ +void i2o_device_remove(struct i2o_device *i2o_dev) +{ + class_device_unregister(&i2o_dev->classdev); + list_del(&i2o_dev->list); + device_unregister(&i2o_dev->device); +}; + +/** + * i2o_device_release - release the memory for a I2O device + * @dev: I2O device which should be released + * + * Release the allocated memory. This function is called if refcount of + * device reaches 0 automatically. + */ +static void i2o_device_release(struct device *dev) +{ + struct i2o_device *i2o_dev = to_i2o_device(dev); + + DBG("Release I2O device %s\n", dev->bus_id); + + kfree(i2o_dev); +}; + +/** + * i2o_device_parse_lct - Parse a previously fetched LCT and create devices + * @c: I2O controller from which the LCT should be parsed. + * + * The Logical Configuration Table tells us what we can talk to on the + * board. For every entry we create an I2O device, which is registered in + * the I2O core. + * + * Returns 0 on success or negative error code on failure. + */ +int i2o_device_parse_lct(struct i2o_controller *c) +{ + struct i2o_device *dev, *tmp; + i2o_lct *lct; + int i; + int max; + + down(&c->lct_lock); + + if(c->lct) + kfree(c->lct); + + lct = c->dlct.virt; + + c->lct = kmalloc(lct->table_size * 4, GFP_KERNEL); + if(!c->lct) { + up(&c->lct_lock); + return -ENOMEM; + } + + if(lct->table_size * 4 > c->dlct.len) { + memcpy_fromio(c->lct, c->dlct.virt, c->dlct.len); + up(&c->lct_lock); + return -EAGAIN; + } + + memcpy_fromio(c->lct, c->dlct.virt, lct->table_size * 4); + + lct = c->lct; + + max = (lct->table_size - 3) / 9; + + DBG("LCT has %d entries (LCT size: %d)\n", max, lct->table_size); + + /* remove devices, which are not in the LCT anymore */ + list_for_each_entry_safe(dev, tmp, &c->devices, list) { + int found = 0; + + for(i = 0; i < max; i ++) { + if(lct->lct_entry[i].tid == dev->lct_data.tid) { + found = 1; + break; + } + } + + if(!found) + i2o_device_remove(dev); + } + + /* add new devices, which are new in the LCT */ + for(i = 0; i < max; i ++) { + int found = 0; + + list_for_each_entry_safe(dev, tmp, &c->devices, list) { + if(lct->lct_entry[i].tid == dev->lct_data.tid) { + found = 1; + break; + } + } + + if(!found) + i2o_device_add(c, &lct->lct_entry[i]); + } + up(&c->lct_lock); + + return 0; +}; + +/** + * i2o_device_class_add - Adds attributes to the I2O device + * @cd: I2O class device which is added to the I2O device class + * + * This function get called when a I2O device is added to the class. It + * creates the attributes for each device and creates user/parent symlink + * if necessary. + * + * Returns 0 on success or negative error code on failure. 
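+ *
+ * The symlinks created here mirror the LCT topology in sysfs: the device
+ * gets "user" and "parent" links pointing at the devices named by its
+ * UserTID and ParentTID, and matching links are added on devices which
+ * name this device as their user or parent.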
+ */ +static int i2o_device_class_add(struct class_device *cd) +{ + struct i2o_device *i2o_dev, *tmp; + struct i2o_controller *c; + + i2o_dev = to_i2o_device(cd->dev); + c = i2o_dev->iop; + + class_device_create_file(cd, &class_device_attr_class_id); + class_device_create_file(cd, &class_device_attr_tid); + + /* create user entries for this device */ + tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid); + if(tmp) + sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj, + "user"); + + /* create user entries refering to this device */ + list_for_each_entry(tmp, &c->devices, list) + if(tmp->lct_data.user_tid == i2o_dev->lct_data.tid) + sysfs_create_link(&tmp->device.kobj, + &i2o_dev->device.kobj, "user"); + + /* create parent entries for this device */ + tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid); + if(tmp) + sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj, + "parent"); + + /* create parent entries refering to this device */ + list_for_each_entry(tmp, &c->devices, list) + if(tmp->lct_data.parent_tid == i2o_dev->lct_data.tid) + sysfs_create_link(&tmp->device.kobj, + &i2o_dev->device.kobj, "parent"); + + return 0; +}; + +/** + * i2o_device_class_release - Remove I2O device attributes + * @cd: I2O class device which is added to the I2O device class + * + * Removes attributes from the I2O device again. Also search each device + * on the controller for I2O devices which refert to this device as parent + * or user and remove this links also. + */ +static void i2o_device_class_release(struct class_device *cd) +{ + struct i2o_device *i2o_dev, *tmp; + struct i2o_controller *c; + + i2o_dev = to_i2o_device(cd->dev); + c = i2o_dev->iop; + + sysfs_remove_link(&i2o_dev->device.kobj, "parent"); + sysfs_remove_link(&i2o_dev->device.kobj, "user"); + + list_for_each_entry(tmp, &c->devices, list) { + if(tmp->lct_data.parent_tid == i2o_dev->lct_data.tid) + sysfs_remove_link(&tmp->device.kobj, "parent"); + if(tmp->lct_data.user_tid == i2o_dev->lct_data.tid) + sysfs_remove_link(&tmp->device.kobj, "user"); + } +}; + +/** + * i2o_device_class_show_class_id - Displays class id of I2O device + * @cd: class device of which the class id should be displayed + * @buf: buffer into which the class id should be printed + * + * Returns the number of bytes which are printed into the buffer. + */ +static ssize_t i2o_device_class_show_class_id(struct class_device *cd,char *buf) +{ + struct i2o_device *dev = to_i2o_device(cd->dev); + + sprintf(buf, "%03x\n", dev->lct_data.class_id); + return strlen(buf) + 1; +}; + +/** + * i2o_device_class_show_tid - Displays TID of I2O device + * @cd: class device of which the TID should be displayed + * @buf: buffer into which the class id should be printed + * + * Returns the number of bytes which are printed into the buffer. + */ +static ssize_t i2o_device_class_show_tid(struct class_device *cd, char*buf) +{ + struct i2o_device *dev = to_i2o_device(cd->dev); + + sprintf(buf, "%03x\n", dev->lct_data.tid); + return strlen(buf) + 1; +}; + +/* + * Run time support routines + */ + +/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET + * + * This function can be used for all UtilParamsGet/Set operations. + * The OperationList is given in oplist-buffer, + * and results are returned in reslist-buffer. + * Note that the minimum sized reslist is 8 bytes and contains + * ResultCount, ErrorInfoSize, BlockStatus and BlockSize. 
+ */ + +int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist, + int oplen, void *reslist, int reslen) +{ + struct i2o_message *msg; + u32 m; + u32 *res32 = (u32*)reslist; + u32 *restmp = (u32*)reslist; + int len = 0; + int i = 0; + int rc; + struct i2o_dma res; + struct i2o_controller *c = i2o_dev->iop; + struct device *dev = &c->pdev->dev; + + res.virt = NULL; + + if(i2o_dma_alloc(dev, &res, reslen, GFP_KERNEL)) + return -ENOMEM; + + m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m == I2O_QUEUE_EMPTY) { + i2o_dma_free(dev, &res); + return -ETIMEDOUT; + } + + i = 0; + writel(cmd<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid, &msg->head[1]); + writel(0, &msg->body[i ++]); + writel(0x4C000000 | oplen, &msg->body[i ++]); /* OperationList */ + memcpy_toio(&msg->body[i], oplist, oplen); + i += (oplen/4+(oplen%4?1:0)); + writel(0xD0000000 | res.len, &msg->body[i ++]); /* ResultList */ + writel(res.phys, &msg->body[i ++]); + + writel(I2O_MESSAGE_SIZE(i+sizeof(struct i2o_message)/4)| SGL_OFFSET_5, + &msg->head[0]); + + rc = i2o_msg_post_wait_mem(c, m, 10, &res); + + /* This only looks like a memory leak - don't "fix" it. */ + if(rc == -ETIMEDOUT) + return rc; + + memcpy_fromio(reslist, res.virt, res.len); + i2o_dma_free(dev, &res); + + /* Query failed */ + if(rc) + return rc; + /* + * Calculate number of bytes of Result LIST + * We need to loop through each Result BLOCK and grab the length + */ + restmp = res32 + 1; + len = 1; + for(i = 0; i < (res32[0]&0X0000FFFF); i++) + { + if(restmp[0]&0x00FF0000) /* BlockStatus != SUCCESS */ + { + printk(KERN_WARNING "%s - Error:\n ErrorInfoSize = 0x%02x, " + "BlockStatus = 0x%02x, BlockSize = 0x%04x\n", + (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET" + : "PARAMS_GET", + res32[1]>>24, (res32[1]>>16)&0xFF, res32[1]&0xFFFF); + + /* + * If this is the only request,than we return an error + */ + if((res32[0]&0x0000FFFF) == 1) + { + return -((res32[1] >> 16) & 0xFF); /* -BlockStatus */ + } + } + len += restmp[0] & 0x0000FFFF; /* Length of res BLOCK */ + restmp += restmp[0] & 0x0000FFFF; /* Skip to next BLOCK */ + } + return (len << 2); /* bytes used by result list */ +} + +/* + * Query one field group value or a whole scalar group. + */ +int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field, + void *buf, int buflen) +{ + u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field }; + u8 resblk[8+buflen]; /* 8 bytes for header */ + int size; + + if (field == -1) /* whole group */ + opblk[4] = -1; + + size = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk, + sizeof(opblk), resblk, sizeof(resblk)); + + memcpy(buf, resblk+8, buflen); /* cut off header */ + + if(size>buflen) + return buflen; + + return size; +} + +/* + * Set a scalar group value or a whole group. 
+ */ +int i2o_parm_field_set(struct i2o_device *i2o_dev, int group, int field, + void *buf, int buflen) +{ + u16 *opblk; + u8 resblk[8+buflen]; /* 8 bytes for header */ + int size; + + opblk = kmalloc(buflen+64, GFP_KERNEL); + if (opblk == NULL) + { + printk(KERN_ERR "i2o: no memory for operation buffer.\n"); + return -ENOMEM; + } + + opblk[0] = 1; /* operation count */ + opblk[1] = 0; /* pad */ + opblk[2] = I2O_PARAMS_FIELD_SET; + opblk[3] = group; + + if(field == -1) { /* whole group */ + opblk[4] = -1; + memcpy(opblk+5, buf, buflen); + } + else /* single field */ + { + opblk[4] = 1; + opblk[5] = field; + memcpy(opblk+6, buf, buflen); + } + + size = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_SET, opblk, + 12+buflen, resblk, sizeof(resblk)); + + kfree(opblk); + if(size>buflen) + return buflen; + + return size; +} + +/* + * if oper == I2O_PARAMS_TABLE_GET, get from all rows + * if fieldcount == -1 return all fields + * ibuf and ibuflen are unused (use NULL, 0) + * else return specific fields + * ibuf contains fieldindexes + * + * if oper == I2O_PARAMS_LIST_GET, get from specific rows + * if fieldcount == -1 return all fields + * ibuf contains rowcount, keyvalues + * else return specific fields + * fieldcount is # of fieldindexes + * ibuf contains fieldindexes, rowcount, keyvalues + * + * You could also use directly function i2o_issue_params(). + */ +int i2o_parm_table_get(struct i2o_device *dev, int oper, int group, + int fieldcount, void *ibuf, int ibuflen, void *resblk, int reslen) +{ + u16 *opblk; + int size; + + size = 10 + ibuflen; + if(size % 4) + size += 4 - size % 4; + + opblk = kmalloc(size, GFP_KERNEL); + if (opblk == NULL) { + printk(KERN_ERR "i2o: no memory for query buffer.\n"); + return -ENOMEM; + } + + opblk[0] = 1; /* operation count */ + opblk[1] = 0; /* pad */ + opblk[2] = oper; + opblk[3] = group; + opblk[4] = fieldcount; + memcpy(opblk+5, ibuf, ibuflen); /* other params */ + + size = i2o_parm_issue(dev, I2O_CMD_UTIL_PARAMS_GET, opblk, + size, resblk, reslen); + + kfree(opblk); + if(size>reslen) + return reslen; + + return size; +} + +/** + * i2o_device_init - Initialize I2O devices + * + * Registers the I2O device class. + * + * Returns 0 on success or negative error code on failure. + */ +int i2o_device_init(void) +{ + int rc; + + rc = class_register(&i2o_device_class); + if(rc) + return rc; + + return class_interface_register(&i2o_device_class_interface); +}; + +/** + * i2o_device_exit - I2O devices exit function + * + * Unregisters the I2O device class. + */ +void i2o_device_exit(void) +{ + class_interface_register(&i2o_device_class_interface); + class_unregister(&i2o_device_class); +}; + +EXPORT_SYMBOL(i2o_device_claim); +EXPORT_SYMBOL(i2o_device_claim_release); +EXPORT_SYMBOL(i2o_parm_field_get); +EXPORT_SYMBOL(i2o_parm_field_set); +EXPORT_SYMBOL(i2o_parm_table_get); +EXPORT_SYMBOL(i2o_parm_issue); diff -puN /dev/null drivers/message/i2o/driver.c --- /dev/null Thu Apr 11 07:25:15 2002 +++ 25-akpm/drivers/message/i2o/driver.c Tue Jul 27 14:06:32 2004 @@ -0,0 +1,290 @@ +/* + * Functions to handle I2O drivers (OSMs) and I2O bus type for sysfs + * + * Copyright (C) 2004 Markus Lidel + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * Fixes/additions: + * Markus Lidel + * initial version. 
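The table/list variant can be sketched the same way. This mirrors the 10-byte fixed header plus 4-byte padding that i2o_parm_table_get() computes above, with a made-up index/key buffer following the layout its comment documents.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define PARAMS_LIST_GET 0x0002	/* illustrative stand-in for I2O_PARAMS_LIST_GET */

/*
 * Build the TABLE/LIST get OperationList: count, pad, operation, group
 * and fieldcount (10 bytes), then the caller's index/key buffer, rounded
 * up to a 32-bit multiple.  Returns the padded size; caller frees *out.
 */
static int build_table_get(uint16_t **out, uint16_t oper, uint16_t group,
			   int fieldcount, const void *ibuf, int ibuflen)
{
	int size = 10 + ibuflen;
	uint16_t *op;

	if (size % 4)
		size += 4 - size % 4;

	op = calloc(1, size);
	if (!op)
		return -1;

	op[0] = 1;			/* operation count */
	op[1] = 0;			/* pad */
	op[2] = oper;
	op[3] = group;
	op[4] = (uint16_t)fieldcount;	/* -1 = all fields */
	memcpy(op + 5, ibuf, ibuflen);	/* field indexes / rowcount / keys */

	*out = op;
	return size;
}

int main(void)
{
	/* two field indexes, then rowcount and one key, per the comment above */
	uint16_t ibuf[] = { 0, 3, 1, 0x0001 };
	uint16_t *op;
	int size = build_table_get(&op, PARAMS_LIST_GET, 0x0002,
				   2, ibuf, sizeof(ibuf));
	int i;

	printf("padded size = %d bytes\n", size);
	for (i = 0; i < size / 2; i++)
		printf("%04x ", op[i]);
	printf("\n");
	free(op);
	return 0;
}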
+ */ + +#include +#include +#include +#include + + +#undef DEBUG + +#ifdef DEBUG +#define DBG(x...) printk(x) +#else +#define DBG(x...) +#endif + + +/* Module internal used functions */ +int i2o_driver_dispatch(struct i2o_controller *, u32, struct i2o_message *); + +/* I2O bus functions */ +static int i2o_bus_match(struct device *dev, struct device_driver *drv); + +/* Init / exit functions */ +int __init i2o_driver_init(void); +void __exit i2o_driver_exit(void); + + +/* max_drivers - Maximum I2O drivers (OSMs) which could be registered */ +unsigned int i2o_max_drivers = I2O_MAX_DRIVERS; +module_param_named(max_drivers, i2o_max_drivers, uint, 0); +MODULE_PARM_DESC(max_drivers, "maximum number of OSM's to support"); + + +/* I2O bus type */ +struct bus_type i2o_bus_type = { + .name = "i2o", + .match = i2o_bus_match, +}; + +/* I2O drivers lock and array */ +static spinlock_t i2o_drivers_lock = SPIN_LOCK_UNLOCKED; +static struct i2o_driver **i2o_drivers; + + +/** + * i2o_bus_match - Tell if a I2O device class id match the class ids of + * the I2O driver (OSM) + * + * @dev: device which should be verified + * @drv: the driver to match against + * + * Used by the bus to check if the driver wants to handle the device. + * + * Returns 1 if the class ids of the driver match the class id of the + * device, otherwise 0. + */ +static int i2o_bus_match(struct device *dev, struct device_driver *drv) +{ + struct i2o_device *i2o_dev = to_i2o_device(dev); + struct i2o_driver *i2o_drv = to_i2o_driver(drv); + struct i2o_class_id *ids = i2o_drv->classes; + + if(ids) + while(ids->class_id != I2O_CLASS_END) + { + if(ids->class_id == i2o_dev->lct_data.class_id) + return 1; + ids ++; + } + return 0; +}; + +/** + * i2o_driver_register - Register a I2O driver (OSM) in the I2O core + * @drv: I2O driver which should be registered + * + * Registers the OSM drv in the I2O core and creates an event queues if + * necessary. + * + * Returns 0 on success or negative error code on failure. + */ +int i2o_driver_register(struct i2o_driver *drv) +{ + int i; + int rc = 0; + unsigned long flags; + + DBG("Register driver %s\n", drv->name); + + if(drv->event) { + drv->event_queue = create_workqueue(drv->name); + if(!drv->event_queue) + { + printk(KERN_ERR "i2o: Could not initialize event queue " + "for driver %s\n", drv->name); + return -EFAULT; + } + DBG("Event queue initialized for driver %s\n", drv->name); + } + else + drv->event_queue = NULL; + + drv->driver.name = drv->name; + drv->driver.bus = &i2o_bus_type; + + spin_lock_irqsave(&i2o_drivers_lock, flags); + + for(i=0;i2o_drivers[i];i++) + if(i>=i2o_max_drivers) { + printk(KERN_ERR "i2o: too many drivers registered, " + "increase max_drivers\n"); + spin_unlock_irqrestore(&i2o_drivers_lock, flags); + return -EFAULT; + } + + drv->context = i; + i2o_drivers[i]=drv; + + spin_unlock_irqrestore(&i2o_drivers_lock, flags); + + DBG("driver %s gets context id %d\n", drv->name, drv->context); + + rc = driver_register(&drv->driver); + if(rc) + destroy_workqueue(drv->event_queue); + + return rc; +}; + +/** + * i2o_driver_unregister - Unregister a I2O driver (OSM) from the I2O core + * @drv: I2O driver which should be unregistered + * + * Unregisters the OSM drv from the I2O core and cleanup event queues if + * necessary. 
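The class matching that i2o_bus_match() does above reduces to a walk over an END-terminated table. A user-space sketch, with stand-in values for the class constants:

#include <stdio.h>

#define CLASS_END		0xfff	/* stand-in for I2O_CLASS_END */
#define CLASS_RANDOM_BLOCK	0x010	/* stand-in for I2O_CLASS_RANDOM_BLOCK_STORAGE */
#define CLASS_SCSI_PERIPHERAL	0x051	/* stand-in for I2O_CLASS_SCSI_PERIPHERAL */

struct class_id {
	int class_id;
};

/* Same shape as i2o_bus_match(): walk the driver's class table until the
 * END marker and return 1 on the first entry matching the device. */
static int bus_match(const struct class_id *ids, int dev_class)
{
	if (!ids)
		return 0;
	while (ids->class_id != CLASS_END) {
		if (ids->class_id == dev_class)
			return 1;
		ids++;
	}
	return 0;
}

int main(void)
{
	static const struct class_id block_osm_classes[] = {
		{ CLASS_RANDOM_BLOCK },
		{ CLASS_END },
	};

	printf("block device: %d\n", bus_match(block_osm_classes, CLASS_RANDOM_BLOCK));
	printf("scsi device:  %d\n", bus_match(block_osm_classes, CLASS_SCSI_PERIPHERAL));
	return 0;
}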
+ */ +void i2o_driver_unregister(struct i2o_driver *drv) +{ + unsigned long flags; + + DBG("unregister driver %s\n", drv->name); + + driver_unregister(&drv->driver); + + spin_lock_irqsave(&i2o_drivers_lock, flags); + i2o_drivers[drv->context]=NULL; + spin_unlock_irqrestore(&i2o_drivers_lock, flags); + + if(drv->event_queue) { + destroy_workqueue(drv->event_queue); + drv->event_queue = NULL; + DBG("event queue removed for %s\n", drv->name); + } +}; + +/** + * i2o_driver_dispatch - dispatch an I2O reply message + * @c: I2O controller of the message + * @m: I2O message number + * @msg: I2O message to be delivered + * + * The reply is delivered to the driver from which the original message + * was. This function is only called from interrupt context. + * + * Returns 0 on success and the message should not be flushed. Returns > 0 + * on success and if the message should be flushed afterwords. Returns + * negative error code on failure (the message will be flushed too). + */ +int i2o_driver_dispatch(struct i2o_controller *c, u32 m,struct i2o_message *msg) +{ + struct i2o_driver *drv; + u32 context = readl(&msg->icntxt); + + if(likely(context < i2o_max_drivers)) { + spin_lock(&i2o_drivers_lock); + drv = i2o_drivers[context]; + spin_unlock(&i2o_drivers_lock); + + if(unlikely(!drv)) { + printk(KERN_WARNING "i2o: Spurious reply to unknown " + "driver %d\n", context); + return -EIO; + } + + if((readl(&msg->head[1])>>24) == I2O_CMD_UTIL_EVT_REGISTER) { + struct i2o_device *dev, *tmp; + struct i2o_event *evt; + u16 size; + u16 tid; + + tid = readl(&msg->head[1])&0x1fff; + + DBG("%s: event received from device %d\n", c->name,tid); + + /* cut of header from message size (in 32-bit words) */ + size = (readl(&msg->head[0])>>16)-5; + + evt = kmalloc(size*4 + sizeof(*evt), GFP_ATOMIC); + if(!evt) + return -ENOMEM; + memset(evt, 0, size*4 + sizeof(*evt)); + + evt->size = size; + memcpy_fromio(&evt->tcntxt, &msg->tcntxt, (size+2)*4); + + + list_for_each_entry_safe(dev, tmp, &c->devices, list) + if(dev->lct_data.tid == tid) { + evt->i2o_dev = dev; + break; + } + + INIT_WORK(&evt->work, (void (*)(void*))drv->event, evt); + queue_work(drv->event_queue, &evt->work); + return 1; + } + + if(likely(drv->reply)) + return drv->reply(c, m, msg); + else + DBG("%s: Reply to driver %s, but no reply function " + "defined!\n", c->name, drv->name); + return -EIO; + } else + printk(KERN_WARNING "i2o: Spurious reply to unknown driver " + "%d\n", readl(&msg->icntxt)); + return -EIO; +} + +/** + * i2o_driver_init - initialize I2O drivers (OSMs) + * + * Registers the I2O bus and allocate memory for the array of OSMs. + * + * Returns 0 on success or negative error code on failure. + */ +int __init i2o_driver_init(void) +{ + int rc = 0; + + if((i2o_max_drivers < 2) || (i2o_max_drivers > 64) || + ((i2o_max_drivers^(i2o_max_drivers-1)) != (2*i2o_max_drivers-1))) + { + printk(KERN_WARNING "i2o: max_drivers set to %d, but must be " + ">=2 and <= 64 and a power of 2\n", i2o_max_drivers); + i2o_max_drivers = I2O_MAX_DRIVERS; + } + printk(KERN_INFO "i2o: max_drivers=%d\n", i2o_max_drivers); + + i2o_drivers = kmalloc(i2o_max_drivers*sizeof(*i2o_drivers), GFP_KERNEL); + if(!i2o_drivers) + return -ENOMEM; + + memset(i2o_drivers, 0, i2o_max_drivers*sizeof(*i2o_drivers)); + + rc = bus_register(&i2o_bus_type); + + if(rc<0) + kfree(i2o_drivers); + + return rc; +}; + +/** + * i2o_driver_exit - clean up I2O drivers (OSMs) + * + * Unregisters the I2O bus and free driver array. 
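The max_drivers validation in i2o_driver_init() above leans on a small bit trick; here it is as a stand-alone check you can run:

#include <stdio.h>

/* The module parameter check from i2o_driver_init(): x ^ (x - 1) equals
 * 2*x - 1 only when x is a power of two (all bits below the single set
 * bit flip), so this accepts 2, 4, 8, ... 64 and rejects everything else. */
static int valid_max_drivers(unsigned int x)
{
	return x >= 2 && x <= 64 && (x ^ (x - 1)) == 2 * x - 1;
}

int main(void)
{
	unsigned int tests[] = { 1, 2, 3, 8, 24, 64, 128 };
	unsigned int i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("%3u -> %s\n", tests[i],
		       valid_max_drivers(tests[i]) ? "ok" : "rejected");
	return 0;
}

A rejected value falls back to the compiled-in I2O_MAX_DRIVERS default, as the warning in the code notes.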
+ */ +void __exit i2o_driver_exit(void) +{ + bus_unregister(&i2o_bus_type); + kfree(i2o_drivers); +}; + +EXPORT_SYMBOL(i2o_driver_register); +EXPORT_SYMBOL(i2o_driver_unregister); diff -puN /dev/null drivers/message/i2o/exec-osm.c --- /dev/null Thu Apr 11 07:25:15 2002 +++ 25-akpm/drivers/message/i2o/exec-osm.c Tue Jul 27 14:06:32 2004 @@ -0,0 +1,532 @@ +/* + * Executive OSM + * + * Copyright (C) 1999-2002 Red Hat Software + * + * Written by Alan Cox, Building Number Three Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * A lot of the I2O message side code from this is taken from the Red + * Creek RCPCI45 adapter driver by Red Creek Communications + * + * Fixes/additions: + * Philipp Rumpf + * Juha Sievänen + * Auvo Häkkinen + * Deepak Saxena + * Boji T Kannanthanam + * Alan Cox : + * Ported to Linux 2.5. + * Markus Lidel : + * Minor fixes for 2.6. + * Markus Lidel : + * Support for sysfs included. + */ + +#include +#include + + +#undef DEBUG + +#ifdef DEBUG +#define DBG(x...) printk(x) +#else +#define DBG(x...) +#endif + + +/* Module internal functions from other sources */ +extern int i2o_device_parse_lct(struct i2o_controller *); + +/* Internal used functions */ +static int i2o_msg_post_wait_complete(struct i2o_controller *, u32, + struct i2o_message *); + +static int i2o_exec_probe(struct device *); +static int i2o_exec_remove(struct device *); + +static struct i2o_exec_wait *i2o_exec_wait_alloc(void); +static void i2o_exec_wait_free(struct i2o_exec_wait *); + +static int i2o_exec_reply(struct i2o_controller *, u32, struct i2o_message *); +static void i2o_exec_event(struct i2o_event *); + +static void i2o_exec_lct_modified(struct i2o_controller *); + +/* Module init and exit functions */ +int __init i2o_exec_init(void); +void __exit i2o_exec_exit(void); + + +/* global wait list for POST WAIT */ +static LIST_HEAD(i2o_exec_wait_list); + +/* Wait struct needed for POST WAIT */ +struct i2o_exec_wait { + wait_queue_head_t *wq; /* Pointer to Wait queue */ + struct i2o_dma dma; /* DMA buffers to free on failure */ + u32 tcntxt; /* transaction context from reply */ + int complete; /* 1 if reply received otherwise 0 */ + u32 m; /* message id */ + struct i2o_message *msg; /* pointer to the reply message */ + struct list_head list; /* node in global wait list */ +}; + +/* Exec OSM class handling definition */ +static struct i2o_class_id i2o_exec_class_id[] = { + { I2O_CLASS_EXECUTIVE }, + { I2O_CLASS_END } +}; + +/* Exec OSM driver struct */ +struct i2o_driver i2o_exec_driver = { + .name = "exec-osm", + .reply = i2o_exec_reply, + .event = i2o_exec_event, + .classes = i2o_exec_class_id, + .driver.probe = i2o_exec_probe, + .driver.remove = i2o_exec_remove +}; + + +/** + * i2o_msg_post_wait_mem - Post and wait a message with DMA buffers + * @c: controller + * @m: message to post + * @timeout: time in seconds to wait + * @dma: i2o_dma struct of the DMA buffer to free on failure + * + * This API allows an OSM to post a message and then be told whether or + * not the system received a successful reply. If the message times out + * then the value '-ETIMEDOUT' is returned. This is a special case. In + * this situation the message may (should) complete at an indefinite time + * in the future. When it completes it will use the memory buffer + * attached to the request. 
If -ETIMEDOUT is returned then the memory + * buffer must not be freed. Instead the event completion will free them + * for you. In all other cases the buffer are your problem. + * + * Returns 0 on success or negative error code on failure. + */ +int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long + timeout, struct i2o_dma *dma) +{ + DECLARE_WAIT_QUEUE_HEAD(wq); + DEFINE_WAIT(wait); + struct i2o_exec_wait *iwait; + static u32 tcntxt = 0x80000000; + struct i2o_message *msg = c->in_queue.virt + m; + int rc = 0; + + iwait = i2o_exec_wait_alloc(); + if(!iwait) + return -ENOMEM; + + if(tcntxt == 0xffffffff) + tcntxt = 0x80000000; + + + if(dma) + iwait->dma = *dma; + + /* + * Fill in the message initiator context and transaction context. + * We will only use transaction contexts >= 0x80000000 for POST WAIT, + * so we could find a POST WAIT reply easier in the reply handler. + */ + writel(i2o_exec_driver.context, &msg->icntxt); + iwait->tcntxt = tcntxt ++; + writel(iwait->tcntxt, &msg->tcntxt); + + /* + * Post the message to the controller. At some point later it will + * return. If we time out before it returns then complete will be zero. + */ + i2o_msg_post(c,m); + + if(!iwait->complete) { + iwait->wq = &wq; + /* + * we add elements add the head, because if a entry in the list + * will never be removed, we have to iterate over it every time + */ + list_add(&iwait->list, &i2o_exec_wait_list); + + prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE); + + if(!iwait->complete) + schedule_timeout(timeout * HZ); + + finish_wait(&wq, &wait); + + iwait->wq = NULL; + } + + barrier(); + + if(iwait->complete) { + if(readl(&iwait->msg->body[0]) >> 24) + rc = readl(&iwait->msg->body[0]) & 0xff; + i2o_flush_reply(c, iwait->m); + i2o_exec_wait_free(iwait); + } else { + /* + * We cannot remove it now. This is important. When it does + * terminate (which it must do if the controller has not + * died...) then it will otherwise scribble on stuff. + * + * FIXME: try abort message + */ + if(dma) + dma->virt = NULL; + + rc = -ETIMEDOUT; + } + + return rc; +}; + +/** + * i2o_msg_post_wait_complete - Reply to a i2o_msg_post request from IOP + * @c: I2O controller which answers + * @m: message id + * @msg: pointer to the I2O reply message + * + * This function is called in interrupt context only. If the reply reached + * before the timeout, the i2o_exec_wait struct is filled with the message + * and the task will be waked up. The task is now responsible for returning + * the message m back to the controller! If the message reaches us after + * the timeout clean up the i2o_exec_wait struct (including allocated + * DMA buffer). + * + * Return 0 on success and if the message m should not be given back to the + * I2O controller, or >0 on success and if the message should be given back + * afterwords. Returns negative error code on failure. In this case the + * message must also be given back to the controller. + */ +static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m, + struct i2o_message *msg) +{ + struct i2o_exec_wait *wait, *tmp; + static spinlock_t lock = SPIN_LOCK_UNLOCKED; + int rc = 1; + u32 context; + + context = readl(&msg->tcntxt); + + /* + * We need to search through the i2o_exec_wait_list to see if the given + * message is still outstanding. If not, it means that the IOP took + * longer to respond to the message than we had allowed and timer has + * already expired. Not much we can do about that except log it for + * debug purposes, increase timeout, and recompile. 
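The POST WAIT transaction contexts handed out above live in a reserved range so the reply handler can route them without a lookup table. A minimal sketch of the counter, assuming nothing beyond the wrap rule visible in the code:

#include <stdio.h>
#include <stdint.h>

/*
 * Contexts >= 0x80000000 are reserved for POST WAIT.  This mirrors the
 * static counter in i2o_msg_post_wait_mem(): values in
 * [0x80000000, 0xfffffffe], wrapping before the all-ones value.
 */
static uint32_t next_post_wait_context(void)
{
	static uint32_t tcntxt = 0x80000000;

	if (tcntxt == 0xffffffff)
		tcntxt = 0x80000000;
	return tcntxt++;
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		uint32_t c = next_post_wait_context();

		printf("tcntxt %08x  post-wait? %s\n", c,
		       (c & 0x80000000) ? "yes" : "no");
	}
	return 0;
}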
+ */ + spin_lock(&lock); + list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) { + if(wait->tcntxt == context) { + list_del(&wait->list); + + wait->m = m; + wait->msg = msg; + wait->complete = 1; + + barrier(); + + if(wait->wq) { + wake_up_interruptible(wait->wq); + rc = 0; + } else { + struct device *dev; + + dev = &c->pdev->dev; + + DBG("timedout reply received!\n"); + i2o_dma_free(dev, &wait->dma); + i2o_exec_wait_free(wait); + rc = -1; + } + + spin_unlock(&lock); + + return rc; + } + } + + spin_unlock(&lock); + + DBG("i2o: Bogus reply in POST WAIT (tr-context: %08x)!\n", context); + + return -1; +}; + +/** + * i2o_exec_probe - Called if a new I2O device (executive class) appears + * @dev: I2O device which should be probed + * + * Registers event notification for every event from Executive device. The + * return is always 0, because we want all devices of class Executive. + * + * Returns 0 on success. + */ +static int i2o_exec_probe(struct device *dev) +{ + struct i2o_device *i2o_dev = to_i2o_device(dev); + + i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff); + + i2o_dev->iop->exec = i2o_dev; + + return 0; +}; + +/** + * i2o_exec_remove - Called on I2O device removal + * @dev: I2O device which was removed + * + * Unregisters event notification from Executive I2O device. + * + * Returns 0 on success. + */ +static int i2o_exec_remove(struct device *dev) +{ + i2o_event_register(to_i2o_device(dev), &i2o_exec_driver, 0, 0); + + return 0; +}; + +/** + * i2o_exec_wait_alloc - Allocate a i2o_exec_wait struct an initialize it + * + * Allocate the i2o_exec_wait struct and initialize the wait. + * + * Returns i2o_exec_wait pointer on success or negative error code on + * failure. + */ +static struct i2o_exec_wait *i2o_exec_wait_alloc(void) +{ + struct i2o_exec_wait *wait; + + wait = kmalloc(sizeof(*wait), GFP_KERNEL); + if(!wait) + return ERR_PTR(-ENOMEM); + + memset(wait, 0, sizeof(*wait)); + + INIT_LIST_HEAD(&wait->list); + + return wait; +}; + +/** + * i2o_exec_wait_free - Free a i2o_exec_wait struct + * @i2o_exec_wait: I2O wait data which should be cleaned up + */ +static void i2o_exec_wait_free(struct i2o_exec_wait *wait) +{ + kfree(wait); +}; + +/** + * i2o_exec_reply - I2O Executive reply handler + * @c: I2O controller from which the reply comes + * @m: message id + * @msg: pointer to the I2O reply message + * + * This function is always called from interrupt context. If a POST WAIT + * reply was received, pass it to the complete function. If a LCT NOTIFY + * reply was received, a new event is created to handle the update. + * + * Returns 0 on success and if the reply should not be flushed or > 0 + * on success and if the reply should be flushed. Returns negative error + * code on failure and if the reply should be flushed. 
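The completion side above boils down to "find the outstanding wait by context, wake it, or clean up after a timeout". The following user-space model keeps the same three outcomes (woken, timed out, bogus reply) with an ordinary linked list standing in for the kernel list/spinlock machinery:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct wait_entry {
	uint32_t tcntxt;
	int waiter_present;		/* stands in for wait->wq != NULL */
	struct wait_entry *next;
};

static struct wait_entry *wait_list;

/* Model of i2o_msg_post_wait_complete(): unlink the matching wait, then
 * either wake the sleeper (it owns the reply from now on) or, if the
 * sleeper already timed out and went away, clean up ourselves. */
static int complete_wait(uint32_t context)
{
	struct wait_entry **pp, *w;

	for (pp = &wait_list; (w = *pp) != NULL; pp = &w->next) {
		if (w->tcntxt != context)
			continue;
		*pp = w->next;			/* unlink */
		if (w->waiter_present) {
			printf("tcntxt %08x: waking waiter\n", context);
			return 0;		/* waiter frees and flushes */
		}
		printf("tcntxt %08x: waiter timed out, dropping reply\n", context);
		free(w);
		return -1;
	}
	printf("tcntxt %08x: bogus reply, nothing waiting\n", context);
	return -1;
}

int main(void)
{
	struct wait_entry *a = calloc(1, sizeof(*a));
	struct wait_entry *b = calloc(1, sizeof(*b));

	a->tcntxt = 0x80000001; a->waiter_present = 1; a->next = b;
	b->tcntxt = 0x80000002; b->waiter_present = 0; b->next = NULL;
	wait_list = a;

	complete_wait(0x80000002);	/* late reply after a timeout */
	complete_wait(0x80000001);	/* normal completion */
	complete_wait(0x80000003);	/* unknown context */
	free(a);
	return 0;
}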
+ */ +static int i2o_exec_reply(struct i2o_controller *c, u32 m, + struct i2o_message *msg) +{ + if (readl(&msg->head[0]) & MSG_FAIL) { // Fail bit is set + struct i2o_message *pmsg; /* preserved message */ + u32 pm; + + pm = readl(&msg->body[3]); + + pmsg = c->in_queue.virt + pm; + + i2o_report_status(KERN_INFO, "i2o_core", msg); + + /* Release the preserved msg by resubmitting it as a NOP */ + i2o_msg_nop(c, pm); + + /* If reply to i2o_post_wait failed, return causes a timeout */ + return -1; + } + + if(readl(&msg->tcntxt) & 0x80000000) + return i2o_msg_post_wait_complete(c, m, msg); + + if((readl(&msg->head[1])>>24) == I2O_CMD_LCT_NOTIFY) { + struct work_struct *work; + + DBG("%s: LCT notify received\n", c->name); + + work = kmalloc(sizeof(*work), GFP_ATOMIC); + if(!work) + return -ENOMEM; + + INIT_WORK(work, (void (*)(void *))i2o_exec_lct_modified, c); + queue_work(i2o_exec_driver.event_queue, work); + return 1; + } + + /* + * If this happens, we want to dump the message to the syslog so + * it can be sent back to the card manufacturer by the end user + * to aid in debugging. + * + */ + printk(KERN_WARNING "%s: Unsolicited message reply sent to core!" + "Message dumped to syslog\n", c->name); + i2o_dump_message(msg); + + return -EFAULT; +} + +/** + * i2o_exec_event - Event handling function + * @evt: Event which occurs + * + * Handles events send by the Executive device. At the moment does not do + * anything useful. + */ +static void i2o_exec_event(struct i2o_event *evt) +{ + printk(KERN_INFO "Event received from device: %d\n", + evt->i2o_dev->lct_data.tid); + kfree(evt); +}; + +/** + * i2o_exec_lct_get - Get the IOP's Logical Configuration Table + * @c: I2O controller from which the LCT should be fetched + * + * Send a LCT NOTIFY request to the controller, and wait + * I2O_TIMEOUT_LCT_GET seconds until arrival of response. If the LCT is + * to large, retry it. + * + * Returns 0 on success or negative error code on failure. + */ +int i2o_exec_lct_get(struct i2o_controller *c) +{ + struct i2o_message *msg; + u32 m; + int i = 0; + int rc = -EAGAIN; + + for(i = 1; i <= I2O_LCT_GET_TRIES; i ++) { + m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m == I2O_QUEUE_EMPTY) + return -ETIMEDOUT; + + writel(EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6, &msg->head[0]); + writel(I2O_CMD_LCT_NOTIFY<<24|HOST_TID<<12|ADAPTER_TID, + &msg->head[1]); + writel(0xffffffff, &msg->body[0]); + writel(0x00000000, &msg->body[1]); + writel(0xd0000000|c->dlct.len, &msg->body[2]); + writel(c->dlct.phys, &msg->body[3]); + + rc=i2o_msg_post_wait(c, m, I2O_TIMEOUT_LCT_GET); + if(rc<0) + break; + + rc = i2o_device_parse_lct(c); + if(rc != -EAGAIN) + break; + } + + return rc; +} + +/** + * i2o_exec_lct_notify - Send a asynchronus LCT NOTIFY request + * @c: I2O controller to which the request should be send + * @change_ind: change indicator + * + * This function sends a LCT NOTIFY request to the I2O controller with + * the change indicator change_ind. If the change_ind == 0 the controller + * replies immediately after the request. If change_ind > 0 the reply is + * send after change indicator of the LCT is > change_ind. 
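The ordering of checks in i2o_exec_reply() above can be captured as a small classifier. Only the decision order is taken from the code; the MSG_FAIL bit position and the LCT NOTIFY command value are stand-ins here.

#include <stdio.h>
#include <stdint.h>

#define MSG_FAIL	(1u << 13)	/* assumed fail-bit position in head[0] */
#define CMD_LCT_NOTIFY	0x88		/* stand-in for I2O_CMD_LCT_NOTIFY */

enum reply_path { PATH_FAILED, PATH_POST_WAIT, PATH_LCT_WORK, PATH_UNSOLICITED };

/* Same ordering as i2o_exec_reply(): fail bit first, then the reserved
 * POST WAIT context range, then LCT notifications; anything left over is
 * an unsolicited reply that only gets logged. */
static enum reply_path classify_reply(uint32_t head0, uint32_t tcntxt, unsigned int cmd)
{
	if (head0 & MSG_FAIL)
		return PATH_FAILED;
	if (tcntxt & 0x80000000)
		return PATH_POST_WAIT;
	if (cmd == CMD_LCT_NOTIFY)
		return PATH_LCT_WORK;
	return PATH_UNSOLICITED;
}

int main(void)
{
	static const char *names[] = {
		"failed frame", "post-wait completion", "lct notify work", "unsolicited"
	};

	printf("%s\n", names[classify_reply(MSG_FAIL, 0, 0)]);
	printf("%s\n", names[classify_reply(0, 0x80000005, 0)]);
	printf("%s\n", names[classify_reply(0, 0, CMD_LCT_NOTIFY)]);
	printf("%s\n", names[classify_reply(0, 1, 0x41)]);
	return 0;
}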
+ */ +int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind) +{ + i2o_status_block *sb = c->status_block.virt; + struct device *dev; + struct i2o_message *msg; + u32 m; + + dev = &c->pdev->dev; + + if(i2o_dma_realloc(dev, &c->dlct, sb->expected_lct_size, GFP_KERNEL)) + return -ENOMEM; + + m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m == I2O_QUEUE_EMPTY) + return -ETIMEDOUT; + + writel(EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6, &msg->head[0]); + writel(I2O_CMD_LCT_NOTIFY<<24|HOST_TID<<12|ADAPTER_TID, &msg->head[1]); + writel(i2o_exec_driver.context, &msg->icntxt); + writel(0, &msg->tcntxt); /* FIXME */ + writel(0xffffffff, &msg->body[0]); + writel(change_ind, &msg->body[1]); + writel(0xd0000000|c->dlct.len, &msg->body[2]); + writel(c->dlct.phys, &msg->body[3]); + + i2o_msg_post(c, m); + + return 0; +}; + +/** + * i2o_exec_lct_modified - Called on LCT NOTIFY reply + * @c: I2O controller on which the LCT has modified + * + * This function handles asynchronus LCT NOTIFY replies. It parses the + * new LCT and if the buffer for the LCT was to small sends a LCT NOTIFY + * again. + */ +static void i2o_exec_lct_modified(struct i2o_controller *c) +{ + if(i2o_device_parse_lct(c) == -EAGAIN) + i2o_exec_lct_notify(c, 0); +}; + +/** + * i2o_exec_init - Registers the Exec OSM + * + * Registers the Exec OSM in the I2O core. + * + * Returns 0 on success or negative error code on failure. + */ +int __init i2o_exec_init(void) +{ + return i2o_driver_register(&i2o_exec_driver); +}; + +/** + * i2o_exec_exit - Removes the Exec OSM + * + * Unregisters the Exec OSM from the I2O core. + */ +void __exit i2o_exec_exit(void) +{ + i2o_driver_unregister(&i2o_exec_driver); +}; + + +EXPORT_SYMBOL(i2o_msg_post_wait_mem); +EXPORT_SYMBOL(i2o_exec_lct_get); +EXPORT_SYMBOL(i2o_exec_lct_notify); diff -L drivers/message/i2o/i2o_block.c -puN drivers/message/i2o/i2o_block.c~i2o-build_99 /dev/null --- 25/drivers/message/i2o/i2o_block.c +++ /dev/null Thu Apr 11 07:25:15 2002 @@ -1,1693 +0,0 @@ -/* - * I2O Random Block Storage Class OSM - * - * (C) Copyright 1999-2002 Red Hat - * - * Written by Alan Cox, Building Number Three Ltd - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * For the purpose of avoiding doubt the preferred form of the work - * for making modifications shall be a standards compliant form such - * gzipped tar and not one requiring a proprietary or patent encumbered - * tool to unpack. - * - * This is a beta test release. Most of the good code was taken - * from the nbd driver by Pavel Machek, who in turn took some of it - * from loop.c. Isn't free software great for reusability 8) - * - * Fixes/additions: - * Steve Ralston: - * Multiple device handling error fixes, - * Added a queue depth. - * Alan Cox: - * FC920 has an rmw bug. Dont or in the end marker. - * Removed queue walk, fixed for 64bitness. 
- * Rewrote much of the code over time - * Added indirect block lists - * Handle 64K limits on many controllers - * Don't use indirects on the Promise (breaks) - * Heavily chop down the queue depths - * Deepak Saxena: - * Independent queues per IOP - * Support for dynamic device creation/deletion - * Code cleanup - * Support for larger I/Os through merge* functions - * (taken from DAC960 driver) - * Boji T Kannanthanam: - * Set the I2O Block devices to be detected in increasing - * order of TIDs during boot. - * Search and set the I2O block device that we boot off from as - * the first device to be claimed (as /dev/i2o/hda) - * Properly attach/detach I2O gendisk structure from the system - * gendisk list. The I2O block devices now appear in - * /proc/partitions. - * Markus Lidel : - * Minor bugfixes for 2.6. - * - * To do: - * Serial number scanning to find duplicates for FC multipathing - */ - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include -#include -#include -#include -#include - -#define MAJOR_NR I2O_MAJOR - -#define MAX_I2OB 16 - -#define MAX_I2OB_DEPTH 8 -#define MAX_I2OB_RETRIES 4 - -//#define DRIVERDEBUG -#ifdef DRIVERDEBUG -#define DEBUG( s ) printk( s ) -#else -#define DEBUG( s ) -#endif - -/* - * Events that this OSM is interested in - */ -#define I2OB_EVENT_MASK (I2O_EVT_IND_BSA_VOLUME_LOAD | \ - I2O_EVT_IND_BSA_VOLUME_UNLOAD | \ - I2O_EVT_IND_BSA_VOLUME_UNLOAD_REQ | \ - I2O_EVT_IND_BSA_CAPACITY_CHANGE | \ - I2O_EVT_IND_BSA_SCSI_SMART ) - - -/* - * Some of these can be made smaller later - */ - -static int i2ob_context; -static struct block_device_operations i2ob_fops; - -/* - * I2O Block device descriptor - */ -struct i2ob_device -{ - struct i2o_controller *controller; - struct i2o_device *i2odev; - int unit; - int tid; - int flags; - int refcnt; - struct request *head, *tail; - request_queue_t *req_queue; - int max_segments; - int max_direct; /* Not yet used properly */ - int done_flag; - int depth; - int rcache; - int wcache; - int power; - int index; - int media_change_flag; - u32 max_sectors; - struct gendisk *gd; -}; - -/* - * FIXME: - * We should cache align these to avoid ping-ponging lines on SMP - * boxes under heavy I/O load... - */ - -struct i2ob_request -{ - struct i2ob_request *next; - struct request *req; - int num; - int sg_dma_direction; - int sg_nents; - struct scatterlist sg_table[16]; -}; - -/* - * Per IOP request queue information - * - * We have a separate request_queue_t per IOP so that a heavilly - * loaded I2O block device on an IOP does not starve block devices - * across all I2O controllers. - * - */ -struct i2ob_iop_queue -{ - unsigned int queue_depth; - struct i2ob_request request_queue[MAX_I2OB_DEPTH]; - struct i2ob_request *i2ob_qhead; - request_queue_t *req_queue; - spinlock_t lock; -}; -static struct i2ob_iop_queue *i2ob_queues[MAX_I2O_CONTROLLERS]; - -/* - * Each I2O disk is one of these. - */ - -static struct i2ob_device i2ob_dev[MAX_I2OB]; -static int i2ob_dev_count = 0; - -/* - * Mutex and spin lock for event handling synchronization - * evt_msg contains the last event. 
- */ -static DECLARE_MUTEX_LOCKED(i2ob_evt_sem); -static DECLARE_COMPLETION(i2ob_thread_dead); -static spinlock_t i2ob_evt_lock = SPIN_LOCK_UNLOCKED; -static u32 evt_msg[MSG_FRAME_SIZE]; - -static void i2o_block_reply(struct i2o_handler *, struct i2o_controller *, - struct i2o_message *); -static void i2ob_new_device(struct i2o_controller *, struct i2o_device *); -static void i2ob_del_device(struct i2o_controller *, struct i2o_device *); -static void i2ob_reboot_event(void); -static int i2ob_install_device(struct i2o_controller *, struct i2o_device *, int); -static void i2ob_end_request(struct request *); -static void i2ob_request(request_queue_t *); -static int i2ob_init_iop(unsigned int); -static int i2ob_query_device(struct i2ob_device *, int, int, void*, int); -static int i2ob_evt(void *); - -static int evt_pid = 0; -static int evt_running = 0; -static int scan_unit = 0; - -/* - * I2O OSM registration structure...keeps getting bigger and bigger :) - */ -static struct i2o_handler i2o_block_handler = -{ - i2o_block_reply, - i2ob_new_device, - i2ob_del_device, - i2ob_reboot_event, - "I2O Block OSM", - 0, - I2O_CLASS_RANDOM_BLOCK_STORAGE -}; - -/** - * i2ob_get - Get an I2O message - * @dev: I2O block device - * - * Get a message from the FIFO used for this block device. The message is returned - * or the I2O 'no message' value of 0xFFFFFFFF if nothing is available. - */ - -static u32 i2ob_get(struct i2ob_device *dev) -{ - struct i2o_controller *c=dev->controller; - return I2O_POST_READ32(c); -} - -static int i2ob_build_sglist(struct i2ob_device *dev, struct i2ob_request *ireq) -{ - struct scatterlist *sg = ireq->sg_table; - int nents; - - nents = blk_rq_map_sg(dev->req_queue, ireq->req, ireq->sg_table); - - if (rq_data_dir(ireq->req) == READ) - ireq->sg_dma_direction = PCI_DMA_FROMDEVICE; - else - ireq->sg_dma_direction = PCI_DMA_TODEVICE; - - ireq->sg_nents = pci_map_sg(dev->controller->pdev, sg, nents, ireq->sg_dma_direction); - return ireq->sg_nents; -} - -void i2ob_free_sglist(struct i2ob_device *dev, struct i2ob_request *ireq) -{ - struct pci_dev *pdev = dev->controller->pdev; - struct scatterlist *sg = ireq->sg_table; - int nents = ireq->sg_nents; - pci_unmap_sg(pdev, sg, nents, ireq->sg_dma_direction); -} - -/** - * i2ob_send - Turn a request into a message and send it - * @m: Message offset - * @dev: I2O device - * @ireq: Request structure - * @unit: Device identity - * - * Generate an I2O BSAREAD request. This interface function is called for devices that - * appear to explode when they are fed indirect chain pointers (notably right now this - * appears to afflict Promise hardwre, so be careful what you feed the hardware - * - * No cleanup is done by this interface. It is done on the interrupt side when the - * reply arrives - */ - -static int i2ob_send(u32 m, struct i2ob_device *dev, struct i2ob_request *ireq, int unit) -{ - struct i2o_controller *c = dev->controller; - int tid = dev->tid; - void *msg; - void *mptr; - u64 offset; - struct request *req = ireq->req; - int count = req->nr_sectors<<9; - struct scatterlist *sg; - int sgnum; - int i; - - // printk(KERN_INFO "i2ob_send called\n"); - /* Map the message to a virtual address */ - msg = c->msg_virt + m; - - sgnum = i2ob_build_sglist(dev, ireq); - - /* FIXME: if we have no resources how should we get out of this */ - if(sgnum == 0) - BUG(); - - /* - * Build the message based on the request. 
- */ - i2o_raw_writel(i2ob_context|(unit<<8), msg+8); - i2o_raw_writel(ireq->num, msg+12); - i2o_raw_writel(req->nr_sectors << 9, msg+20); - - /* - * Mask out partitions from now on - */ - - /* This can be optimised later - just want to be sure its right for - starters */ - offset = ((u64)req->sector) << 9; - i2o_raw_writel( offset & 0xFFFFFFFF, msg+24); - i2o_raw_writel(offset>>32, msg+28); - mptr=msg+32; - - sg = ireq->sg_table; - if(rq_data_dir(req) == READ) - { - DEBUG("READ\n"); - i2o_raw_writel(I2O_CMD_BLOCK_READ<<24|HOST_TID<<12|tid, msg+4); - for(i = sgnum; i > 0; i--) - { - if(i != 1) - i2o_raw_writel(0x10000000|sg_dma_len(sg), mptr); - else - i2o_raw_writel(0xD0000000|sg_dma_len(sg), mptr); - i2o_raw_writel(sg_dma_address(sg), mptr+4); - mptr += 8; - count -= sg_dma_len(sg); - sg++; - } - switch(dev->rcache) - { - case CACHE_NULL: - i2o_raw_writel(0, msg+16);break; - case CACHE_PREFETCH: - i2o_raw_writel(0x201F0008, msg+16);break; - case CACHE_SMARTFETCH: - if(req->nr_sectors > 16) - i2o_raw_writel(0x201F0008, msg+16); - else - i2o_raw_writel(0x001F0000, msg+16); - break; - } - -// printk("Reading %d entries %d bytes.\n", -// mptr-msg-8, req->nr_sectors<<9); - } - else if(rq_data_dir(req) == WRITE) - { - DEBUG("WRITE\n"); - i2o_raw_writel(I2O_CMD_BLOCK_WRITE<<24|HOST_TID<<12|tid, msg+4); - for(i = sgnum; i > 0; i--) - { - if(i != 1) - i2o_raw_writel(0x14000000|sg_dma_len(sg), mptr); - else - i2o_raw_writel(0xD4000000|sg_dma_len(sg), mptr); - i2o_raw_writel(sg_dma_address(sg), mptr+4); - mptr += 8; - count -= sg_dma_len(sg); - sg++; - } - - switch(dev->wcache) - { - case CACHE_NULL: - i2o_raw_writel(0, msg+16);break; - case CACHE_WRITETHROUGH: - i2o_raw_writel(0x001F0008, msg+16);break; - case CACHE_WRITEBACK: - i2o_raw_writel(0x001F0010, msg+16);break; - case CACHE_SMARTBACK: - if(req->nr_sectors > 16) - i2o_raw_writel(0x001F0004, msg+16); - else - i2o_raw_writel(0x001F0010, msg+16); - break; - case CACHE_SMARTTHROUGH: - if(req->nr_sectors > 16) - i2o_raw_writel(0x001F0004, msg+16); - else - i2o_raw_writel(0x001F0010, msg+16); - } - -// printk("Writing %d entries %d bytes.\n", -// mptr-msg-8, req->nr_sectors<<9); - } - i2o_raw_writel(I2O_MESSAGE_SIZE(mptr-msg)>>2 | SGL_OFFSET_8, msg); - - if(count != 0) - { - printk(KERN_ERR "Request count botched by %d.\n", count); - } - - i2o_post_message(c,m); - i2ob_queues[c->unit]->queue_depth ++; - - return 0; -} - -/* - * Remove a request from the _locked_ request list. We update both the - * list chain and if this is the last item the tail pointer. Caller - * must hold the lock. - */ - -static inline void i2ob_unhook_request(struct i2ob_request *ireq, - unsigned int iop) -{ - ireq->next = i2ob_queues[iop]->i2ob_qhead; - i2ob_queues[iop]->i2ob_qhead = ireq; -} - -/* - * Request completion handler - */ - -static inline void i2ob_end_request(struct request *req) -{ - /* FIXME - pci unmap the request */ - - /* - * Loop until all of the buffers that are linked - * to this request have been marked updated and - * unlocked. - */ - - while (end_that_request_first( req, !req->errors, req->hard_cur_sectors )); - - /* - * It is now ok to complete the request. - */ - end_that_request_last( req ); - DEBUG("IO COMPLETED\n"); -} - -/* - * OSM reply handler. 
This gets all the message replies - */ - -static void i2o_block_reply(struct i2o_handler *h, struct i2o_controller *c, struct i2o_message *msg) -{ - unsigned long flags; - struct i2ob_request *ireq = NULL; - u8 st; - u32 *m = (u32 *)msg; - u8 unit = m[2]>>8; - struct i2ob_device *dev = &i2ob_dev[unit]; - - /* - * FAILed message - */ - if(m[0] & (1<<13)) - { - DEBUG("FAIL"); - /* - * FAILed message from controller - * We increment the error count and abort it - * - * In theory this will never happen. The I2O block class - * specification states that block devices never return - * FAILs but instead use the REQ status field...but - * better be on the safe side since no one really follows - * the spec to the book :) - */ - ireq=&i2ob_queues[c->unit]->request_queue[m[3]]; - ireq->req->errors++; - - spin_lock_irqsave(dev->req_queue->queue_lock, flags); - i2ob_unhook_request(ireq, c->unit); - i2ob_end_request(ireq->req); - spin_unlock_irqrestore(dev->req_queue->queue_lock, flags); - - /* Now flush the message by making it a NOP */ - m[0]&=0x00FFFFFF; - m[0]|=(I2O_CMD_UTIL_NOP)<<24; - i2o_post_message(c, (unsigned long) m - (unsigned long) c->msg_virt); - - return; - } - - if(msg->function == I2O_CMD_UTIL_EVT_REGISTER) - { - spin_lock(&i2ob_evt_lock); - memcpy(evt_msg, msg, (m[0]>>16)<<2); - spin_unlock(&i2ob_evt_lock); - up(&i2ob_evt_sem); - return; - } - - if(!dev->i2odev) - { - /* - * This is HACK, but Intel Integrated RAID allows user - * to delete a volume that is claimed, locked, and in use - * by the OS. We have to check for a reply from a - * non-existent device and flag it as an error or the system - * goes kaput... - */ - ireq=&i2ob_queues[c->unit]->request_queue[m[3]]; - ireq->req->errors++; - printk(KERN_WARNING "I2O Block: Data transfer to deleted device!\n"); - spin_lock_irqsave(dev->req_queue->queue_lock, flags); - i2ob_unhook_request(ireq, c->unit); - i2ob_end_request(ireq->req); - spin_unlock_irqrestore(dev->req_queue->queue_lock, flags); - return; - } - - /* - * Lets see what is cooking. We stuffed the - * request in the context. - */ - - ireq=&i2ob_queues[c->unit]->request_queue[m[3]]; - st=m[4]>>24; - - if(st!=0) - { - int err; - char *bsa_errors[] = - { - "Success", - "Media Error", - "Failure communicating to device", - "Device Failure", - "Device is not ready", - "Media not present", - "Media is locked by another user", - "Media has failed", - "Failure communicating to device", - "Device bus failure", - "Device is locked by another user", - "Device is write protected", - "Device has reset", - "Volume has changed, waiting for acknowledgement" - }; - - err = m[4]&0xFFFF; - - /* - * Device not ready means two things. One is that the - * the thing went offline (but not a removal media) - * - * The second is that you have a SuperTrak 100 and the - * firmware got constipated. Unlike standard i2o card - * setups the supertrak returns an error rather than - * blocking for the timeout in these cases. - * - * Don't stick a supertrak100 into cache aggressive modes - */ - - - printk(KERN_ERR "\n/dev/%s error: %s", dev->i2odev->dev_name, - bsa_errors[m[4]&0XFFFF]); - if(m[4]&0x00FF0000) - printk(" - DDM attempted %d retries", (m[4]>>16)&0x00FF ); - printk(".\n"); - ireq->req->errors++; - } - else - ireq->req->errors = 0; - - /* - * Dequeue the request. We use irqsave locks as one day we - * may be running polled controllers from a BH... 
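The detailed-status decoding in the reply handler above can be exercised stand-alone. Field positions are as the original code reads them; unlike the original, this sketch bounds-checks the error index before using it as a table offset.

#include <stdio.h>
#include <stdint.h>

/* Decode the BSA reply status word: top byte non-zero means error, the
 * low 16 bits index the error table, bits 23:16 carry the DDM retry count. */
static void decode_bsa_status(uint32_t word)
{
	static const char *bsa_errors[] = {
		"Success",
		"Media Error",
		"Failure communicating to device",
		"Device Failure",
		"Device is not ready",
		"Media not present",
		"Media is locked by another user",
		"Media has failed",
		"Failure communicating to device",
		"Device bus failure",
		"Device is locked by another user",
		"Device is write protected",
		"Device has reset",
		"Volume has changed, waiting for acknowledgement",
	};
	unsigned int status = word >> 24;
	unsigned int err = word & 0xffff;
	unsigned int retries = (word >> 16) & 0xff;

	if (!status) {
		printf("ok\n");
		return;
	}
	if (err < sizeof(bsa_errors) / sizeof(bsa_errors[0]))
		printf("error: %s", bsa_errors[err]);
	else
		printf("error: unknown detailed status 0x%04x", err);
	if (retries)
		printf(" - DDM attempted %u retries", retries);
	printf(".\n");
}

int main(void)
{
	decode_bsa_status(0x00000000);	/* success */
	decode_bsa_status(0x01030001);	/* media error after 3 retries */
	return 0;
}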
- */ - - i2ob_free_sglist(dev, ireq); - spin_lock_irqsave(dev->req_queue->queue_lock, flags); - i2ob_unhook_request(ireq, c->unit); - i2ob_end_request(ireq->req); - i2ob_queues[c->unit]->queue_depth --; - - /* - * We may be able to do more I/O - */ - - i2ob_request(dev->gd->queue); - spin_unlock_irqrestore(dev->req_queue->queue_lock, flags); -} - -/* - * Event handler. Needs to be a separate thread b/c we may have - * to do things like scan a partition table, or query parameters - * which cannot be done from an interrupt or from a bottom half. - */ -static int i2ob_evt(void *dummy) -{ - unsigned int evt; - unsigned long flags; - struct i2ob_device *dev; - int unit; - //The only event that has data is the SCSI_SMART event. - struct i2o_reply { - u32 header[4]; - u32 evt_indicator; - u8 ASC; - u8 ASCQ; - u16 pad; - u8 data[16]; - } *evt_local; - - daemonize("i2oblock"); - allow_signal(SIGKILL); - - evt_running = 1; - - while(1) - { - if(down_interruptible(&i2ob_evt_sem)) - { - evt_running = 0; - printk("exiting..."); - break; - } - - /* - * Keep another CPU/interrupt from overwriting the - * message while we're reading it - * - * We stuffed the unit in the TxContext and grab the event mask - * None of the BSA we care about events have EventData - */ - spin_lock_irqsave(&i2ob_evt_lock, flags); - evt_local = (struct i2o_reply *)evt_msg; - spin_unlock_irqrestore(&i2ob_evt_lock, flags); - - unit = le32_to_cpu(evt_local->header[3]); - evt = le32_to_cpu(evt_local->evt_indicator); - - dev = &i2ob_dev[unit]; - switch(evt) - { - /* - * New volume loaded on same TID, so we just re-install. - * The TID/controller don't change as it is the same - * I2O device. It's just new media that we have to - * rescan. - */ - case I2O_EVT_IND_BSA_VOLUME_LOAD: - { - i2ob_install_device(dev->i2odev->controller, - dev->i2odev, unit); - add_disk(dev->gd); - break; - } - - /* - * No media, so set all parameters to 0 and set the media - * change flag. The I2O device is still valid, just doesn't - * have media, so we don't want to clear the controller or - * device pointer. - */ - case I2O_EVT_IND_BSA_VOLUME_UNLOAD: - { - struct gendisk *p = dev->gd; - blk_queue_max_sectors(dev->gd->queue, 0); - del_gendisk(p); - put_disk(p); - dev->gd = NULL; - dev->media_change_flag = 1; - break; - } - - case I2O_EVT_IND_BSA_VOLUME_UNLOAD_REQ: - printk(KERN_WARNING "%s: Attempt to eject locked media\n", - dev->i2odev->dev_name); - break; - - /* - * The capacity has changed and we are going to be - * updating the max_sectors and other information - * about this disk. We try a revalidate first. If - * the block device is in use, we don't want to - * do that as there may be I/Os bound for the disk - * at the moment. In that case we read the size - * from the device and update the information ourselves - * and the user can later force a partition table - * update through an ioctl. - */ - case I2O_EVT_IND_BSA_CAPACITY_CHANGE: - { - u64 size; - - if(i2ob_query_device(dev, 0x0004, 0, &size, 8) !=0 ) - i2ob_query_device(dev, 0x0000, 4, &size, 8); - - spin_lock_irqsave(dev->req_queue->queue_lock, flags); - set_capacity(dev->gd, size>>9); - spin_unlock_irqrestore(dev->req_queue->queue_lock, flags); - break; - } - - /* - * We got a SCSI SMART event, we just log the relevant - * information and let the user decide what they want - * to do with the information. 
- */ - case I2O_EVT_IND_BSA_SCSI_SMART: - { - char buf[16]; - printk(KERN_INFO "I2O Block: %s received a SCSI SMART Event\n",dev->i2odev->dev_name); - evt_local->data[16]='\0'; - sprintf(buf,"%s",&evt_local->data[0]); - printk(KERN_INFO " Disk Serial#:%s\n",buf); - printk(KERN_INFO " ASC 0x%02x \n",evt_local->ASC); - printk(KERN_INFO " ASCQ 0x%02x \n",evt_local->ASCQ); - break; - } - - /* - * Non event - */ - - case 0: - break; - - /* - * An event we didn't ask for. Call the card manufacturer - * and tell them to fix their firmware :) - */ - - case 0x20: - /* - * If a promise card reports 0x20 event then the brown stuff - * hit the fan big time. The card seems to recover but loses - * the pending writes. Deeply ungood except for testing fsck - */ - if(dev->i2odev->controller->promise) - panic("I2O controller firmware failed. Reboot and force a filesystem check.\n"); - default: - printk(KERN_INFO "%s: Received event 0x%X we didn't register for\n" - KERN_INFO " Blame the I2O card manufacturer 8)\n", - dev->i2odev->dev_name, evt); - break; - } - }; - - complete_and_exit(&i2ob_thread_dead,0); - return 0; -} - -/* - * The I2O block driver is listed as one of those that pulls the - * front entry off the queue before processing it. This is important - * to remember here. If we drop the io lock then CURRENT will change - * on us. We must unlink CURRENT in this routine before we return, if - * we use it. - */ - -static void i2ob_request(request_queue_t *q) -{ - struct request *req; - struct i2ob_request *ireq; - struct i2ob_device *dev; - u32 m; - - while ((req = elv_next_request(q)) != NULL) { - dev = req->rq_disk->private_data; - - /* - * Queue depths probably belong with some kind of - * generic IOP commit control. Certainly it's not right - * its global! - */ - if(i2ob_queues[dev->unit]->queue_depth >= dev->depth) - break; - - /* Get a message */ - m = i2ob_get(dev); - - if(m==0xFFFFFFFF) - { - if(i2ob_queues[dev->unit]->queue_depth == 0) - printk(KERN_ERR "i2o_block: message queue and request queue empty!!\n"); - break; - } - /* - * Everything ok, so pull from kernel queue onto our queue - */ - req->errors = 0; - blkdev_dequeue_request(req); - - ireq = i2ob_queues[dev->unit]->i2ob_qhead; - i2ob_queues[dev->unit]->i2ob_qhead = ireq->next; - ireq->req = req; - - i2ob_send(m, dev, ireq, dev->index); - } -} - - -/* - * SCSI-CAM for ioctl geometry mapping - * Duplicated with SCSI - this should be moved into somewhere common - * perhaps genhd ? - * - * LBA -> CHS mapping table taken from: - * - * "Incorporating the I2O Architecture into BIOS for Intel Architecture - * Platforms" - * - * This is an I2O document that is only available to I2O members, - * not developers. 
- * - * From my understanding, this is how all the I2O cards do this - * - * Disk Size | Sectors | Heads | Cylinders - * ---------------+---------+-------+------------------- - * 1 < X <= 528M | 63 | 16 | X/(63 * 16 * 512) - * 528M < X <= 1G | 63 | 32 | X/(63 * 32 * 512) - * 1 < X <528M | 63 | 16 | X/(63 * 16 * 512) - * 1 < X <528M | 63 | 16 | X/(63 * 16 * 512) - * - */ -#define BLOCK_SIZE_528M 1081344 -#define BLOCK_SIZE_1G 2097152 -#define BLOCK_SIZE_21G 4403200 -#define BLOCK_SIZE_42G 8806400 -#define BLOCK_SIZE_84G 17612800 - -static void i2o_block_biosparam( - unsigned long capacity, - unsigned short *cyls, - unsigned char *hds, - unsigned char *secs) -{ - unsigned long heads, sectors, cylinders; - - sectors = 63L; /* Maximize sectors per track */ - if(capacity <= BLOCK_SIZE_528M) - heads = 16; - else if(capacity <= BLOCK_SIZE_1G) - heads = 32; - else if(capacity <= BLOCK_SIZE_21G) - heads = 64; - else if(capacity <= BLOCK_SIZE_42G) - heads = 128; - else - heads = 255; - - cylinders = (unsigned long)capacity / (heads * sectors); - - *cyls = (unsigned short) cylinders; /* Stuff return values */ - *secs = (unsigned char) sectors; - *hds = (unsigned char) heads; -} - -/* - * Issue device specific ioctl calls. - */ - -static int i2ob_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) -{ - struct gendisk *disk = inode->i_bdev->bd_disk; - struct i2ob_device *dev = disk->private_data; - void __user *argp = (void __user *)arg; - - /* Anyone capable of this syscall can do *real bad* things */ - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - switch (cmd) { - case HDIO_GETGEO: - { - struct hd_geometry g; - i2o_block_biosparam(get_capacity(disk), - &g.cylinders, &g.heads, &g.sectors); - g.start = get_start_sect(inode->i_bdev); - return copy_to_user(argp, &g, sizeof(g))?-EFAULT:0; - } - - case BLKI2OGRSTRAT: - return put_user(dev->rcache, (int __user *)argp); - case BLKI2OGWSTRAT: - return put_user(dev->wcache, (int __user *)argp); - case BLKI2OSRSTRAT: - if(arg<0||arg>CACHE_SMARTFETCH) - return -EINVAL; - dev->rcache = arg; - break; - case BLKI2OSWSTRAT: - if(arg!=0 && (argCACHE_SMARTBACK)) - return -EINVAL; - dev->wcache = arg; - break; - } - return -ENOTTY; -} - -/* - * Close the block device down - */ - -static int i2ob_release(struct inode *inode, struct file *file) -{ - struct gendisk *disk = inode->i_bdev->bd_disk; - struct i2ob_device *dev = disk->private_data; - - /* - * This is to deail with the case of an application - * opening a device and then the device dissapears while - * it's in use, and then the application tries to release - * it. ex: Unmounting a deleted RAID volume at reboot. - * If we send messages, it will just cause FAILs since - * the TID no longer exists. 
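The HDIO_GETGEO mapping above is self-contained enough to run in user space; capacity is in 512-byte sectors, as get_capacity() returns, and the thresholds are the BLOCK_SIZE_* constants from the driver.

#include <stdio.h>

/* SCSI-CAM style geometry: 63 sectors per track, heads picked from the
 * capacity, cylinders derived from the rest. */
static void biosparam(unsigned long capacity, unsigned short *cyls,
		      unsigned char *hds, unsigned char *secs)
{
	unsigned long heads, sectors = 63;

	if (capacity <= 1081344)		/* BLOCK_SIZE_528M */
		heads = 16;
	else if (capacity <= 2097152)		/* BLOCK_SIZE_1G */
		heads = 32;
	else if (capacity <= 4403200)		/* BLOCK_SIZE_21G */
		heads = 64;
	else if (capacity <= 8806400)		/* BLOCK_SIZE_42G */
		heads = 128;
	else
		heads = 255;

	*cyls = (unsigned short)(capacity / (heads * sectors));
	*hds = (unsigned char)heads;
	*secs = (unsigned char)sectors;
}

int main(void)
{
	unsigned short c;
	unsigned char h, s;

	biosparam(8388608, &c, &h, &s);		/* a 4 GB volume */
	printf("4GB volume -> C/H/S = %u/%u/%u\n", c, h, s);
	return 0;
}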
- */ - if(!dev->i2odev) - return 0; - - if (dev->refcnt <= 0) - printk(KERN_ALERT "i2ob_release: refcount(%d) <= 0\n", dev->refcnt); - dev->refcnt--; - if(dev->refcnt==0) - { - /* - * Flush the onboard cache on unmount - */ - u32 msg[5]; - int *query_done = &dev->done_flag; - msg[0] = (FIVE_WORD_MSG_SIZE|SGL_OFFSET_0); - msg[1] = I2O_CMD_BLOCK_CFLUSH<<24|HOST_TID<<12|dev->tid; - msg[2] = i2ob_context|0x40000000; - msg[3] = (u32)query_done; - msg[4] = 60<<16; - DEBUG("Flushing..."); - i2o_post_wait(dev->controller, msg, 20, 60); - - /* - * Unlock the media - */ - msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0; - msg[1] = I2O_CMD_BLOCK_MUNLOCK<<24|HOST_TID<<12|dev->tid; - msg[2] = i2ob_context|0x40000000; - msg[3] = (u32)query_done; - msg[4] = -1; - DEBUG("Unlocking..."); - i2o_post_wait(dev->controller, msg, 20, 2); - DEBUG("Unlocked.\n"); - - msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0; - msg[1] = I2O_CMD_BLOCK_POWER<<24 | HOST_TID << 12 | dev->tid; - if(dev->flags & (1<<3|1<<4)) /* Removable */ - msg[4] = 0x21 << 24; - else - msg[4] = 0x24 << 24; - - if(i2o_post_wait(dev->controller, msg, 20, 60)==0) - dev->power = 0x24; - - /* - * Now unclaim the device. - */ - - if (i2o_release_device(dev->i2odev, &i2o_block_handler)) - printk(KERN_ERR "i2ob_release: controller rejected unclaim.\n"); - - DEBUG("Unclaim\n"); - } - return 0; -} - -/* - * Open the block device. - */ - -static int i2ob_open(struct inode *inode, struct file *file) -{ - struct gendisk *disk = inode->i_bdev->bd_disk; - struct i2ob_device *dev = disk->private_data; - - if(!dev->i2odev) - return -ENODEV; - - if(dev->refcnt++==0) - { - u32 msg[6]; - - DEBUG("Claim "); - if(i2o_claim_device(dev->i2odev, &i2o_block_handler)) - { - dev->refcnt--; - printk(KERN_INFO "I2O Block: Could not open device\n"); - return -EBUSY; - } - DEBUG("Claimed "); - /* - * Power up if needed - */ - - if(dev->power > 0x1f) - { - msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0; - msg[1] = I2O_CMD_BLOCK_POWER<<24 | HOST_TID << 12 | dev->tid; - msg[4] = 0x02 << 24; - if(i2o_post_wait(dev->controller, msg, 20, 60) == 0) - dev->power = 0x02; - } - - /* - * Mount the media if needed. Note that we don't use - * the lock bit. Since we have to issue a lock if it - * refuses a mount (quite possible) then we might as - * well just send two messages out. - */ - msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0; - msg[1] = I2O_CMD_BLOCK_MMOUNT<<24|HOST_TID<<12|dev->tid; - msg[4] = -1; - msg[5] = 0; - DEBUG("Mount "); - i2o_post_wait(dev->controller, msg, 24, 2); - - /* - * Lock the media - */ - msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0; - msg[1] = I2O_CMD_BLOCK_MLOCK<<24|HOST_TID<<12|dev->tid; - msg[4] = -1; - DEBUG("Lock "); - i2o_post_wait(dev->controller, msg, 20, 2); - DEBUG("Ready.\n"); - } - return 0; -} - -/* - * Issue a device query - */ - -static int i2ob_query_device(struct i2ob_device *dev, int table, - int field, void *buf, int buflen) -{ - return i2o_query_scalar(dev->controller, dev->tid, - table, field, buf, buflen); -} - - -/* - * Install the I2O block device we found. - */ - -static int i2ob_install_device(struct i2o_controller *c, struct i2o_device *d, int unit) -{ - u64 size; - u32 blocksize; - u8 type; - u16 power; - u32 flags, status; - struct i2ob_device *dev=&i2ob_dev[unit]; - struct gendisk *disk; - request_queue_t *q; - int segments; - - - /* - * For logging purposes... 
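The claim/power-up/mount/lock on first open and flush/unlock/power-down on last close above follow a plain refcount pattern; reduced to a sketch, with the printed actions standing in for the real I2O messages:

#include <stdio.h>

struct blk_dev {
	int refcnt;
};

/* Only the first opener and the last closer talk to the hardware. */
static void dev_open(struct blk_dev *d)
{
	if (d->refcnt++ == 0)
		printf("first open: claim, power up, mount, lock\n");
	else
		printf("open (refcnt now %d)\n", d->refcnt);
}

static void dev_release(struct blk_dev *d)
{
	if (--d->refcnt == 0)
		printf("last close: flush cache, unlock, power down, unclaim\n");
	else
		printf("close (refcnt now %d)\n", d->refcnt);
}

int main(void)
{
	struct blk_dev d = { 0 };

	dev_open(&d);
	dev_open(&d);
	dev_release(&d);
	dev_release(&d);
	return 0;
}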
- */ - printk(KERN_INFO "i2ob: Installing tid %d device at unit %d\n", - d->lct_data.tid, unit); - - /* - * If this is the first I2O block device found on this IOP, - * we need to initialize all the queue data structures - * before any I/O can be performed. If it fails, this - * device is useless. - */ - if(!i2ob_queues[c->unit]) { - if(i2ob_init_iop(c->unit)) - return 1; - } - - q = i2ob_queues[c->unit]->req_queue; - - /* - * This will save one level of lookup/indirection in critical - * code so that we can directly get the queue ptr from the - * device instead of having to go the IOP data structure. - */ - dev->req_queue = q; - - /* - * Allocate a gendisk structure and initialize it - */ - disk = alloc_disk(16); - if (!disk) - return 1; - - dev->gd = disk; - /* initialize gendik structure */ - disk->major = MAJOR_NR; - disk->first_minor = unit<<4; - disk->queue = q; - disk->fops = &i2ob_fops; - sprintf(disk->disk_name, "i2o/hd%c", 'a' + unit); - disk->private_data = dev; - - /* - * Ask for the current media data. If that isn't supported - * then we ask for the device capacity data - */ - if(i2ob_query_device(dev, 0x0004, 1, &blocksize, 4) != 0 - || i2ob_query_device(dev, 0x0004, 0, &size, 8) !=0 ) - { - i2ob_query_device(dev, 0x0000, 3, &blocksize, 4); - i2ob_query_device(dev, 0x0000, 4, &size, 8); - } - - if(i2ob_query_device(dev, 0x0000, 2, &power, 2)!=0) - power = 0; - i2ob_query_device(dev, 0x0000, 5, &flags, 4); - i2ob_query_device(dev, 0x0000, 6, &status, 4); - set_capacity(disk, size>>9); - - /* - * Max number of Scatter-Gather Elements - */ - - dev->power = power; /* Save power state in device proper */ - dev->flags = flags; - - segments = (d->controller->status_block->inbound_frame_size - 7) / 2; - - if(segments > 16) - segments = 16; - - dev->power = power; /* Save power state */ - dev->flags = flags; /* Keep the type info */ - - blk_queue_max_sectors(q, 96); /* 256 might be nicer but many controllers - explode on 65536 or higher */ - blk_queue_max_phys_segments(q, segments); - blk_queue_max_hw_segments(q, segments); - - dev->rcache = CACHE_SMARTFETCH; - dev->wcache = CACHE_WRITETHROUGH; - - if(d->controller->battery == 0) - dev->wcache = CACHE_WRITETHROUGH; - - if(d->controller->promise) - dev->wcache = CACHE_WRITETHROUGH; - - if(d->controller->short_req) - { - blk_queue_max_sectors(q, 8); - blk_queue_max_phys_segments(q, 8); - blk_queue_max_hw_segments(q, 8); - } - - strcpy(d->dev_name, disk->disk_name); - strcpy(disk->devfs_name, disk->disk_name); - - printk(KERN_INFO "%s: Max segments %d, queue depth %d, byte limit %d.\n", - d->dev_name, dev->max_segments, dev->depth, dev->max_sectors<<9); - - i2ob_query_device(dev, 0x0000, 0, &type, 1); - - printk(KERN_INFO "%s: ", d->dev_name); - switch(type) - { - case 0: printk("Disk Storage");break; - case 4: printk("WORM");break; - case 5: printk("CD-ROM");break; - case 7: printk("Optical device");break; - default: - printk("Type %d", type); - } - if(status&(1<<10)) - printk("(RAID)"); - - if((flags^status)&(1<<4|1<<3)) /* Missing media or device */ - { - printk(KERN_INFO " Not loaded.\n"); - /* Device missing ? 
*/ - if((flags^status)&(1<<4)) - return 1; - } - else - { - printk(": %dMB, %d byte sectors", - (int)(size>>20), blocksize); - } - if(status&(1<<0)) - { - u32 cachesize; - i2ob_query_device(dev, 0x0003, 0, &cachesize, 4); - cachesize>>=10; - if(cachesize>4095) - printk(", %dMb cache", cachesize>>10); - else - printk(", %dKb cache", cachesize); - } - printk(".\n"); - printk(KERN_INFO "%s: Maximum sectors/read set to %d.\n", - d->dev_name, dev->max_sectors); - - /* - * Register for the events we're interested in and that the - * device actually supports. - */ - - i2o_event_register(c, d->lct_data.tid, i2ob_context, unit, - (I2OB_EVENT_MASK & d->lct_data.event_capabilities)); - return 0; -} - -/* - * Initialize IOP specific queue structures. This is called - * once for each IOP that has a block device sitting behind it. - */ -static int i2ob_init_iop(unsigned int unit) -{ - int i; - - i2ob_queues[unit] = (struct i2ob_iop_queue *) kmalloc(sizeof(struct i2ob_iop_queue), GFP_ATOMIC); - if(!i2ob_queues[unit]) - { - printk(KERN_WARNING "Could not allocate request queue for I2O block device!\n"); - return -1; - } - - for(i = 0; i< MAX_I2OB_DEPTH; i++) - { - i2ob_queues[unit]->request_queue[i].next = &i2ob_queues[unit]->request_queue[i+1]; - i2ob_queues[unit]->request_queue[i].num = i; - } - - /* Queue is MAX_I2OB + 1... */ - i2ob_queues[unit]->request_queue[i].next = NULL; - i2ob_queues[unit]->i2ob_qhead = &i2ob_queues[unit]->request_queue[0]; - i2ob_queues[unit]->queue_depth = 0; - - i2ob_queues[unit]->lock = SPIN_LOCK_UNLOCKED; - i2ob_queues[unit]->req_queue = blk_init_queue(i2ob_request, &i2ob_queues[unit]->lock); - if (!i2ob_queues[unit]->req_queue) { - kfree(i2ob_queues[unit]); - return -1; - } - - i2ob_queues[unit]->req_queue->queuedata = &i2ob_queues[unit]; - - return 0; -} - -/* - * Probe the I2O subsytem for block class devices - */ -static void i2ob_scan(int bios) -{ - int i; - int warned = 0; - - struct i2o_device *d, *b=NULL; - struct i2o_controller *c; - - for(i=0; i< MAX_I2O_CONTROLLERS; i++) - { - c=i2o_find_controller(i); - - if(c==NULL) - continue; - - /* - * The device list connected to the I2O Controller is doubly linked - * Here we traverse the end of the list , and start claiming devices - * from that end. This assures that within an I2O controller atleast - * the newly created volumes get claimed after the older ones, thus - * mapping to same major/minor (and hence device file name) after - * every reboot. - * The exception being: - * 1. If there was a TID reuse. - * 2. There was more than one I2O controller. - */ - - if(!bios) - { - for (d=c->devices;d!=NULL;d=d->next) - if(d->next == NULL) - b = d; - } - else - b = c->devices; - - while(b != NULL) - { - d=b; - if(bios) - b = b->next; - else - b = b->prev; - - if(d->lct_data.class_id!=I2O_CLASS_RANDOM_BLOCK_STORAGE) - continue; - - if(d->lct_data.user_tid != 0xFFF) - continue; - - if(bios) - { - if(d->lct_data.bios_info != 0x80) - continue; - printk(KERN_INFO "Claiming as Boot device: Controller %d, TID %d\n", c->unit, d->lct_data.tid); - } - else - { - if(d->lct_data.bios_info == 0x80) - continue; /*Already claimed on pass 1 */ - } - - if(scan_unitunit, d->lct_data.tid); - - /* Check for available space */ - if(i2ob_dev_count>=MAX_I2OB) - { - printk(KERN_ERR "i2o_block: No more devices allowed!\n"); - return; - } - for(unit = 0; unit < MAX_I2OB; unit ++) - { - if(!i2ob_dev[unit].i2odev) - break; - } - - if(i2o_claim_device(d, &i2o_block_handler)) - { - printk(KERN_INFO "i2o_block: Unable to claim device. 
Installation aborted\n"); - return; - } - - dev = &i2ob_dev[unit]; - dev->i2odev = d; - dev->controller = c; - dev->tid = d->lct_data.tid; - dev->unit = c->unit; - - if(i2ob_install_device(c,d,unit)) { - i2o_release_device(d, &i2o_block_handler); - printk(KERN_ERR "i2o_block: Could not install new device\n"); - } - else - { - i2o_release_device(d, &i2o_block_handler); - add_disk(dev->gd); - i2ob_dev_count++; - i2o_device_notify_on(d, &i2o_block_handler); - } - - return; -} - -/* - * Deleted device notification handler. Called when a device we - * are talking to has been deleted by the user or some other - * mysterious fource outside the kernel. - */ -void i2ob_del_device(struct i2o_controller *c, struct i2o_device *d) -{ - int unit = 0; - unsigned long flags; - struct i2ob_device *dev; - - for(unit = 0; unit < MAX_I2OB; unit ++) - { - dev = &i2ob_dev[unit]; - if(dev->i2odev == d) - { - printk(KERN_INFO " /dev/%s: Controller %d Tid %d\n", - d->dev_name, c->unit, d->lct_data.tid); - break; - } - } - - printk(KERN_INFO "I2O Block Device Deleted\n"); - - if(unit >= MAX_I2OB) - { - printk(KERN_ERR "i2ob_del_device called, but not in dev table!\n"); - return; - } - - spin_lock_irqsave(dev->req_queue->queue_lock, flags); - - /* - * Need to do this...we somtimes get two events from the IRTOS - * in a row and that causes lots of problems. - */ - i2o_device_notify_off(d, &i2o_block_handler); - - /* - * This will force errors when i2ob_get_queue() is called - * by the kenrel. - */ - if(dev->gd) { - struct gendisk *gd = dev->gd; - gd->queue = NULL; - del_gendisk(gd); - put_disk(gd); - dev->gd = NULL; - } - spin_unlock_irqrestore(dev->req_queue->queue_lock, flags); - dev->req_queue = NULL; - dev->i2odev = NULL; - dev->refcnt = 0; - dev->tid = 0; - - /* - * Do we need this? - * The media didn't really change...the device is just gone - */ - dev->media_change_flag = 1; - - i2ob_dev_count--; -} - -/* - * Have we seen a media change ? - */ -static int i2ob_media_change(struct gendisk *disk) -{ - struct i2ob_device *p = disk->private_data; - if(p->media_change_flag) - { - p->media_change_flag=0; - return 1; - } - return 0; -} - -static int i2ob_revalidate(struct gendisk *disk) -{ - struct i2ob_device *p = disk->private_data; - return i2ob_install_device(p->controller, p->i2odev, p->index); -} - -/* - * Reboot notifier. This is called by i2o_core when the system - * shuts down. 
- */ -static void i2ob_reboot_event(void) -{ - int i; - - for(i=0;irefcnt!=0) - { - /* - * Flush the onboard cache - */ - u32 msg[5]; - int *query_done = &dev->done_flag; - msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0; - msg[1] = I2O_CMD_BLOCK_CFLUSH<<24|HOST_TID<<12|dev->tid; - msg[2] = i2ob_context|0x40000000; - msg[3] = (u32)query_done; - msg[4] = 60<<16; - - DEBUG("Flushing..."); - i2o_post_wait(dev->controller, msg, 20, 60); - - DEBUG("Unlocking..."); - /* - * Unlock the media - */ - msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0; - msg[1] = I2O_CMD_BLOCK_MUNLOCK<<24|HOST_TID<<12|dev->tid; - msg[2] = i2ob_context|0x40000000; - msg[3] = (u32)query_done; - msg[4] = -1; - i2o_post_wait(dev->controller, msg, 20, 2); - - DEBUG("Unlocked.\n"); - } - } -} - -static struct block_device_operations i2ob_fops = -{ - .owner = THIS_MODULE, - .open = i2ob_open, - .release = i2ob_release, - .ioctl = i2ob_ioctl, - .media_changed = i2ob_media_change, - .revalidate_disk= i2ob_revalidate, -}; - -/* - * And here should be modules and kernel interface - * (Just smiley confuses emacs :-) - */ - -static int i2o_block_init(void) -{ - int i; - - printk(KERN_INFO "I2O Block Storage OSM v0.9\n"); - printk(KERN_INFO " (c) Copyright 1999-2001 Red Hat Software.\n"); - - /* - * Register the block device interfaces - */ - if (register_blkdev(MAJOR_NR, "i2o_block")) - return -EIO; - -#ifdef MODULE - printk(KERN_INFO "i2o_block: registered device at major %d\n", MAJOR_NR); -#endif - - /* - * Set up the queue - */ - for(i = 0; i < MAX_I2O_CONTROLLERS; i++) - i2ob_queues[i] = NULL; - - /* - * Now fill in the boiler plate - */ - - for (i = 0; i < MAX_I2OB; i++) { - struct i2ob_device *dev = &i2ob_dev[i]; - dev->index = i; - dev->refcnt = 0; - dev->flags = 0; - dev->controller = NULL; - dev->i2odev = NULL; - dev->tid = 0; - dev->head = NULL; - dev->tail = NULL; - dev->depth = MAX_I2OB_DEPTH; - dev->max_sectors = 2; - dev->gd = NULL; - } - - /* - * Register the OSM handler as we will need this to probe for - * drives, geometry and other goodies. - */ - - if(i2o_install_handler(&i2o_block_handler)<0) - { - unregister_blkdev(MAJOR_NR, "i2o_block"); - printk(KERN_ERR "i2o_block: unable to register OSM.\n"); - return -EINVAL; - } - i2ob_context = i2o_block_handler.context; - - /* - * Initialize event handling thread - */ - init_MUTEX_LOCKED(&i2ob_evt_sem); - evt_pid = kernel_thread(i2ob_evt, NULL, CLONE_SIGHAND); - if(evt_pid < 0) - { - printk(KERN_ERR "i2o_block: Could not initialize event thread. Aborting\n"); - i2o_remove_handler(&i2o_block_handler); - return 0; - } - - i2ob_probe(); - - return 0; - - unregister_blkdev(MAJOR_NR, "i2o_block"); - return -ENOMEM; -} - - -static void i2o_block_exit(void) -{ - int i; - - if(evt_running) { - printk(KERN_INFO "Killing I2O block threads..."); - i = kill_proc(evt_pid, SIGKILL, 1); - if(!i) { - printk("waiting...\n"); - } - /* Be sure it died */ - wait_for_completion(&i2ob_thread_dead); - printk("done.\n"); - } - - /* - * Unregister for updates from any devices..otherwise we still - * get them and the core jumps to random memory :O - */ - if(i2ob_dev_count) { - struct i2o_device *d; - for(i = 0; i < MAX_I2OB; i++) - if((d = i2ob_dev[i].i2odev)) - i2ob_del_device(d->controller, d); - } - - /* - * We may get further callbacks for ourself. The i2o_core - * code handles this case reasonably sanely. The problem here - * is we shouldn't get them .. but a couple of cards feel - * obliged to tell us stuff we don't care about. - * - * This isnt ideal at all but will do for now. 
- */ - - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(HZ); - - /* - * Flush the OSM - */ - - i2o_remove_handler(&i2o_block_handler); - - /* - * Return the block device - */ - if (unregister_blkdev(MAJOR_NR, "i2o_block") != 0) - printk("i2o_block: cleanup_module failed\n"); - - /* - * release request queue - */ - for (i = 0; i < MAX_I2O_CONTROLLERS; i ++) - if(i2ob_queues[i]) { - blk_cleanup_queue(i2ob_queues[i]->req_queue); - kfree(i2ob_queues[i]); - } -} - -MODULE_AUTHOR("Red Hat"); -MODULE_DESCRIPTION("I2O Block Device OSM"); -MODULE_LICENSE("GPL"); - -module_init(i2o_block_init); -module_exit(i2o_block_exit); diff -L drivers/message/i2o/i2o_config.c -puN drivers/message/i2o/i2o_config.c~i2o-build_99 /dev/null --- 25/drivers/message/i2o/i2o_config.c +++ /dev/null Thu Apr 11 07:25:15 2002 @@ -1,1149 +0,0 @@ -/* - * I2O Configuration Interface Driver - * - * (C) Copyright 1999-2002 Red Hat - * - * Written by Alan Cox, Building Number Three Ltd - * - * Fixes/additions: - * Deepak Saxena (04/20/1999): - * Added basic ioctl() support - * Deepak Saxena (06/07/1999): - * Added software download ioctl (still testing) - * Auvo Häkkinen (09/10/1999): - * Changes to i2o_cfg_reply(), ioctl_parms() - * Added ioct_validate() - * Taneli Vähäkangas (09/30/1999): - * Fixed ioctl_swdl() - * Taneli Vähäkangas (10/04/1999): - * Changed ioctl_swdl(), implemented ioctl_swul() and ioctl_swdel() - * Deepak Saxena (11/18/1999): - * Added event managmenet support - * Alan Cox : - * 2.4 rewrite ported to 2.5 - * Markus Lidel : - * Added pass-thru support for Adaptec's raidutils - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -static int i2o_cfg_context = -1; -static void *page_buf; -static spinlock_t i2o_config_lock = SPIN_LOCK_UNLOCKED; -struct wait_queue *i2o_wait_queue; - -#define MODINC(x,y) ((x) = ((x) + 1) % (y)) - -struct sg_simple_element { - u32 flag_count; - u32 addr_bus; -}; - -struct i2o_cfg_info -{ - struct file* fp; - struct fasync_struct *fasync; - struct i2o_evt_info event_q[I2O_EVT_Q_LEN]; - u16 q_in; // Queue head index - u16 q_out; // Queue tail index - u16 q_len; // Queue length - u16 q_lost; // Number of lost events - u32 q_id; // Event queue ID...used as tx_context - struct i2o_cfg_info *next; -}; -static struct i2o_cfg_info *open_files = NULL; -static int i2o_cfg_info_id = 0; - -static int ioctl_getiops(unsigned long); -static int ioctl_gethrt(unsigned long); -static int ioctl_getlct(unsigned long); -static int ioctl_parms(unsigned long, unsigned int); -static int ioctl_html(unsigned long); -static int ioctl_swdl(unsigned long); -static int ioctl_swul(unsigned long); -static int ioctl_swdel(unsigned long); -static int ioctl_validate(unsigned long); -static int ioctl_evt_reg(unsigned long, struct file *); -static int ioctl_evt_get(unsigned long, struct file *); -static int ioctl_passthru(unsigned long); -static int cfg_fasync(int, struct file*, int); - -/* - * This is the callback for any message we have posted. The message itself - * will be returned to the message pool when we return from the IRQ - * - * This runs in irq context so be short and sweet. 
- */ -static void i2o_cfg_reply(struct i2o_handler *h, struct i2o_controller *c, struct i2o_message *m) -{ - u32 *msg = (u32 *)m; - - if (msg[0] & MSG_FAIL) { - u32 *preserved_msg = (u32*)(c->msg_virt + msg[7]); - - printk(KERN_ERR "i2o_config: IOP failed to process the msg.\n"); - - /* Release the preserved msg frame by resubmitting it as a NOP */ - - preserved_msg[0] = THREE_WORD_MSG_SIZE | SGL_OFFSET_0; - preserved_msg[1] = I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0; - preserved_msg[2] = 0; - i2o_post_message(c, msg[7]); - } - - if (msg[4] >> 24) // ReqStatus != SUCCESS - i2o_report_status(KERN_INFO,"i2o_config", msg); - - if(m->function == I2O_CMD_UTIL_EVT_REGISTER) - { - struct i2o_cfg_info *inf; - - for(inf = open_files; inf; inf = inf->next) - if(inf->q_id == msg[3]) - break; - - // - // If this is the case, it means that we're getting - // events for a file descriptor that's been close()'d - // w/o the user unregistering for events first. - // The code currently assumes that the user will - // take care of unregistering for events before closing - // a file. - // - // TODO: - // Should we track event registartion and deregister - // for events when a file is close()'d so this doesn't - // happen? That would get rid of the search through - // the linked list since file->private_data could point - // directly to the i2o_config_info data structure...but - // it would mean having all sorts of tables to track - // what each file is registered for...I think the - // current method is simpler. - DS - // - if(!inf) - return; - - inf->event_q[inf->q_in].id.iop = c->unit; - inf->event_q[inf->q_in].id.tid = m->target_tid; - inf->event_q[inf->q_in].id.evt_mask = msg[4]; - - // - // Data size = msg size - reply header - // - inf->event_q[inf->q_in].data_size = (m->size - 5) * 4; - if(inf->event_q[inf->q_in].data_size) - memcpy(inf->event_q[inf->q_in].evt_data, - (unsigned char *)(msg + 5), - inf->event_q[inf->q_in].data_size); - - spin_lock(&i2o_config_lock); - MODINC(inf->q_in, I2O_EVT_Q_LEN); - if(inf->q_len == I2O_EVT_Q_LEN) - { - MODINC(inf->q_out, I2O_EVT_Q_LEN); - inf->q_lost++; - } - else - { - // Keep I2OEVTGET on another CPU from touching this - inf->q_len++; - } - spin_unlock(&i2o_config_lock); - - -// printk(KERN_INFO "File %p w/id %d has %d events\n", -// inf->fp, inf->q_id, inf->q_len); - - kill_fasync(&inf->fasync, SIGIO, POLL_IN); - } - - return; -} - -/* - * Each of these describes an i2o message handler. 
They are - * multiplexed by the i2o_core code - */ - -struct i2o_handler cfg_handler= -{ - i2o_cfg_reply, - NULL, - NULL, - NULL, - "Configuration", - 0, - 0xffffffff // All classes -}; - -static ssize_t cfg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) -{ - printk(KERN_INFO "i2o_config write not yet supported\n"); - - return 0; -} - - -static ssize_t cfg_read(struct file *file, char __user *buf, size_t count, loff_t *ptr) -{ - return 0; -} - -/* - * IOCTL Handler - */ -static int cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, - unsigned long arg) -{ - int ret; - - switch(cmd) - { - case I2OGETIOPS: - ret = ioctl_getiops(arg); - break; - - case I2OHRTGET: - ret = ioctl_gethrt(arg); - break; - - case I2OLCTGET: - ret = ioctl_getlct(arg); - break; - - case I2OPARMSET: - ret = ioctl_parms(arg, I2OPARMSET); - break; - - case I2OPARMGET: - ret = ioctl_parms(arg, I2OPARMGET); - break; - - case I2OSWDL: - ret = ioctl_swdl(arg); - break; - - case I2OSWUL: - ret = ioctl_swul(arg); - break; - - case I2OSWDEL: - ret = ioctl_swdel(arg); - break; - - case I2OVALIDATE: - ret = ioctl_validate(arg); - break; - - case I2OHTML: - ret = ioctl_html(arg); - break; - - case I2OEVTREG: - ret = ioctl_evt_reg(arg, fp); - break; - - case I2OEVTGET: - ret = ioctl_evt_get(arg, fp); - break; - - case I2OPASSTHRU: - ret = ioctl_passthru(arg); - break; - - default: - ret = -EINVAL; - } - - return ret; -} - -int ioctl_getiops(unsigned long arg) -{ - u8 __user *user_iop_table = (void __user *)arg; - struct i2o_controller *c = NULL; - int i; - u8 foo[MAX_I2O_CONTROLLERS]; - - if(!access_ok(VERIFY_WRITE, user_iop_table, MAX_I2O_CONTROLLERS)) - return -EFAULT; - - for(i = 0; i < MAX_I2O_CONTROLLERS; i++) - { - c = i2o_find_controller(i); - if(c) - { - foo[i] = 1; - if(pci_set_dma_mask(c->pdev, 0xffffffff)) - { - printk(KERN_WARNING "i2o_config : No suitable DMA available on controller %d\n", i); - i2o_unlock_controller(c); - continue; - } - - i2o_unlock_controller(c); - } - else - { - foo[i] = 0; - } - } - - __copy_to_user(user_iop_table, foo, MAX_I2O_CONTROLLERS); - return 0; -} - -int ioctl_gethrt(unsigned long arg) -{ - struct i2o_controller *c; - struct i2o_cmd_hrtlct __user *cmd = (void __user *)arg; - struct i2o_cmd_hrtlct kcmd; - i2o_hrt *hrt; - int len; - u32 reslen; - int ret = 0; - - if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct))) - return -EFAULT; - - if(get_user(reslen, kcmd.reslen) < 0) - return -EFAULT; - - if(kcmd.resbuf == NULL) - return -EFAULT; - - c = i2o_find_controller(kcmd.iop); - if(!c) - return -ENXIO; - - hrt = (i2o_hrt *)c->hrt; - - i2o_unlock_controller(c); - - len = 8 + ((hrt->entry_len * hrt->num_entries) << 2); - - /* We did a get user...so assuming mem is ok...is this bad? 
*/ - put_user(len, kcmd.reslen); - if(len > reslen) - ret = -ENOBUFS; - if(copy_to_user(kcmd.resbuf, (void*)hrt, len)) - ret = -EFAULT; - - return ret; -} - -int ioctl_getlct(unsigned long arg) -{ - struct i2o_controller *c; - struct i2o_cmd_hrtlct __user *cmd = (void __user *)arg; - struct i2o_cmd_hrtlct kcmd; - i2o_lct *lct; - int len; - int ret = 0; - u32 reslen; - - if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct))) - return -EFAULT; - - if(get_user(reslen, kcmd.reslen) < 0) - return -EFAULT; - - if(kcmd.resbuf == NULL) - return -EFAULT; - - c = i2o_find_controller(kcmd.iop); - if(!c) - return -ENXIO; - - lct = (i2o_lct *)c->lct; - i2o_unlock_controller(c); - - len = (unsigned int)lct->table_size << 2; - put_user(len, kcmd.reslen); - if(len > reslen) - ret = -ENOBUFS; - else if(copy_to_user(kcmd.resbuf, (void*)lct, len)) - ret = -EFAULT; - - return ret; -} - -static int ioctl_parms(unsigned long arg, unsigned int type) -{ - int ret = 0; - struct i2o_controller *c; - struct i2o_cmd_psetget __user *cmd = (void __user *)arg; - struct i2o_cmd_psetget kcmd; - u32 reslen; - u8 *ops; - u8 *res; - int len; - - u32 i2o_cmd = (type == I2OPARMGET ? - I2O_CMD_UTIL_PARAMS_GET : - I2O_CMD_UTIL_PARAMS_SET); - - if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_psetget))) - return -EFAULT; - - if(get_user(reslen, kcmd.reslen)) - return -EFAULT; - - c = i2o_find_controller(kcmd.iop); - if(!c) - return -ENXIO; - - ops = (u8*)kmalloc(kcmd.oplen, GFP_KERNEL); - if(!ops) - { - i2o_unlock_controller(c); - return -ENOMEM; - } - - if(copy_from_user(ops, kcmd.opbuf, kcmd.oplen)) - { - i2o_unlock_controller(c); - kfree(ops); - return -EFAULT; - } - - /* - * It's possible to have a _very_ large table - * and that the user asks for all of it at once... - */ - res = (u8*)kmalloc(65536, GFP_KERNEL); - if(!res) - { - i2o_unlock_controller(c); - kfree(ops); - return -ENOMEM; - } - - len = i2o_issue_params(i2o_cmd, c, kcmd.tid, - ops, kcmd.oplen, res, 65536); - i2o_unlock_controller(c); - kfree(ops); - - if (len < 0) { - kfree(res); - return -EAGAIN; - } - - put_user(len, kcmd.reslen); - if(len > reslen) - ret = -ENOBUFS; - else if(copy_to_user(kcmd.resbuf, res, len)) - ret = -EFAULT; - - kfree(res); - - return ret; -} - -int ioctl_html(unsigned long arg) -{ - struct i2o_html __user *cmd = (void __user *)arg; - struct i2o_html kcmd; - struct i2o_controller *c; - u8 *res = NULL; - void *query = NULL; - dma_addr_t query_phys, res_phys; - int ret = 0; - int token; - u32 len; - u32 reslen; - u32 msg[MSG_FRAME_SIZE]; - - if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_html))) - { - printk(KERN_INFO "i2o_config: can't copy html cmd\n"); - return -EFAULT; - } - - if(get_user(reslen, kcmd.reslen) < 0) - { - printk(KERN_INFO "i2o_config: can't copy html reslen\n"); - return -EFAULT; - } - - if(!kcmd.resbuf) - { - printk(KERN_INFO "i2o_config: NULL html buffer\n"); - return -EFAULT; - } - - c = i2o_find_controller(kcmd.iop); - if(!c) - return -ENXIO; - - if(kcmd.qlen) /* Check for post data */ - { - query = pci_alloc_consistent(c->pdev, kcmd.qlen, &query_phys); - if(!query) - { - i2o_unlock_controller(c); - return -ENOMEM; - } - if(copy_from_user(query, kcmd.qbuf, kcmd.qlen)) - { - i2o_unlock_controller(c); - printk(KERN_INFO "i2o_config: could not get query\n"); - pci_free_consistent(c->pdev, kcmd.qlen, query, query_phys); - return -EFAULT; - } - } - - res = pci_alloc_consistent(c->pdev, 65536, &res_phys); - if(!res) - { - i2o_unlock_controller(c); - pci_free_consistent(c->pdev, kcmd.qlen, query, query_phys); - 
return -ENOMEM; - } - - msg[1] = (I2O_CMD_UTIL_CONFIG_DIALOG << 24)|HOST_TID<<12|kcmd.tid; - msg[2] = i2o_cfg_context; - msg[3] = 0; - msg[4] = kcmd.page; - msg[5] = 0xD0000000|65536; - msg[6] = res_phys; - if(!kcmd.qlen) /* Check for post data */ - msg[0] = SEVEN_WORD_MSG_SIZE|SGL_OFFSET_5; - else - { - msg[0] = NINE_WORD_MSG_SIZE|SGL_OFFSET_5; - msg[5] = 0x50000000|65536; - msg[7] = 0xD4000000|(kcmd.qlen); - msg[8] = query_phys; - } - /* - Wait for a considerable time till the Controller - does its job before timing out. The controller might - take more time to process this request if there are - many devices connected to it. - */ - token = i2o_post_wait_mem(c, msg, 9*4, 400, query, res, query_phys, res_phys, kcmd.qlen, 65536); - if(token < 0) - { - printk(KERN_DEBUG "token = %#10x\n", token); - i2o_unlock_controller(c); - - if(token != -ETIMEDOUT) - { - pci_free_consistent(c->pdev, 65536, res, res_phys); - if(kcmd.qlen) - pci_free_consistent(c->pdev, kcmd.qlen, query, query_phys); - } - return token; - } - i2o_unlock_controller(c); - - len = strnlen(res, 65536); - put_user(len, kcmd.reslen); - if(len > reslen) - ret = -ENOMEM; - if(copy_to_user(kcmd.resbuf, res, len)) - ret = -EFAULT; - - pci_free_consistent(c->pdev, 65536, res, res_phys); - if(kcmd.qlen) - pci_free_consistent(c->pdev, kcmd.qlen, query, query_phys); - - return ret; -} - -int ioctl_swdl(unsigned long arg) -{ - struct i2o_sw_xfer kxfer; - struct i2o_sw_xfer __user *pxfer = (void __user *)arg; - unsigned char maxfrag = 0, curfrag = 1; - unsigned char *buffer; - u32 msg[9]; - unsigned int status = 0, swlen = 0, fragsize = 8192; - struct i2o_controller *c; - dma_addr_t buffer_phys; - - if(copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer))) - return -EFAULT; - - if(get_user(swlen, kxfer.swlen) < 0) - return -EFAULT; - - if(get_user(maxfrag, kxfer.maxfrag) < 0) - return -EFAULT; - - if(get_user(curfrag, kxfer.curfrag) < 0) - return -EFAULT; - - if(curfrag==maxfrag) fragsize = swlen-(maxfrag-1)*8192; - - if(!kxfer.buf || !access_ok(VERIFY_READ, kxfer.buf, fragsize)) - return -EFAULT; - - c = i2o_find_controller(kxfer.iop); - if(!c) - return -ENXIO; - - buffer=pci_alloc_consistent(c->pdev, fragsize, &buffer_phys); - if (buffer==NULL) - { - i2o_unlock_controller(c); - return -ENOMEM; - } - __copy_from_user(buffer, kxfer.buf, fragsize); - - msg[0]= NINE_WORD_MSG_SIZE | SGL_OFFSET_7; - msg[1]= I2O_CMD_SW_DOWNLOAD<<24 | HOST_TID<<12 | ADAPTER_TID; - msg[2]= (u32)cfg_handler.context; - msg[3]= 0; - msg[4]= (((u32)kxfer.flags)<<24) | (((u32)kxfer.sw_type)<<16) | - (((u32)maxfrag)<<8) | (((u32)curfrag)); - msg[5]= swlen; - msg[6]= kxfer.sw_id; - msg[7]= (0xD0000000 | fragsize); - msg[8]= buffer_phys; - -// printk("i2o_config: swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); - status = i2o_post_wait_mem(c, msg, sizeof(msg), 60, buffer, NULL, buffer_phys, 0, fragsize, 0); - - i2o_unlock_controller(c); - if(status != -ETIMEDOUT) - pci_free_consistent(c->pdev, fragsize, buffer, buffer_phys); - - if (status != I2O_POST_WAIT_OK) - { - // it fails if you try and send frags out of order - // and for some yet unknown reasons too - printk(KERN_INFO "i2o_config: swdl failed, DetailedStatus = %d\n", status); - return status; - } - - return 0; -} - -int ioctl_swul(unsigned long arg) -{ - struct i2o_sw_xfer kxfer; - struct i2o_sw_xfer __user *pxfer = (void __user *)arg; - unsigned char maxfrag = 0, curfrag = 1; - unsigned char *buffer; - u32 msg[9]; - unsigned int status = 0, swlen = 0, fragsize = 8192; - struct i2o_controller *c; - 
dma_addr_t buffer_phys; - - if(copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer))) - return -EFAULT; - - if(get_user(swlen, kxfer.swlen) < 0) - return -EFAULT; - - if(get_user(maxfrag, kxfer.maxfrag) < 0) - return -EFAULT; - - if(get_user(curfrag, kxfer.curfrag) < 0) - return -EFAULT; - - if(curfrag==maxfrag) fragsize = swlen-(maxfrag-1)*8192; - - if(!kxfer.buf || !access_ok(VERIFY_WRITE, kxfer.buf, fragsize)) - return -EFAULT; - - c = i2o_find_controller(kxfer.iop); - if(!c) - return -ENXIO; - - buffer=pci_alloc_consistent(c->pdev, fragsize, &buffer_phys); - if (buffer==NULL) - { - i2o_unlock_controller(c); - return -ENOMEM; - } - - msg[0]= NINE_WORD_MSG_SIZE | SGL_OFFSET_7; - msg[1]= I2O_CMD_SW_UPLOAD<<24 | HOST_TID<<12 | ADAPTER_TID; - msg[2]= (u32)cfg_handler.context; - msg[3]= 0; - msg[4]= (u32)kxfer.flags<<24|(u32)kxfer.sw_type<<16|(u32)maxfrag<<8|(u32)curfrag; - msg[5]= swlen; - msg[6]= kxfer.sw_id; - msg[7]= (0xD0000000 | fragsize); - msg[8]= buffer_phys; - -// printk("i2o_config: swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); - status = i2o_post_wait_mem(c, msg, sizeof(msg), 60, buffer, NULL, buffer_phys, 0, fragsize, 0); - i2o_unlock_controller(c); - - if (status != I2O_POST_WAIT_OK) - { - if(status != -ETIMEDOUT) - pci_free_consistent(c->pdev, fragsize, buffer, buffer_phys); - printk(KERN_INFO "i2o_config: swul failed, DetailedStatus = %d\n", status); - return status; - } - - __copy_to_user(kxfer.buf, buffer, fragsize); - pci_free_consistent(c->pdev, fragsize, buffer, buffer_phys); - - return 0; -} - -int ioctl_swdel(unsigned long arg) -{ - struct i2o_controller *c; - struct i2o_sw_xfer kxfer; - struct i2o_sw_xfer __user *pxfer = (void __user *)arg; - u32 msg[7]; - unsigned int swlen; - int token; - - if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer))) - return -EFAULT; - - if (get_user(swlen, kxfer.swlen) < 0) - return -EFAULT; - - c = i2o_find_controller(kxfer.iop); - if (!c) - return -ENXIO; - - msg[0] = SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0; - msg[1] = I2O_CMD_SW_REMOVE<<24 | HOST_TID<<12 | ADAPTER_TID; - msg[2] = (u32)i2o_cfg_context; - msg[3] = 0; - msg[4] = (u32)kxfer.flags<<24 | (u32)kxfer.sw_type<<16; - msg[5] = swlen; - msg[6] = kxfer.sw_id; - - token = i2o_post_wait(c, msg, sizeof(msg), 10); - i2o_unlock_controller(c); - - if (token != I2O_POST_WAIT_OK) - { - printk(KERN_INFO "i2o_config: swdel failed, DetailedStatus = %d\n", token); - return -ETIMEDOUT; - } - - return 0; -} - -int ioctl_validate(unsigned long arg) -{ - int token; - int iop = (int)arg; - u32 msg[4]; - struct i2o_controller *c; - - c=i2o_find_controller(iop); - if (!c) - return -ENXIO; - - msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0; - msg[1] = I2O_CMD_CONFIG_VALIDATE<<24 | HOST_TID<<12 | iop; - msg[2] = (u32)i2o_cfg_context; - msg[3] = 0; - - token = i2o_post_wait(c, msg, sizeof(msg), 10); - i2o_unlock_controller(c); - - if (token != I2O_POST_WAIT_OK) - { - printk(KERN_INFO "Can't validate configuration, ErrorStatus = %d\n", - token); - return -ETIMEDOUT; - } - - return 0; -} - -static int ioctl_evt_reg(unsigned long arg, struct file *fp) -{ - u32 msg[5]; - struct i2o_evt_id __user *pdesc = (void __user *)arg; - struct i2o_evt_id kdesc; - struct i2o_controller *iop; - struct i2o_device *d; - - if (copy_from_user(&kdesc, pdesc, sizeof(struct i2o_evt_id))) - return -EFAULT; - - /* IOP exists? */ - iop = i2o_find_controller(kdesc.iop); - if(!iop) - return -ENXIO; - i2o_unlock_controller(iop); - - /* Device exists? 
*/ - for(d = iop->devices; d; d = d->next) - if(d->lct_data.tid == kdesc.tid) - break; - - if(!d) - return -ENODEV; - - msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0; - msg[1] = I2O_CMD_UTIL_EVT_REGISTER<<24 | HOST_TID<<12 | kdesc.tid; - msg[2] = (u32)i2o_cfg_context; - msg[3] = (u32)fp->private_data; - msg[4] = kdesc.evt_mask; - - i2o_post_this(iop, msg, 20); - - return 0; -} - -static int ioctl_evt_get(unsigned long arg, struct file *fp) -{ - u32 id = (u32)fp->private_data; - struct i2o_cfg_info *p = NULL; - struct i2o_evt_get __user *uget = (void __user *)arg; - struct i2o_evt_get kget; - unsigned long flags; - - for(p = open_files; p; p = p->next) - if(p->q_id == id) - break; - - if(!p->q_len) - { - return -ENOENT; - return 0; - } - - memcpy(&kget.info, &p->event_q[p->q_out], sizeof(struct i2o_evt_info)); - MODINC(p->q_out, I2O_EVT_Q_LEN); - spin_lock_irqsave(&i2o_config_lock, flags); - p->q_len--; - kget.pending = p->q_len; - kget.lost = p->q_lost; - spin_unlock_irqrestore(&i2o_config_lock, flags); - - if(copy_to_user(uget, &kget, sizeof(struct i2o_evt_get))) - return -EFAULT; - return 0; -} - -static int ioctl_passthru(unsigned long arg) -{ - struct i2o_cmd_passthru __user *cmd = (void __user *) arg; - struct i2o_controller *c; - u32 msg[MSG_FRAME_SIZE]; - u32 __user *user_msg; - u32 *reply = NULL; - u32 __user *user_reply = NULL; - u32 size = 0; - u32 reply_size = 0; - u32 rcode = 0; - void *sg_list[SG_TABLESIZE]; - u32 sg_offset = 0; - u32 sg_count = 0; - int sg_index = 0; - u32 i = 0; - void *p = NULL; - unsigned int iop; - - if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg)) - return -EFAULT; - - c = i2o_find_controller(iop); - if (!c) - return -ENXIO; - - memset(&msg, 0, MSG_FRAME_SIZE*4); - if(get_user(size, &user_msg[0])) - return -EFAULT; - size = size>>16; - - user_reply = &user_msg[size]; - if(size > MSG_FRAME_SIZE) - return -EFAULT; - size *= 4; // Convert to bytes - - /* Copy in the user's I2O command */ - if(copy_from_user(msg, user_msg, size)) - return -EFAULT; - if(get_user(reply_size, &user_reply[0]) < 0) - return -EFAULT; - - reply_size = reply_size>>16; - reply = kmalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL); - if(!reply) { - printk(KERN_WARNING"%s: Could not allocate reply buffer\n",c->name); - return -ENOMEM; - } - memset(reply, 0, REPLY_FRAME_SIZE*4); - sg_offset = (msg[0]>>4)&0x0f; - msg[2] = (u32)i2o_cfg_context; - msg[3] = (u32)reply; - - memset(sg_list,0, sizeof(sg_list[0])*SG_TABLESIZE); - if(sg_offset) { - struct sg_simple_element *sg; - - if(sg_offset * 4 >= size) { - rcode = -EFAULT; - goto cleanup; - } - // TODO 64bit fix - sg = (struct sg_simple_element*) (msg+sg_offset); - sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element); - if (sg_count > SG_TABLESIZE) { - printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", c->name,sg_count); - kfree (reply); - return -EINVAL; - } - - for(i = 0; i < sg_count; i++) { - int sg_size; - - if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) { - printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",c->name,i, sg[i].flag_count); - rcode = -EINVAL; - goto cleanup; - } - sg_size = sg[i].flag_count & 0xffffff; - /* Allocate memory for the transfer */ - p = kmalloc(sg_size, GFP_KERNEL); - if (!p) { - printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n", c->name,sg_size,i,sg_count); - rcode = -ENOMEM; - goto cleanup; - } - sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame. 
- /* Copy in the user's SG buffer if necessary */ - if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) { - // TODO 64bit fix - if (copy_from_user(p,(void __user *)sg[i].addr_bus, sg_size)) { - printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",c->name,i); - rcode = -EFAULT; - goto cleanup; - } - } - //TODO 64bit fix - sg[i].addr_bus = (u32)virt_to_bus(p); - } - } - - rcode = i2o_post_wait(c, msg, size, 60); - if(rcode) - goto cleanup; - - if(sg_offset) { - /* Copy back the Scatter Gather buffers back to user space */ - u32 j; - // TODO 64bit fix - struct sg_simple_element* sg; - int sg_size; - - // re-acquire the original message to handle correctly the sg copy operation - memset(&msg, 0, MSG_FRAME_SIZE*4); - // get user msg size in u32s - if (get_user(size, &user_msg[0])) { - rcode = -EFAULT; - goto cleanup; - } - size = size>>16; - size *= 4; - /* Copy in the user's I2O command */ - if (copy_from_user (msg, user_msg, size)) { - rcode = -EFAULT; - goto cleanup; - } - sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element); - - // TODO 64bit fix - sg = (struct sg_simple_element*)(msg + sg_offset); - for (j = 0; j < sg_count; j++) { - /* Copy out the SG list to user's buffer if necessary */ - if (!(sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) { - sg_size = sg[j].flag_count & 0xffffff; - // TODO 64bit fix - if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) { - printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",c->name, sg_list[j], sg[j].addr_bus); - rcode = -EFAULT; - goto cleanup; - } - } - } - } - - /* Copy back the reply to user space */ - if (reply_size) { - // we wrote our own values for context - now restore the user supplied ones - if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) { - printk(KERN_WARNING"%s: Could not copy message context FROM user\n",c->name); - rcode = -EFAULT; - } - if(copy_to_user(user_reply, reply, reply_size)) { - printk(KERN_WARNING"%s: Could not copy reply TO user\n",c->name); - rcode = -EFAULT; - } - } - -cleanup: - kfree(reply); - i2o_unlock_controller(c); - return rcode; -} - -static int cfg_open(struct inode *inode, struct file *file) -{ - struct i2o_cfg_info *tmp = - (struct i2o_cfg_info *)kmalloc(sizeof(struct i2o_cfg_info), GFP_KERNEL); - unsigned long flags; - - if(!tmp) - return -ENOMEM; - - file->private_data = (void*)(i2o_cfg_info_id++); - tmp->fp = file; - tmp->fasync = NULL; - tmp->q_id = (u32)file->private_data; - tmp->q_len = 0; - tmp->q_in = 0; - tmp->q_out = 0; - tmp->q_lost = 0; - tmp->next = open_files; - - spin_lock_irqsave(&i2o_config_lock, flags); - open_files = tmp; - spin_unlock_irqrestore(&i2o_config_lock, flags); - - return 0; -} - -static int cfg_release(struct inode *inode, struct file *file) -{ - u32 id = (u32)file->private_data; - struct i2o_cfg_info *p1, *p2; - unsigned long flags; - - lock_kernel(); - p1 = p2 = NULL; - - spin_lock_irqsave(&i2o_config_lock, flags); - for(p1 = open_files; p1; ) - { - if(p1->q_id == id) - { - - if(p1->fasync) - cfg_fasync(-1, file, 0); - if(p2) - p2->next = p1->next; - else - open_files = p1->next; - - kfree(p1); - break; - } - p2 = p1; - p1 = p1->next; - } - spin_unlock_irqrestore(&i2o_config_lock, flags); - unlock_kernel(); - - return 0; -} - -static int cfg_fasync(int fd, struct file *fp, int on) -{ - u32 id = (u32)fp->private_data; - struct i2o_cfg_info *p; - - for(p = open_files; p; p = p->next) - if(p->q_id == id) - break; - - if(!p) - return -EBADF; - - return fasync_helper(fd, fp, on, &p->fasync); -} - -static struct 
file_operations config_fops = -{ - .owner = THIS_MODULE, - .llseek = no_llseek, - .read = cfg_read, - .write = cfg_write, - .ioctl = cfg_ioctl, - .open = cfg_open, - .release = cfg_release, - .fasync = cfg_fasync, -}; - -static struct miscdevice i2o_miscdev = { - I2O_MINOR, - "i2octl", - &config_fops -}; - -static int __init i2o_config_init(void) -{ - printk(KERN_INFO "I2O configuration manager v 0.04.\n"); - printk(KERN_INFO " (C) Copyright 1999 Red Hat Software\n"); - - if((page_buf = kmalloc(4096, GFP_KERNEL))==NULL) - { - printk(KERN_ERR "i2o_config: no memory for page buffer.\n"); - return -ENOBUFS; - } - if(misc_register(&i2o_miscdev) < 0) - { - printk(KERN_ERR "i2o_config: can't register device.\n"); - kfree(page_buf); - return -EBUSY; - } - /* - * Install our handler - */ - if(i2o_install_handler(&cfg_handler)<0) - { - kfree(page_buf); - printk(KERN_ERR "i2o_config: handler register failed.\n"); - misc_deregister(&i2o_miscdev); - return -EBUSY; - } - /* - * The low 16bits of the transaction context must match this - * for everything we post. Otherwise someone else gets our mail - */ - i2o_cfg_context = cfg_handler.context; - return 0; -} - -static void i2o_config_exit(void) -{ - misc_deregister(&i2o_miscdev); - - if(page_buf) - kfree(page_buf); - if(i2o_cfg_context != -1) - i2o_remove_handler(&cfg_handler); -} - -MODULE_AUTHOR("Red Hat Software"); -MODULE_DESCRIPTION("I2O Configuration"); -MODULE_LICENSE("GPL"); - -module_init(i2o_config_init); -module_exit(i2o_config_exit); diff -L drivers/message/i2o/i2o_core.c -puN drivers/message/i2o/i2o_core.c~i2o-build_99 /dev/null --- 25/drivers/message/i2o/i2o_core.c +++ /dev/null Thu Apr 11 07:25:15 2002 @@ -1,3978 +0,0 @@ -/* - * Core I2O structure management - * - * (C) Copyright 1999-2002 Red Hat Software - * - * Written by Alan Cox, Building Number Three Ltd - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - * A lot of the I2O message side code from this is taken from the - * Red Creek RCPCI45 adapter driver by Red Creek Communications - * - * Fixes/additions: - * Philipp Rumpf - * Juha Sievänen - * Auvo Häkkinen - * Deepak Saxena - * Boji T Kannanthanam - * Alan Cox : - * Ported to Linux 2.5. - * Markus Lidel : - * Minor fixes for 2.6. - * - */ - -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#ifdef CONFIG_MTRR -#include -#endif // CONFIG_MTRR - -#include "i2o_lan.h" - -//#define DRIVERDEBUG - -#ifdef DRIVERDEBUG -#define dprintk(s, args...) printk(s, ## args) -#else -#define dprintk(s, args...) 
-#endif - -/* OSM table */ -static struct i2o_handler *i2o_handlers[MAX_I2O_MODULES]; - -/* Controller list */ -static struct i2o_controller *i2o_controllers[MAX_I2O_CONTROLLERS]; -struct i2o_controller *i2o_controller_chain; -int i2o_num_controllers; - -/* Initiator Context for Core message */ -static int core_context; - -/* Initialization && shutdown functions */ -void i2o_sys_init(void); -static void i2o_sys_shutdown(void); -static int i2o_reset_controller(struct i2o_controller *); -static int i2o_reboot_event(struct notifier_block *, unsigned long , void *); -static int i2o_online_controller(struct i2o_controller *); -static int i2o_init_outbound_q(struct i2o_controller *); -static int i2o_post_outbound_messages(struct i2o_controller *); - -/* Reply handler */ -static void i2o_core_reply(struct i2o_handler *, struct i2o_controller *, - struct i2o_message *); - -/* Various helper functions */ -static int i2o_lct_get(struct i2o_controller *); -static int i2o_lct_notify(struct i2o_controller *); -static int i2o_hrt_get(struct i2o_controller *); - -static int i2o_build_sys_table(void); -static int i2o_systab_send(struct i2o_controller *c); - -/* I2O core event handler */ -static int i2o_core_evt(void *); -static int evt_pid; -static int evt_running; - -/* Dynamic LCT update handler */ -static int i2o_dyn_lct(void *); - -void i2o_report_controller_unit(struct i2o_controller *, struct i2o_device *); - -static void i2o_pci_dispose(struct i2o_controller *c); - -/* - * I2O System Table. Contains information about - * all the IOPs in the system. Used to inform IOPs - * about each other's existence. - * - * sys_tbl_ver is the CurrentChangeIndicator that is - * used by IOPs to track changes. - */ -static struct i2o_sys_tbl *sys_tbl; -static int sys_tbl_ind; -static int sys_tbl_len; - -/* - * This spin lock is used to keep a device from being - * added and deleted concurrently across CPUs or interrupts. - * This can occur when a user creates a device and immediatelly - * deletes it before the new_dev_notify() handler is called. - */ -static spinlock_t i2o_dev_lock = SPIN_LOCK_UNLOCKED; - -/* - * Structures and definitions for synchronous message posting. - * See i2o_post_wait() for description. - */ -struct i2o_post_wait_data -{ - int *status; /* Pointer to status block on caller stack */ - int *complete; /* Pointer to completion flag on caller stack */ - u32 id; /* Unique identifier */ - wait_queue_head_t *wq; /* Wake up for caller (NULL for dead) */ - struct i2o_post_wait_data *next; /* Chain */ - void *mem[2]; /* Memory blocks to recover on failure path */ - dma_addr_t phys[2]; /* Physical address of blocks to recover */ - u32 size[2]; /* Size of blocks to recover */ -}; - -static struct i2o_post_wait_data *post_wait_queue; -static u32 post_wait_id; // Unique ID for each post_wait -static spinlock_t post_wait_lock = SPIN_LOCK_UNLOCKED; -static void i2o_post_wait_complete(struct i2o_controller *, u32, int); - -/* OSM descriptor handler */ -static struct i2o_handler i2o_core_handler = -{ - (void *)i2o_core_reply, - NULL, - NULL, - NULL, - "I2O core layer", - 0, - I2O_CLASS_EXECUTIVE -}; - -/* - * Used when queueing a reply to be handled later - */ - -struct reply_info -{ - struct i2o_controller *iop; - u32 msg[MSG_FRAME_SIZE]; -}; -static struct reply_info evt_reply; -static struct reply_info events[I2O_EVT_Q_LEN]; -static int evt_in; -static int evt_out; -static int evt_q_len; -#define MODINC(x,y) ((x) = ((x) + 1) % (y)) - -/* - * I2O configuration spinlock. 
This isnt a big deal for contention - * so we have one only - */ - -static DECLARE_MUTEX(i2o_configuration_lock); - -/* - * Event spinlock. Used to keep event queue sane and from - * handling multiple events simultaneously. - */ -static spinlock_t i2o_evt_lock = SPIN_LOCK_UNLOCKED; - -/* - * Semaphore used to synchronize event handling thread with - * interrupt handler. - */ - -static DECLARE_MUTEX(evt_sem); -static DECLARE_COMPLETION(evt_dead); -static DECLARE_WAIT_QUEUE_HEAD(evt_wait); - -static struct notifier_block i2o_reboot_notifier = -{ - i2o_reboot_event, - NULL, - 0 -}; - -/* - * Config options - */ - -static int verbose; - -#if BITS_PER_LONG == 64 -/** - * i2o_context_list_add - append an ptr to the context list and return a - * matching context id. - * @ptr: pointer to add to the context list - * @c: controller to which the context list belong - * returns context id, which could be used in the transaction context - * field. - * - * Because the context field in I2O is only 32-bit large, on 64-bit the - * pointer is to large to fit in the context field. The i2o_context_list - * functiones map pointers to context fields. - */ -u32 i2o_context_list_add(void *ptr, struct i2o_controller *c) { - u32 context = 1; - struct i2o_context_list_element **entry = &c->context_list; - struct i2o_context_list_element *element; - unsigned long flags; - - spin_lock_irqsave(&c->context_list_lock, flags); - while(*entry && ((*entry)->flags & I2O_CONTEXT_LIST_USED)) { - if((*entry)->context >= context) - context = (*entry)->context + 1; - entry = &((*entry)->next); - } - - if(!*entry) { - if(unlikely(!context)) { - spin_unlock_irqrestore(&c->context_list_lock, flags); - printk(KERN_EMERG "i2o_core: context list overflow\n"); - return 0; - } - - element = kmalloc(sizeof(struct i2o_context_list_element), GFP_KERNEL); - if(!element) { - printk(KERN_EMERG "i2o_core: could not allocate memory for context list element\n"); - return 0; - } - element->context = context; - element->next = NULL; - *entry = element; - } else - element = *entry; - - element->ptr = ptr; - element->flags = I2O_CONTEXT_LIST_USED; - - spin_unlock_irqrestore(&c->context_list_lock, flags); - dprintk(KERN_DEBUG "i2o_core: add context to list %p -> %d\n", ptr, context); - return context; -} - -/** - * i2o_context_list_remove - remove a ptr from the context list and return - * the matching context id. - * @ptr: pointer to be removed from the context list - * @c: controller to which the context list belong - * returns context id, which could be used in the transaction context - * field. - */ -u32 i2o_context_list_remove(void *ptr, struct i2o_controller *c) { - struct i2o_context_list_element **entry = &c->context_list; - struct i2o_context_list_element *element; - u32 context; - unsigned long flags; - - spin_lock_irqsave(&c->context_list_lock, flags); - while(*entry && ((*entry)->ptr != ptr)) - entry = &((*entry)->next); - - if(unlikely(!*entry)) { - spin_unlock_irqrestore(&c->context_list_lock, flags); - printk(KERN_WARNING "i2o_core: could not remove nonexistent ptr %p\n", ptr); - return 0; - } - - element = *entry; - - context = element->context; - element->ptr = NULL; - element->flags |= I2O_CONTEXT_LIST_DELETED; - - spin_unlock_irqrestore(&c->context_list_lock, flags); - dprintk(KERN_DEBUG "i2o_core: markt as deleted in context list %p -> %d\n", ptr, context); - return context; -} - -/** - * i2o_context_list_get - get a ptr from the context list and remove it - * from the list. 
- * @context: context id to which the pointer belong - * @c: controller to which the context list belong - * returns pointer to the matching context id - */ -void *i2o_context_list_get(u32 context, struct i2o_controller *c) { - struct i2o_context_list_element **entry = &c->context_list; - struct i2o_context_list_element *element; - void *ptr; - int count = 0; - unsigned long flags; - - spin_lock_irqsave(&c->context_list_lock, flags); - while(*entry && ((*entry)->context != context)) { - entry = &((*entry)->next); - count ++; - } - - if(unlikely(!*entry)) { - spin_unlock_irqrestore(&c->context_list_lock, flags); - printk(KERN_WARNING "i2o_core: context id %d not found\n", context); - return NULL; - } - - element = *entry; - ptr = element->ptr; - if(count >= I2O_CONTEXT_LIST_MIN_LENGTH) { - *entry = (*entry)->next; - kfree(element); - } else { - element->ptr = NULL; - element->flags &= !I2O_CONTEXT_LIST_USED; - } - - spin_unlock_irqrestore(&c->context_list_lock, flags); - dprintk(KERN_DEBUG "i2o_core: get ptr from context list %d -> %p\n", context, ptr); - return ptr; -} -#endif - -/* - * I2O Core reply handler - */ -static void i2o_core_reply(struct i2o_handler *h, struct i2o_controller *c, - struct i2o_message *m) -{ - u32 *msg=(u32 *)m; - u32 status; - u32 context = msg[2]; - - if (msg[0] & MSG_FAIL) // Fail bit is set - { - u32 *preserved_msg = (u32*)(c->msg_virt + msg[7]); - - i2o_report_status(KERN_INFO, "i2o_core", msg); - i2o_dump_message(preserved_msg); - - /* If the failed request needs special treatment, - * it should be done here. */ - - /* Release the preserved msg by resubmitting it as a NOP */ - - preserved_msg[0] = cpu_to_le32(THREE_WORD_MSG_SIZE | SGL_OFFSET_0); - preserved_msg[1] = cpu_to_le32(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0); - preserved_msg[2] = 0; - i2o_post_message(c, msg[7]); - - /* If reply to i2o_post_wait failed, return causes a timeout */ - - return; - } - -#ifdef DRIVERDEBUG - i2o_report_status(KERN_INFO, "i2o_core", msg); -#endif - - if(msg[2]&0x80000000) // Post wait message - { - if (msg[4] >> 24) - status = (msg[4] & 0xFFFF); - else - status = I2O_POST_WAIT_OK; - - i2o_post_wait_complete(c, context, status); - return; - } - - if(m->function == I2O_CMD_UTIL_EVT_REGISTER) - { - memcpy(events[evt_in].msg, msg, (msg[0]>>16)<<2); - events[evt_in].iop = c; - - spin_lock(&i2o_evt_lock); - MODINC(evt_in, I2O_EVT_Q_LEN); - if(evt_q_len == I2O_EVT_Q_LEN) - MODINC(evt_out, I2O_EVT_Q_LEN); - else - evt_q_len++; - spin_unlock(&i2o_evt_lock); - - up(&evt_sem); - wake_up_interruptible(&evt_wait); - return; - } - - if(m->function == I2O_CMD_LCT_NOTIFY) - { - up(&c->lct_sem); - return; - } - - /* - * If this happens, we want to dump the message to the syslog so - * it can be sent back to the card manufacturer by the end user - * to aid in debugging. - * - */ - printk(KERN_WARNING "%s: Unsolicited message reply sent to core!" - "Message dumped to syslog\n", - c->name); - i2o_dump_message(msg); - - return; -} - -/** - * i2o_install_handler - install a message handler - * @h: Handler structure - * - * Install an I2O handler - these handle the asynchronous messaging - * from the card once it has initialised. If the table of handlers is - * full then -ENOSPC is returned. On a success 0 is returned and the - * context field is set by the function. The structure is part of the - * system from this time onwards. 
It must not be freed until it has - * been uninstalled - */ - -int i2o_install_handler(struct i2o_handler *h) -{ - int i; - down(&i2o_configuration_lock); - for(i=0;icontext = i; - i2o_handlers[i]=h; - up(&i2o_configuration_lock); - return 0; - } - } - up(&i2o_configuration_lock); - return -ENOSPC; -} - -/** - * i2o_remove_handler - remove an i2o message handler - * @h: handler - * - * Remove a message handler previously installed with i2o_install_handler. - * After this function returns the handler object can be freed or re-used - */ - -int i2o_remove_handler(struct i2o_handler *h) -{ - i2o_handlers[h->context]=NULL; - return 0; -} - - -/* - * Each I2O controller has a chain of devices on it. - * Each device has a pointer to its LCT entry to be used - * for fun purposes. - */ - -/** - * i2o_install_device - attach a device to a controller - * @c: controller - * @d: device - * - * Add a new device to an i2o controller. This can be called from - * non interrupt contexts only. It adds the device and marks it as - * unclaimed. The device memory becomes part of the kernel and must - * be uninstalled before being freed or reused. Zero is returned - * on success. - */ - -int i2o_install_device(struct i2o_controller *c, struct i2o_device *d) -{ - int i; - - down(&i2o_configuration_lock); - d->controller=c; - d->owner=NULL; - d->next=c->devices; - d->prev=NULL; - if (c->devices != NULL) - c->devices->prev=d; - c->devices=d; - *d->dev_name = 0; - - for(i = 0; i < I2O_MAX_MANAGERS; i++) - d->managers[i] = NULL; - - up(&i2o_configuration_lock); - return 0; -} - -/* we need this version to call out of i2o_delete_controller */ - -int __i2o_delete_device(struct i2o_device *d) -{ - struct i2o_device **p; - int i; - - p=&(d->controller->devices); - - /* - * Hey we have a driver! - * Check to see if the driver wants us to notify it of - * device deletion. If it doesn't we assume that it - * is unsafe to delete a device with an owner and - * fail. - */ - if(d->owner) - { - if(d->owner->dev_del_notify) - { - dprintk(KERN_INFO "Device has owner, notifying\n"); - d->owner->dev_del_notify(d->controller, d); - if(d->owner) - { - printk(KERN_WARNING - "Driver \"%s\" did not release device!\n", d->owner->name); - return -EBUSY; - } - } - else - return -EBUSY; - } - - /* - * Tell any other users who are talking to this device - * that it's going away. We assume that everything works. - */ - for(i=0; i < I2O_MAX_MANAGERS; i++) - { - if(d->managers[i] && d->managers[i]->dev_del_notify) - d->managers[i]->dev_del_notify(d->controller, d); - } - - while(*p!=NULL) - { - if(*p==d) - { - /* - * Destroy - */ - *p=d->next; - kfree(d); - return 0; - } - p=&((*p)->next); - } - printk(KERN_ERR "i2o_delete_device: passed invalid device.\n"); - return -EINVAL; -} - -/** - * i2o_delete_device - remove an i2o device - * @d: device to remove - * - * This function unhooks a device from a controller. The device - * will not be unhooked if it has an owner who does not wish to free - * it, or if the owner lacks a dev_del_notify function. In that case - * -EBUSY is returned. On success 0 is returned. Other errors cause - * negative errno values to be returned - */ - -int i2o_delete_device(struct i2o_device *d) -{ - int ret; - - down(&i2o_configuration_lock); - - /* - * Seek, locate - */ - - ret = __i2o_delete_device(d); - - up(&i2o_configuration_lock); - - return ret; -} - -/** - * i2o_install_controller - attach a controller - * @c: controller - * - * Add a new controller to the i2o layer. 
This can be called from - * non interrupt contexts only. It adds the controller and marks it as - * unused with no devices. If the tables are full or memory allocations - * fail then a negative errno code is returned. On success zero is - * returned and the controller is bound to the system. The structure - * must not be freed or reused until being uninstalled. - */ - -int i2o_install_controller(struct i2o_controller *c) -{ - int i; - down(&i2o_configuration_lock); - for(i=0;idlct = (i2o_lct*)pci_alloc_consistent(c->pdev, 8192, &c->dlct_phys); - if(c->dlct==NULL) - { - up(&i2o_configuration_lock); - return -ENOMEM; - } - i2o_controllers[i]=c; - c->devices = NULL; - c->next=i2o_controller_chain; - i2o_controller_chain=c; - c->unit = i; - c->page_frame = NULL; - c->hrt = NULL; - c->hrt_len = 0; - c->lct = NULL; - c->status_block = NULL; - sprintf(c->name, "i2o/iop%d", i); - i2o_num_controllers++; - init_MUTEX_LOCKED(&c->lct_sem); - up(&i2o_configuration_lock); - return 0; - } - } - printk(KERN_ERR "No free i2o controller slots.\n"); - up(&i2o_configuration_lock); - return -EBUSY; -} - -/** - * i2o_delete_controller - delete a controller - * @c: controller - * - * Remove an i2o controller from the system. If the controller or its - * devices are busy then -EBUSY is returned. On a failure a negative - * errno code is returned. On success zero is returned. - */ - -int i2o_delete_controller(struct i2o_controller *c) -{ - struct i2o_controller **p; - int users; - char name[16]; - int stat; - - dprintk(KERN_INFO "Deleting controller %s\n", c->name); - - /* - * Clear event registration as this can cause weird behavior - */ - if(c->status_block->iop_state == ADAPTER_STATE_OPERATIONAL) - i2o_event_register(c, core_context, 0, 0, 0); - - down(&i2o_configuration_lock); - if((users=atomic_read(&c->users))) - { - dprintk(KERN_INFO "I2O: %d users for controller %s\n", users, - c->name); - up(&i2o_configuration_lock); - return -EBUSY; - } - while(c->devices) - { - if(__i2o_delete_device(c->devices)<0) - { - /* Shouldnt happen */ - I2O_IRQ_WRITE32(c, 0xFFFFFFFF); - c->enabled = 0; - up(&i2o_configuration_lock); - return -EBUSY; - } - } - - /* - * If this is shutdown time, the thread's already been killed - */ - if(c->lct_running) { - stat = kill_proc(c->lct_pid, SIGKILL, 1); - if(!stat) { - int count = 10 * 100; - while(c->lct_running && --count) { - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(1); - } - - if(!count) - printk(KERN_ERR - "%s: LCT thread still running!\n", - c->name); - } - } - - p=&i2o_controller_chain; - - while(*p) - { - if(*p==c) - { - /* Ask the IOP to switch to RESET state */ - i2o_reset_controller(c); - - /* Release IRQ */ - i2o_pci_dispose(c); - - *p=c->next; - up(&i2o_configuration_lock); - - if(c->page_frame) - { - pci_unmap_single(c->pdev, c->page_frame_map, MSG_POOL_SIZE, PCI_DMA_FROMDEVICE); - kfree(c->page_frame); - } - if(c->hrt) - pci_free_consistent(c->pdev, c->hrt_len, c->hrt, c->hrt_phys); - if(c->lct) - pci_free_consistent(c->pdev, c->lct->table_size << 2, c->lct, c->lct_phys); - if(c->status_block) - pci_free_consistent(c->pdev, sizeof(i2o_status_block), c->status_block, c->status_block_phys); - if(c->dlct) - pci_free_consistent(c->pdev, 8192, c->dlct, c->dlct_phys); - - i2o_controllers[c->unit]=NULL; - memcpy(name, c->name, strlen(c->name)+1); - kfree(c); - dprintk(KERN_INFO "%s: Deleted from controller chain.\n", name); - - i2o_num_controllers--; - return 0; - } - p=&((*p)->next); - } - up(&i2o_configuration_lock); - printk(KERN_ERR "i2o_delete_controller: 
bad pointer!\n"); - return -ENOENT; -} - -/** - * i2o_unlock_controller - unlock a controller - * @c: controller to unlock - * - * Take a lock on an i2o controller. This prevents it being deleted. - * i2o controllers are not refcounted so a deletion of an in use device - * will fail, not take affect on the last dereference. - */ - -void i2o_unlock_controller(struct i2o_controller *c) -{ - atomic_dec(&c->users); -} - -/** - * i2o_find_controller - return a locked controller - * @n: controller number - * - * Returns a pointer to the controller object. The controller is locked - * on return. NULL is returned if the controller is not found. - */ - -struct i2o_controller *i2o_find_controller(int n) -{ - struct i2o_controller *c; - - if(n<0 || n>=MAX_I2O_CONTROLLERS) - return NULL; - - down(&i2o_configuration_lock); - c=i2o_controllers[n]; - if(c!=NULL) - atomic_inc(&c->users); - up(&i2o_configuration_lock); - return c; -} - -/** - * i2o_issue_claim - claim or release a device - * @cmd: command - * @c: controller to claim for - * @tid: i2o task id - * @type: type of claim - * - * Issue I2O UTIL_CLAIM and UTIL_RELEASE messages. The message to be sent - * is set by cmd. The tid is the task id of the object to claim and the - * type is the claim type (see the i2o standard) - * - * Zero is returned on success. - */ - -static int i2o_issue_claim(u32 cmd, struct i2o_controller *c, int tid, u32 type) -{ - u32 msg[5]; - - msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0; - msg[1] = cmd << 24 | HOST_TID<<12 | tid; - msg[3] = 0; - msg[4] = type; - - return i2o_post_wait(c, msg, sizeof(msg), 60); -} - -/* - * i2o_claim_device - claim a device for use by an OSM - * @d: device to claim - * @h: handler for this device - * - * Do the leg work to assign a device to a given OSM on Linux. The - * kernel updates the internal handler data for the device and then - * performs an I2O claim for the device, attempting to claim the - * device as primary. If the attempt fails a negative errno code - * is returned. On success zero is returned. - */ - -int i2o_claim_device(struct i2o_device *d, struct i2o_handler *h) -{ - int ret = 0; - - down(&i2o_configuration_lock); - if (d->owner) { - printk(KERN_INFO "Device claim called, but dev already owned by %s!", - h->name); - ret = -EBUSY; - goto out; - } - d->owner=h; - - if(i2o_issue_claim(I2O_CMD_UTIL_CLAIM ,d->controller,d->lct_data.tid, - I2O_CLAIM_PRIMARY)) - { - d->owner = NULL; - ret = -EBUSY; - } -out: - up(&i2o_configuration_lock); - return ret; -} - -/** - * i2o_release_device - release a device that the OSM is using - * @d: device to claim - * @h: handler for this device - * - * Drop a claim by an OSM on a given I2O device. The handler is cleared - * and 0 is returned on success. - * - * AC - some devices seem to want to refuse an unclaim until they have - * finished internal processing. It makes sense since you don't want a - * new device to go reconfiguring the entire system until you are done. - * Thus we are prepared to wait briefly. - */ - -int i2o_release_device(struct i2o_device *d, struct i2o_handler *h) -{ - int err = 0; - int tries; - - down(&i2o_configuration_lock); - if (d->owner != h) { - printk(KERN_INFO "Claim release called, but not owned by %s!\n", - h->name); - up(&i2o_configuration_lock); - return -ENOENT; - } - - for(tries=0;tries<10;tries++) - { - d->owner = NULL; - - /* - * If the controller takes a nonblocking approach to - * releases we have to sleep/poll for a few times. 
- */ - - if((err=i2o_issue_claim(I2O_CMD_UTIL_RELEASE, d->controller, d->lct_data.tid, I2O_CLAIM_PRIMARY)) ) - { - err = -ENXIO; - current->state = TASK_UNINTERRUPTIBLE; - schedule_timeout(HZ); - } - else - { - err=0; - break; - } - } - up(&i2o_configuration_lock); - return err; -} - -/** - * i2o_device_notify_on - Enable deletion notifiers - * @d: device for notification - * @h: handler to install - * - * Called by OSMs to let the core know that they want to be - * notified if the given device is deleted from the system. - */ - -int i2o_device_notify_on(struct i2o_device *d, struct i2o_handler *h) -{ - int i; - - if(d->num_managers == I2O_MAX_MANAGERS) - return -ENOSPC; - - for(i = 0; i < I2O_MAX_MANAGERS; i++) - { - if(!d->managers[i]) - { - d->managers[i] = h; - break; - } - } - - d->num_managers++; - - return 0; -} - -/** - * i2o_device_notify_off - Remove deletion notifiers - * @d: device for notification - * @h: handler to remove - * - * Called by OSMs to let the core know that they no longer - * are interested in the fate of the given device. - */ -int i2o_device_notify_off(struct i2o_device *d, struct i2o_handler *h) -{ - int i; - - for(i=0; i < I2O_MAX_MANAGERS; i++) - { - if(d->managers[i] == h) - { - d->managers[i] = NULL; - d->num_managers--; - return 0; - } - } - - return -ENOENT; -} - -/** - * i2o_event_register - register interest in an event - * @c: Controller to register interest with - * @tid: I2O task id - * @init_context: initiator context to use with this notifier - * @tr_context: transaction context to use with this notifier - * @evt_mask: mask of events - * - * Create and posts an event registration message to the task. No reply - * is waited for, or expected. Errors in posting will be reported. - */ - -int i2o_event_register(struct i2o_controller *c, u32 tid, - u32 init_context, u32 tr_context, u32 evt_mask) -{ - u32 msg[5]; // Not performance critical, so we just - // i2o_post_this it instead of building it - // in IOP memory - - msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0; - msg[1] = I2O_CMD_UTIL_EVT_REGISTER<<24 | HOST_TID<<12 | tid; - msg[2] = init_context; - msg[3] = tr_context; - msg[4] = evt_mask; - - return i2o_post_this(c, msg, sizeof(msg)); -} - -/* - * i2o_event_ack - acknowledge an event - * @c: controller - * @msg: pointer to the UTIL_EVENT_REGISTER reply we received - * - * We just take a pointer to the original UTIL_EVENT_REGISTER reply - * message and change the function code since that's what spec - * describes an EventAck message looking like. - */ - -int i2o_event_ack(struct i2o_controller *c, u32 *msg) -{ - struct i2o_message *m = (struct i2o_message *)msg; - - m->function = I2O_CMD_UTIL_EVT_ACK; - - return i2o_post_wait(c, msg, m->size * 4, 2); -} - -/* - * Core event handler. Runs as a separate thread and is woken - * up whenever there is an Executive class event. 
- */ -static int i2o_core_evt(void *reply_data) -{ - struct reply_info *reply = (struct reply_info *) reply_data; - u32 *msg = reply->msg; - struct i2o_controller *c = NULL; - unsigned long flags; - - daemonize("i2oevtd"); - allow_signal(SIGKILL); - - evt_running = 1; - - while(1) - { - if(down_interruptible(&evt_sem)) - { - dprintk(KERN_INFO "I2O event thread dead\n"); - printk("exiting..."); - evt_running = 0; - complete_and_exit(&evt_dead, 0); - } - - /* - * Copy the data out of the queue so that we don't have to lock - * around the whole function and just around the qlen update - */ - spin_lock_irqsave(&i2o_evt_lock, flags); - memcpy(reply, &events[evt_out], sizeof(struct reply_info)); - MODINC(evt_out, I2O_EVT_Q_LEN); - evt_q_len--; - spin_unlock_irqrestore(&i2o_evt_lock, flags); - - c = reply->iop; - dprintk(KERN_INFO "I2O IRTOS EVENT: iop%d, event %#10x\n", c->unit, msg[4]); - - /* - * We do not attempt to delete/quiesce/etc. the controller if - * some sort of error indidication occurs. We may want to do - * so in the future, but for now we just let the user deal with - * it. One reason for this is that what to do with an error - * or when to send what ærror is not really agreed on, so - * we get errors that may not be fatal but just look like they - * are...so let the user deal with it. - */ - switch(msg[4]) - { - case I2O_EVT_IND_EXEC_RESOURCE_LIMITS: - printk(KERN_ERR "%s: Out of resources\n", c->name); - break; - - case I2O_EVT_IND_EXEC_POWER_FAIL: - printk(KERN_ERR "%s: Power failure\n", c->name); - break; - - case I2O_EVT_IND_EXEC_HW_FAIL: - { - char *fail[] = - { - "Unknown Error", - "Power Lost", - "Code Violation", - "Parity Error", - "Code Execution Exception", - "Watchdog Timer Expired" - }; - - if(msg[5] <= 6) - printk(KERN_ERR "%s: Hardware Failure: %s\n", - c->name, fail[msg[5]]); - else - printk(KERN_ERR "%s: Unknown Hardware Failure\n", c->name); - - break; - } - - /* - * New device created - * - Create a new i2o_device entry - * - Inform all interested drivers about this device's existence - */ - case I2O_EVT_IND_EXEC_NEW_LCT_ENTRY: - { - struct i2o_device *d = (struct i2o_device *) - kmalloc(sizeof(struct i2o_device), GFP_KERNEL); - int i; - - if (d == NULL) { - printk(KERN_EMERG "i2oevtd: out of memory\n"); - break; - } - memcpy(&d->lct_data, &msg[5], sizeof(i2o_lct_entry)); - - d->next = NULL; - d->controller = c; - d->flags = 0; - - i2o_report_controller_unit(c, d); - i2o_install_device(c,d); - - for(i = 0; i < MAX_I2O_MODULES; i++) - { - if(i2o_handlers[i] && - i2o_handlers[i]->new_dev_notify && - (i2o_handlers[i]->class&d->lct_data.class_id)) - { - spin_lock(&i2o_dev_lock); - i2o_handlers[i]->new_dev_notify(c,d); - spin_unlock(&i2o_dev_lock); - } - } - - break; - } - - /* - * LCT entry for a device has been modified, so update it - * internally. - */ - case I2O_EVT_IND_EXEC_MODIFIED_LCT: - { - struct i2o_device *d; - i2o_lct_entry *new_lct = (i2o_lct_entry *)&msg[5]; - - for(d = c->devices; d; d = d->next) - { - if(d->lct_data.tid == new_lct->tid) - { - memcpy(&d->lct_data, new_lct, sizeof(i2o_lct_entry)); - break; - } - } - break; - } - - case I2O_EVT_IND_CONFIGURATION_FLAG: - printk(KERN_WARNING "%s requires user configuration\n", c->name); - break; - - case I2O_EVT_IND_GENERAL_WARNING: - printk(KERN_WARNING "%s: Warning notification received!" - "Check configuration for errors!\n", c->name); - break; - - case I2O_EVT_IND_EVT_MASK_MODIFIED: - /* Well I guess that was us hey .. 
*/ - break; - - default: - printk(KERN_WARNING "%s: No handler for event (0x%08x)\n", c->name, msg[4]); - break; - } - } - - return 0; -} - -/* - * Dynamic LCT update. This compares the LCT with the currently - * installed devices to check for device deletions..this needed b/c there - * is no DELETED_LCT_ENTRY EventIndicator for the Executive class so - * we can't just have the event handler do this...annoying - * - * This is a hole in the spec that will hopefully be fixed someday. - */ -static int i2o_dyn_lct(void *foo) -{ - struct i2o_controller *c = (struct i2o_controller *)foo; - struct i2o_device *d = NULL; - struct i2o_device *d1 = NULL; - int i = 0; - int found = 0; - int entries; - void *tmp; - - daemonize("iop%d_lctd", c->unit); - allow_signal(SIGKILL); - - c->lct_running = 1; - - while(1) - { - down_interruptible(&c->lct_sem); - if(signal_pending(current)) - { - dprintk(KERN_ERR "%s: LCT thread dead\n", c->name); - c->lct_running = 0; - return 0; - } - - entries = c->dlct->table_size; - entries -= 3; - entries /= 9; - - dprintk(KERN_INFO "%s: Dynamic LCT Update\n",c->name); - dprintk(KERN_INFO "%s: Dynamic LCT contains %d entries\n", c->name, entries); - - if(!entries) - { - printk(KERN_INFO "%s: Empty LCT???\n", c->name); - continue; - } - - /* - * Loop through all the devices on the IOP looking for their - * LCT data in the LCT. We assume that TIDs are not repeated. - * as that is the only way to really tell. It's been confirmed - * by the IRTOS vendor(s?) that TIDs are not reused until they - * wrap arround(4096), and I doubt a system will up long enough - * to create/delete that many devices. - */ - for(d = c->devices; d; ) - { - found = 0; - d1 = d->next; - - for(i = 0; i < entries; i++) - { - if(d->lct_data.tid == c->dlct->lct_entry[i].tid) - { - found = 1; - break; - } - } - if(!found) - { - dprintk(KERN_INFO "i2o_core: Deleted device!\n"); - spin_lock(&i2o_dev_lock); - i2o_delete_device(d); - spin_unlock(&i2o_dev_lock); - } - d = d1; - } - - /* - * Tell LCT to renotify us next time there is a change - */ - i2o_lct_notify(c); - - /* - * Copy new LCT into public LCT - * - * Possible race if someone is reading LCT while we are copying - * over it. If this happens, we'll fix it then. but I doubt that - * the LCT will get updated often enough or will get read by - * a user often enough to worry. - */ - if(c->lct->table_size < c->dlct->table_size) - { - dma_addr_t phys; - tmp = c->lct; - c->lct = pci_alloc_consistent(c->pdev, c->dlct->table_size<<2, &phys); - if(!c->lct) - { - printk(KERN_ERR "%s: No memory for LCT!\n", c->name); - c->lct = tmp; - continue; - } - pci_free_consistent(tmp, c->lct->table_size << 2, c->lct, c->lct_phys); - c->lct_phys = phys; - } - memcpy(c->lct, c->dlct, c->dlct->table_size<<2); - } - - return 0; -} - -/** - * i2o_run_queue - process pending events on a controller - * @c: controller to process - * - * This is called by the bus specific driver layer when an interrupt - * or poll of this card interface is desired. - */ - -void i2o_run_queue(struct i2o_controller *c) -{ - struct i2o_message *m; - u32 mv; - u32 *msg; - - /* - * Old 960 steppings had a bug in the I2O unit that caused - * the queue to appear empty when it wasn't. 
- */ - if((mv=I2O_REPLY_READ32(c))==0xFFFFFFFF) - mv=I2O_REPLY_READ32(c); - - while(mv!=0xFFFFFFFF) - { - struct i2o_handler *i; - /* Map the message from the page frame map to kernel virtual */ - /* m=(struct i2o_message *)(mv - (unsigned long)c->page_frame_map + (unsigned long)c->page_frame); */ - m=(struct i2o_message *)bus_to_virt(mv); - msg=(u32*)m; - - /* - * Ensure this message is seen coherently but cachably by - * the processor - */ - - pci_dma_sync_single_for_cpu(c->pdev, c->page_frame_map, MSG_FRAME_SIZE, PCI_DMA_FROMDEVICE); - - /* - * Despatch it - */ - - i=i2o_handlers[m->initiator_context&(MAX_I2O_MODULES-1)]; - if(i && i->reply) - i->reply(i,c,m); - else - { - printk(KERN_WARNING "I2O: Spurious reply to handler %d\n", - m->initiator_context&(MAX_I2O_MODULES-1)); - } - i2o_flush_reply(c,mv); - mb(); - - /* That 960 bug again... */ - if((mv=I2O_REPLY_READ32(c))==0xFFFFFFFF) - mv=I2O_REPLY_READ32(c); - } -} - - -/** - * i2o_get_class_name - do i2o class name lookup - * @class: class number - * - * Return a descriptive string for an i2o class - */ - -const char *i2o_get_class_name(int class) -{ - int idx = 16; - static char *i2o_class_name[] = { - "Executive", - "Device Driver Module", - "Block Device", - "Tape Device", - "LAN Interface", - "WAN Interface", - "Fibre Channel Port", - "Fibre Channel Device", - "SCSI Device", - "ATE Port", - "ATE Device", - "Floppy Controller", - "Floppy Device", - "Secondary Bus Port", - "Peer Transport Agent", - "Peer Transport", - "Unknown" - }; - - switch(class&0xFFF) - { - case I2O_CLASS_EXECUTIVE: - idx = 0; break; - case I2O_CLASS_DDM: - idx = 1; break; - case I2O_CLASS_RANDOM_BLOCK_STORAGE: - idx = 2; break; - case I2O_CLASS_SEQUENTIAL_STORAGE: - idx = 3; break; - case I2O_CLASS_LAN: - idx = 4; break; - case I2O_CLASS_WAN: - idx = 5; break; - case I2O_CLASS_FIBRE_CHANNEL_PORT: - idx = 6; break; - case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL: - idx = 7; break; - case I2O_CLASS_SCSI_PERIPHERAL: - idx = 8; break; - case I2O_CLASS_ATE_PORT: - idx = 9; break; - case I2O_CLASS_ATE_PERIPHERAL: - idx = 10; break; - case I2O_CLASS_FLOPPY_CONTROLLER: - idx = 11; break; - case I2O_CLASS_FLOPPY_DEVICE: - idx = 12; break; - case I2O_CLASS_BUS_ADAPTER_PORT: - idx = 13; break; - case I2O_CLASS_PEER_TRANSPORT_AGENT: - idx = 14; break; - case I2O_CLASS_PEER_TRANSPORT: - idx = 15; break; - } - - return i2o_class_name[idx]; -} - - -/** - * i2o_wait_message - obtain an i2o message from the IOP - * @c: controller - * @why: explanation - * - * This function waits up to 5 seconds for a message slot to be - * available. If no message is available it prints an error message - * that is expected to be what the message will be used for (eg - * "get_status"). 0xFFFFFFFF is returned on a failure. - * - * On a success the message is returned. This is the physical page - * frame offset address from the read port. (See the i2o spec) - */ - -u32 i2o_wait_message(struct i2o_controller *c, char *why) -{ - long time=jiffies; - u32 m; - while((m=I2O_POST_READ32(c))==0xFFFFFFFF) - { - if((jiffies-time)>=5*HZ) - { - dprintk(KERN_ERR "%s: Timeout waiting for message frame to send %s.\n", - c->name, why); - return 0xFFFFFFFF; - } - schedule(); - barrier(); - } - return m; -} - -/** - * i2o_report_controller_unit - print information about a tid - * @c: controller - * @d: device - * - * Dump an information block associated with a given unit (TID). The - * tables are read and a block of text is output to printk that is - * formatted intended for the user. 
- */ - -void i2o_report_controller_unit(struct i2o_controller *c, struct i2o_device *d) -{ - char buf[64]; - char str[22]; - int ret; - int unit = d->lct_data.tid; - - if(verbose==0) - return; - - printk(KERN_INFO "Target ID %d.\n", unit); - if((ret=i2o_query_scalar(c, unit, 0xF100, 3, buf, 16))>=0) - { - buf[16]=0; - printk(KERN_INFO " Vendor: %s\n", buf); - } - if((ret=i2o_query_scalar(c, unit, 0xF100, 4, buf, 16))>=0) - { - buf[16]=0; - printk(KERN_INFO " Device: %s\n", buf); - } - if(i2o_query_scalar(c, unit, 0xF100, 5, buf, 16)>=0) - { - buf[16]=0; - printk(KERN_INFO " Description: %s\n", buf); - } - if((ret=i2o_query_scalar(c, unit, 0xF100, 6, buf, 8))>=0) - { - buf[8]=0; - printk(KERN_INFO " Rev: %s\n", buf); - } - - printk(KERN_INFO " Class: "); - sprintf(str, "%-21s", i2o_get_class_name(d->lct_data.class_id)); - printk("%s\n", str); - - printk(KERN_INFO " Subclass: 0x%04X\n", d->lct_data.sub_class); - printk(KERN_INFO " Flags: "); - - if(d->lct_data.device_flags&(1<<0)) - printk("C"); // ConfigDialog requested - if(d->lct_data.device_flags&(1<<1)) - printk("U"); // Multi-user capable - if(!(d->lct_data.device_flags&(1<<4))) - printk("P"); // Peer service enabled! - if(!(d->lct_data.device_flags&(1<<5))) - printk("M"); // Mgmt service enabled! - printk("\n"); - -} - - -/* - * Parse the hardware resource table. Right now we print it out - * and don't do a lot with it. We should collate these and then - * interact with the Linux resource allocation block. - * - * Lets prove we can read it first eh ? - * - * This is full of endianisms! - */ - -static int i2o_parse_hrt(struct i2o_controller *c) -{ -#ifdef DRIVERDEBUG - u32 *rows=(u32*)c->hrt; - u8 *p=(u8 *)c->hrt; - u8 *d; - int count; - int length; - int i; - int state; - - if(p[3]!=0) - { - printk(KERN_ERR "%s: HRT table for controller is too new a version.\n", - c->name); - return -1; - } - - count=p[0]|(p[1]<<8); - length = p[2]; - - printk(KERN_INFO "%s: HRT has %d entries of %d bytes each.\n", - c->name, count, length<<2); - - rows+=2; - - for(i=0;i>=12; - if(state&(1<<0)) - printk("H"); /* Hidden */ - if(state&(1<<2)) - { - printk("P"); /* Present */ - if(state&(1<<1)) - printk("C"); /* Controlled */ - } - if(state>9) - printk("*"); /* Hard */ - - printk("]:"); - - switch(p[3]&0xFFFF) - { - case 0: - /* Adapter private bus - easy */ - printk("Local bus %d: I/O at 0x%04X Mem 0x%08X", - p[2], d[1]<<8|d[0], *(u32 *)(d+4)); - break; - case 1: - /* ISA bus */ - printk("ISA %d: CSN %d I/O at 0x%04X Mem 0x%08X", - p[2], d[2], d[1]<<8|d[0], *(u32 *)(d+4)); - break; - - case 2: /* EISA bus */ - printk("EISA %d: Slot %d I/O at 0x%04X Mem 0x%08X", - p[2], d[3], d[1]<<8|d[0], *(u32 *)(d+4)); - break; - - case 3: /* MCA bus */ - printk("MCA %d: Slot %d I/O at 0x%04X Mem 0x%08X", - p[2], d[3], d[1]<<8|d[0], *(u32 *)(d+4)); - break; - - case 4: /* PCI bus */ - printk("PCI %d: Bus %d Device %d Function %d", - p[2], d[2], d[1], d[0]); - break; - - case 0x80: /* Other */ - default: - printk("Unsupported bus type."); - break; - } - printk("\n"); - rows+=length; - } -#endif - return 0; -} - -/* - * The logical configuration table tells us what we can talk to - * on the board. Most of the stuff isn't interesting to us. 
- */ - -static int i2o_parse_lct(struct i2o_controller *c) -{ - int i; - int max; - int tid; - struct i2o_device *d; - i2o_lct *lct = c->lct; - - if (lct == NULL) { - printk(KERN_ERR "%s: LCT is empty???\n", c->name); - return -1; - } - - max = lct->table_size; - max -= 3; - max /= 9; - - printk(KERN_INFO "%s: LCT has %d entries.\n", c->name, max); - - if(lct->iop_flags&(1<<0)) - printk(KERN_WARNING "%s: Configuration dialog desired.\n", c->name); - - for(i=0;icontroller = c; - d->next = NULL; - - memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry)); - - d->flags = 0; - tid = d->lct_data.tid; - - i2o_report_controller_unit(c, d); - - i2o_install_device(c, d); - } - return 0; -} - - -/** - * i2o_quiesce_controller - quiesce controller - * @c: controller - * - * Quiesce an IOP. Causes IOP to make external operation quiescent - * (i2o 'READY' state). Internal operation of the IOP continues normally. - */ - -int i2o_quiesce_controller(struct i2o_controller *c) -{ - u32 msg[4]; - int ret; - - i2o_status_get(c); - - /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */ - - if ((c->status_block->iop_state != ADAPTER_STATE_READY) && - (c->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)) - { - return 0; - } - - msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0; - msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID; - msg[3] = 0; - - /* Long timeout needed for quiesce if lots of devices */ - - if ((ret = i2o_post_wait(c, msg, sizeof(msg), 240))) - printk(KERN_INFO "%s: Unable to quiesce (status=%#x).\n", - c->name, -ret); - else - dprintk(KERN_INFO "%s: Quiesced.\n", c->name); - - i2o_status_get(c); // Entered READY state - return ret; -} - -/** - * i2o_enable_controller - move controller from ready to operational - * @c: controller - * - * Enable IOP. This allows the IOP to resume external operations and - * reverses the effect of a quiesce. In the event of an error a negative - * errno code is returned. - */ - -int i2o_enable_controller(struct i2o_controller *c) -{ - u32 msg[4]; - int ret; - - i2o_status_get(c); - - /* Enable only allowed on READY state */ - if(c->status_block->iop_state != ADAPTER_STATE_READY) - return -EINVAL; - - msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0; - msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID; - - /* How long of a timeout do we need? */ - - if ((ret = i2o_post_wait(c, msg, sizeof(msg), 240))) - printk(KERN_ERR "%s: Could not enable (status=%#x).\n", - c->name, -ret); - else - dprintk(KERN_INFO "%s: Enabled.\n", c->name); - - i2o_status_get(c); // entered OPERATIONAL state - - return ret; -} - -/** - * i2o_clear_controller - clear a controller - * @c: controller - * - * Clear an IOP to HOLD state, ie. terminate external operations, clear all - * input queues and prepare for a system restart. IOP's internal operation - * continues normally and the outbound queue is alive. - * The IOP is not expected to rebuild its LCT. 
- */ - -int i2o_clear_controller(struct i2o_controller *c) -{ - struct i2o_controller *iop; - u32 msg[4]; - int ret; - - /* Quiesce all IOPs first */ - - for (iop = i2o_controller_chain; iop; iop = iop->next) - i2o_quiesce_controller(iop); - - msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0; - msg[1]=I2O_CMD_ADAPTER_CLEAR<<24|HOST_TID<<12|ADAPTER_TID; - msg[3]=0; - - if ((ret=i2o_post_wait(c, msg, sizeof(msg), 30))) - printk(KERN_INFO "%s: Unable to clear (status=%#x).\n", - c->name, -ret); - else - dprintk(KERN_INFO "%s: Cleared.\n",c->name); - - i2o_status_get(c); - - /* Enable other IOPs */ - - for (iop = i2o_controller_chain; iop; iop = iop->next) - if (iop != c) - i2o_enable_controller(iop); - - return ret; -} - - -/** - * i2o_reset_controller - reset an IOP - * @c: controller to reset - * - * Reset the IOP into INIT state and wait until IOP gets into RESET state. - * Terminate all external operations, clear IOP's inbound and outbound - * queues, terminate all DDMs, and reload the IOP's operating environment - * and all local DDMs. The IOP rebuilds its LCT. - */ - -static int i2o_reset_controller(struct i2o_controller *c) -{ - struct i2o_controller *iop; - u32 m; - u8 *status; - dma_addr_t status_phys; - u32 *msg; - long time; - - /* Quiesce all IOPs first */ - - for (iop = i2o_controller_chain; iop; iop = iop->next) - { - if(!iop->dpt) - i2o_quiesce_controller(iop); - } - - m=i2o_wait_message(c, "AdapterReset"); - if(m==0xFFFFFFFF) - return -ETIMEDOUT; - msg=(u32 *)(c->msg_virt+m); - - status = pci_alloc_consistent(c->pdev, 4, &status_phys); - if(status == NULL) { - printk(KERN_ERR "IOP reset failed - no free memory.\n"); - return -ENOMEM; - } - memset(status, 0, 4); - - msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0; - msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID; - msg[2]=core_context; - msg[3]=0; - msg[4]=0; - msg[5]=0; - msg[6]=status_phys; - msg[7]=0; /* 64bit host FIXME */ - - i2o_post_message(c,m); - - /* Wait for a reply */ - time=jiffies; - while(*status==0) - { - if((jiffies-time)>=20*HZ) - { - printk(KERN_ERR "IOP reset timeout.\n"); - /* The controller still may respond and overwrite - * status_phys, LEAK it to prevent memory corruption. - */ - return -ETIMEDOUT; - } - schedule(); - barrier(); - } - - if (*status==I2O_CMD_IN_PROGRESS) - { - /* - * Once the reset is sent, the IOP goes into the INIT state - * which is indeterminate. We need to wait until the IOP - * has rebooted before we can let the system talk to - * it. We read the inbound Free_List until a message is - * available. If we can't read one in the given ammount of - * time, we assume the IOP could not reboot properly. - */ - - dprintk(KERN_INFO "%s: Reset in progress, waiting for reboot...\n", - c->name); - - time = jiffies; - m = I2O_POST_READ32(c); - while(m == 0XFFFFFFFF) - { - if((jiffies-time) >= 30*HZ) - { - printk(KERN_ERR "%s: Timeout waiting for IOP reset.\n", - c->name); - /* The controller still may respond and - * overwrite status_phys, LEAK it to prevent - * memory corruption. 
- */ - return -ETIMEDOUT; - } - schedule(); - barrier(); - m = I2O_POST_READ32(c); - } - i2o_flush_reply(c,m); - } - - /* If IopReset was rejected or didn't perform reset, try IopClear */ - - i2o_status_get(c); - if (status[0] == I2O_CMD_REJECTED || - c->status_block->iop_state != ADAPTER_STATE_RESET) - { - printk(KERN_WARNING "%s: Reset rejected, trying to clear\n",c->name); - i2o_clear_controller(c); - } - else - dprintk(KERN_INFO "%s: Reset completed.\n", c->name); - - /* Enable other IOPs */ - - for (iop = i2o_controller_chain; iop; iop = iop->next) - if (iop != c) - i2o_enable_controller(iop); - - pci_free_consistent(c->pdev, 4, status, status_phys); - return 0; -} - - -/** - * i2o_status_get - get the status block for the IOP - * @c: controller - * - * Issue a status query on the controller. This updates the - * attached status_block. If the controller fails to reply or an - * error occurs then a negative errno code is returned. On success - * zero is returned and the status_blok is updated. - */ - -int i2o_status_get(struct i2o_controller *c) -{ - long time; - u32 m; - u32 *msg; - u8 *status_block; - - if (c->status_block == NULL) - { - c->status_block = (i2o_status_block *) - pci_alloc_consistent(c->pdev, sizeof(i2o_status_block), &c->status_block_phys); - if (c->status_block == NULL) - { - printk(KERN_CRIT "%s: Get Status Block failed; Out of memory.\n", - c->name); - return -ENOMEM; - } - } - - status_block = (u8*)c->status_block; - memset(c->status_block,0,sizeof(i2o_status_block)); - - m=i2o_wait_message(c, "StatusGet"); - if(m==0xFFFFFFFF) - return -ETIMEDOUT; - msg=(u32 *)(c->msg_virt+m); - - msg[0]=NINE_WORD_MSG_SIZE|SGL_OFFSET_0; - msg[1]=I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID; - msg[2]=core_context; - msg[3]=0; - msg[4]=0; - msg[5]=0; - msg[6]=c->status_block_phys; - msg[7]=0; /* 64bit host FIXME */ - msg[8]=sizeof(i2o_status_block); /* always 88 bytes */ - - i2o_post_message(c,m); - - /* Wait for a reply */ - - time=jiffies; - while(status_block[87]!=0xFF) - { - if((jiffies-time)>=5*HZ) - { - printk(KERN_ERR "%s: Get status timeout.\n",c->name); - return -ETIMEDOUT; - } - yield(); - barrier(); - } - -#ifdef DRIVERDEBUG - printk(KERN_INFO "%s: State = ", c->name); - switch (c->status_block->iop_state) { - case 0x01: - printk("INIT\n"); - break; - case 0x02: - printk("RESET\n"); - break; - case 0x04: - printk("HOLD\n"); - break; - case 0x05: - printk("READY\n"); - break; - case 0x08: - printk("OPERATIONAL\n"); - break; - case 0x10: - printk("FAILED\n"); - break; - case 0x11: - printk("FAULTED\n"); - break; - default: - printk("%x (unknown !!)\n",c->status_block->iop_state); -} -#endif - - return 0; -} - -/* - * Get the Hardware Resource Table for the device. - * The HRT contains information about possible hidden devices - * but is mostly useless to us - */ -int i2o_hrt_get(struct i2o_controller *c) -{ - u32 msg[6]; - int ret, size = sizeof(i2o_hrt); - int loops = 3; /* we only try 3 times to get the HRT, this should be - more then enough. 
Worst case should be 2 times.*/ - - /* First read just the header to figure out the real size */ - - do { - /* first we allocate the memory for the HRT */ - if (c->hrt == NULL) { - c->hrt=pci_alloc_consistent(c->pdev, size, &c->hrt_phys); - if (c->hrt == NULL) { - printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", c->name); - return -ENOMEM; - } - c->hrt_len = size; - } - - msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4; - msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID; - msg[3]= 0; - msg[4]= (0xD0000000 | c->hrt_len); /* Simple transaction */ - msg[5]= c->hrt_phys; /* Dump it here */ - - ret = i2o_post_wait_mem(c, msg, sizeof(msg), 20, c->hrt, NULL, c->hrt_phys, 0, c->hrt_len, 0); - - if(ret == -ETIMEDOUT) - { - /* The HRT block we used is in limbo somewhere. When the iop wakes up - we will recover it */ - c->hrt = NULL; - c->hrt_len = 0; - return ret; - } - - if(ret<0) - { - printk(KERN_ERR "%s: Unable to get HRT (status=%#x)\n", - c->name, -ret); - return ret; - } - - if (c->hrt->num_entries * c->hrt->entry_len << 2 > c->hrt_len) { - size = c->hrt->num_entries * c->hrt->entry_len << 2; - pci_free_consistent(c->pdev, c->hrt_len, c->hrt, c->hrt_phys); - c->hrt_len = 0; - c->hrt = NULL; - } - loops --; - } while (c->hrt == NULL && loops > 0); - - if(c->hrt == NULL) - { - printk(KERN_ERR "%s: Unable to get HRT after three tries, giving up\n", c->name); - return -1; - } - - i2o_parse_hrt(c); // just for debugging - - return 0; -} - -/* - * Send the I2O System Table to the specified IOP - * - * The system table contains information about all the IOPs in the - * system. It is build and then sent to each IOP so that IOPs can - * establish connections between each other. - * - */ -static int i2o_systab_send(struct i2o_controller *iop) -{ - u32 msg[12]; - dma_addr_t sys_tbl_phys; - int ret; - struct resource *root; - u32 *privbuf = kmalloc(16, GFP_KERNEL); - if(privbuf == NULL) - return -ENOMEM; - - - if(iop->status_block->current_mem_size < iop->status_block->desired_mem_size) - { - struct resource *res = &iop->mem_resource; - res->name = iop->pdev->bus->name; - res->flags = IORESOURCE_MEM; - res->start = 0; - res->end = 0; - printk("%s: requires private memory resources.\n", iop->name); - root = pci_find_parent_resource(iop->pdev, res); - if(root==NULL) - printk("Can't find parent resource!\n"); - if(root && allocate_resource(root, res, - iop->status_block->desired_mem_size, - iop->status_block->desired_mem_size, - iop->status_block->desired_mem_size, - 1<<20, /* Unspecified, so use 1Mb and play safe */ - NULL, - NULL)>=0) - { - iop->mem_alloc = 1; - iop->status_block->current_mem_size = 1 + res->end - res->start; - iop->status_block->current_mem_base = res->start; - printk(KERN_INFO "%s: allocated %ld bytes of PCI memory at 0x%08lX.\n", - iop->name, 1+res->end-res->start, res->start); - } - } - if(iop->status_block->current_io_size < iop->status_block->desired_io_size) - { - struct resource *res = &iop->io_resource; - res->name = iop->pdev->bus->name; - res->flags = IORESOURCE_IO; - res->start = 0; - res->end = 0; - printk("%s: requires private memory resources.\n", iop->name); - root = pci_find_parent_resource(iop->pdev, res); - if(root==NULL) - printk("Can't find parent resource!\n"); - if(root && allocate_resource(root, res, - iop->status_block->desired_io_size, - iop->status_block->desired_io_size, - iop->status_block->desired_io_size, - 1<<20, /* Unspecified, so use 1Mb and play safe */ - NULL, - NULL)>=0) - { - iop->io_alloc = 1; - iop->status_block->current_io_size = 1 + 
res->end - res->start; - iop->status_block->current_mem_base = res->start; - printk(KERN_INFO "%s: allocated %ld bytes of PCI I/O at 0x%08lX.\n", - iop->name, 1+res->end-res->start, res->start); - } - } - else - { - privbuf[0] = iop->status_block->current_mem_base; - privbuf[1] = iop->status_block->current_mem_size; - privbuf[2] = iop->status_block->current_io_base; - privbuf[3] = iop->status_block->current_io_size; - } - - msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6; - msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID; - msg[3] = 0; - msg[4] = (0<<16) | ((iop->unit+2) ); /* Host 0 IOP ID (unit + 2) */ - msg[5] = 0; /* Segment 0 */ - - /* - * Provide three SGL-elements: - * System table (SysTab), Private memory space declaration and - * Private i/o space declaration - * - * Nasty one here. We can't use pci_alloc_consistent to send the - * same table to everyone. We have to go remap it for them all - */ - - sys_tbl_phys = pci_map_single(iop->pdev, sys_tbl, sys_tbl_len, PCI_DMA_TODEVICE); - msg[6] = 0x54000000 | sys_tbl_phys; - - msg[7] = sys_tbl_phys; - msg[8] = 0x54000000 | privbuf[1]; - msg[9] = privbuf[0]; - msg[10] = 0xD4000000 | privbuf[3]; - msg[11] = privbuf[2]; - - ret=i2o_post_wait(iop, msg, sizeof(msg), 120); - - pci_unmap_single(iop->pdev, sys_tbl_phys, sys_tbl_len, PCI_DMA_TODEVICE); - - if(ret==-ETIMEDOUT) - { - printk(KERN_ERR "%s: SysTab setup timed out.\n", iop->name); - } - else if(ret<0) - { - printk(KERN_ERR "%s: Unable to set SysTab (status=%#x).\n", - iop->name, -ret); - } - else - { - dprintk(KERN_INFO "%s: SysTab set.\n", iop->name); - } - i2o_status_get(iop); // Entered READY state - - kfree(privbuf); - return ret; - - } - -/* - * Initialize I2O subsystem. - */ -void __init i2o_sys_init(void) -{ - struct i2o_controller *iop, *niop = NULL; - - printk(KERN_INFO "Activating I2O controllers...\n"); - printk(KERN_INFO "This may take a few minutes if there are many devices\n"); - - /* In INIT state, Activate IOPs */ - for (iop = i2o_controller_chain; iop; iop = niop) { - dprintk(KERN_INFO "Calling i2o_activate_controller for %s...\n", - iop->name); - niop = iop->next; - if (i2o_activate_controller(iop) < 0) - i2o_delete_controller(iop); - } - - /* Active IOPs in HOLD state */ - -rebuild_sys_tab: - if (i2o_controller_chain == NULL) - return; - - /* - * If build_sys_table fails, we kill everything and bail - * as we can't init the IOPs w/o a system table - */ - dprintk(KERN_INFO "i2o_core: Calling i2o_build_sys_table...\n"); - if (i2o_build_sys_table() < 0) { - i2o_sys_shutdown(); - return; - } - - /* If IOP don't get online, we need to rebuild the System table */ - for (iop = i2o_controller_chain; iop; iop = niop) { - niop = iop->next; - dprintk(KERN_INFO "Calling i2o_online_controller for %s...\n", iop->name); - if (i2o_online_controller(iop) < 0) { - i2o_delete_controller(iop); - goto rebuild_sys_tab; - } - } - - /* Active IOPs now in OPERATIONAL state */ - - /* - * Register for status updates from all IOPs - */ - for(iop = i2o_controller_chain; iop; iop=iop->next) { - - /* Create a kernel thread to deal with dynamic LCT updates */ - iop->lct_pid = kernel_thread(i2o_dyn_lct, iop, CLONE_SIGHAND); - - /* Update change ind on DLCT */ - iop->dlct->change_ind = iop->lct->change_ind; - - /* Start dynamic LCT updates */ - i2o_lct_notify(iop); - - /* Register for all events from IRTOS */ - i2o_event_register(iop, core_context, 0, 0, 0xFFFFFFFF); - } -} - -/** - * i2o_sys_shutdown - shutdown I2O system - * - * Bring down each i2o controller and then return. 
Each controller - * is taken through an orderly shutdown - */ - -static void i2o_sys_shutdown(void) -{ - struct i2o_controller *iop, *niop; - - /* Delete all IOPs from the controller chain */ - /* that will reset all IOPs too */ - - for (iop = i2o_controller_chain; iop; iop = niop) { - niop = iop->next; - i2o_delete_controller(iop); - } -} - -/** - * i2o_activate_controller - bring controller up to HOLD - * @iop: controller - * - * This function brings an I2O controller into HOLD state. The adapter - * is reset if necessary and then the queues and resource table - * are read. -1 is returned on a failure, 0 on success. - * - */ - -int i2o_activate_controller(struct i2o_controller *iop) -{ - /* In INIT state, Wait Inbound Q to initialize (in i2o_status_get) */ - /* In READY state, Get status */ - - if (i2o_status_get(iop) < 0) { - printk(KERN_INFO "Unable to obtain status of %s, " - "attempting a reset.\n", iop->name); - if (i2o_reset_controller(iop) < 0) - return -1; - } - - if(iop->status_block->iop_state == ADAPTER_STATE_FAULTED) { - printk(KERN_CRIT "%s: hardware fault\n", iop->name); - return -1; - } - - if (iop->status_block->i2o_version > I2OVER15) { - printk(KERN_ERR "%s: Not running vrs. 1.5. of the I2O Specification.\n", - iop->name); - return -1; - } - - if (iop->status_block->iop_state == ADAPTER_STATE_READY || - iop->status_block->iop_state == ADAPTER_STATE_OPERATIONAL || - iop->status_block->iop_state == ADAPTER_STATE_HOLD || - iop->status_block->iop_state == ADAPTER_STATE_FAILED) - { - dprintk(KERN_INFO "%s: Already running, trying to reset...\n", - iop->name); - if (i2o_reset_controller(iop) < 0) - return -1; - } - - if (i2o_init_outbound_q(iop) < 0) - return -1; - - if (i2o_post_outbound_messages(iop)) - return -1; - - /* In HOLD state */ - - if (i2o_hrt_get(iop) < 0) - return -1; - - return 0; -} - - -/** - * i2o_init_outbound_queue - setup the outbound queue - * @c: controller - * - * Clear and (re)initialize IOP's outbound queue. Returns 0 on - * success or a negative errno code on a failure. - */ - -int i2o_init_outbound_q(struct i2o_controller *c) -{ - u8 *status; - dma_addr_t status_phys; - u32 m; - u32 *msg; - u32 time; - - dprintk(KERN_INFO "%s: Initializing Outbound Queue...\n", c->name); - m=i2o_wait_message(c, "OutboundInit"); - if(m==0xFFFFFFFF) - return -ETIMEDOUT; - msg=(u32 *)(c->msg_virt+m); - - status = pci_alloc_consistent(c->pdev, 4, &status_phys); - if (status==NULL) { - printk(KERN_ERR "%s: Outbound Queue initialization failed - no free memory.\n", - c->name); - return -ENOMEM; - } - memset(status, 0, 4); - - msg[0]= EIGHT_WORD_MSG_SIZE| TRL_OFFSET_6; - msg[1]= I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID; - msg[2]= core_context; - msg[3]= 0x0106; /* Transaction context */ - msg[4]= 4096; /* Host page frame size */ - /* Frame size is in words. 
256 bytes a frame for now */ - msg[5]= MSG_FRAME_SIZE<<16|0x80; /* Outbound msg frame size in words and Initcode */ - msg[6]= 0xD0000004; /* Simple SG LE, EOB */ - msg[7]= status_phys; - - i2o_post_message(c,m); - - barrier(); - time=jiffies; - while(status[0] < I2O_CMD_REJECTED) - { - if((jiffies-time)>=30*HZ) - { - if(status[0]==0x00) - printk(KERN_ERR "%s: Ignored queue initialize request.\n", - c->name); - else - printk(KERN_ERR "%s: Outbound queue initialize timeout.\n", - c->name); - pci_free_consistent(c->pdev, 4, status, status_phys); - return -ETIMEDOUT; - } - yield(); - barrier(); - } - - if(status[0] != I2O_CMD_COMPLETED) - { - printk(KERN_ERR "%s: IOP outbound initialise failed.\n", c->name); - pci_free_consistent(c->pdev, 4, status, status_phys); - return -ETIMEDOUT; - } - pci_free_consistent(c->pdev, 4, status, status_phys); - return 0; -} - -/** - * i2o_post_outbound_messages - fill message queue - * @c: controller - * - * Allocate a message frame and load the messages into the IOP. The - * function returns zero on success or a negative errno code on - * failure. - */ - -int i2o_post_outbound_messages(struct i2o_controller *c) -{ - int i; - u32 m; - /* Alloc space for IOP's outbound queue message frames */ - - c->page_frame = kmalloc(MSG_POOL_SIZE, GFP_KERNEL); - if(c->page_frame==NULL) { - printk(KERN_ERR "%s: Outbound Q initialize failed; out of memory.\n", - c->name); - return -ENOMEM; - } - - c->page_frame_map = pci_map_single(c->pdev, c->page_frame, MSG_POOL_SIZE, PCI_DMA_FROMDEVICE); - - if(c->page_frame_map == 0) - { - kfree(c->page_frame); - printk(KERN_ERR "%s: Unable to map outbound queue.\n", c->name); - return -ENOMEM; - } - - m = c->page_frame_map; - - /* Post frames */ - - for(i=0; i< NMBR_MSG_FRAMES; i++) { - I2O_REPLY_WRITE32(c,m); - mb(); - m += (MSG_FRAME_SIZE << 2); - } - - return 0; -} - -/* - * Get the IOP's Logical Configuration Table - */ -int i2o_lct_get(struct i2o_controller *c) -{ - u32 msg[8]; - int ret, size = c->status_block->expected_lct_size; - - do { - if (c->lct == NULL) { - c->lct = pci_alloc_consistent(c->pdev, size, &c->lct_phys); - if(c->lct == NULL) { - printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n", - c->name); - return -ENOMEM; - } - } - memset(c->lct, 0, size); - - msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6; - msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID; - /* msg[2] filled in i2o_post_wait */ - msg[3] = 0; - msg[4] = 0xFFFFFFFF; /* All devices */ - msg[5] = 0x00000000; /* Report now */ - msg[6] = 0xD0000000|size; - msg[7] = c->lct_phys; - - ret=i2o_post_wait_mem(c, msg, sizeof(msg), 120, c->lct, NULL, c->lct_phys, 0, size, 0); - - if(ret == -ETIMEDOUT) - { - c->lct = NULL; - return ret; - } - - if(ret<0) - { - printk(KERN_ERR "%s: LCT Get failed (status=%#x.\n", - c->name, -ret); - return ret; - } - - if (c->lct->table_size << 2 > size) { - int new_size = c->lct->table_size << 2; - pci_free_consistent(c->pdev, size, c->lct, c->lct_phys); - size = new_size; - c->lct = NULL; - } - } while (c->lct == NULL); - - if ((ret=i2o_parse_lct(c)) < 0) - return ret; - - return 0; -} - -/* - * Like above, but used for async notification. The main - * difference is that we keep track of the CurrentChangeIndiicator - * so that we only get updates when it actually changes. 
- *
- */
-int i2o_lct_notify(struct i2o_controller *c)
-{
-	u32 msg[8];
-
-	msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
-	msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
-	msg[2] = core_context;
-	msg[3] = 0xDEADBEEF;
-	msg[4] = 0xFFFFFFFF; /* All devices */
-	msg[5] = c->dlct->change_ind+1; /* Next change */
-	msg[6] = 0xD0000000|8192;
-	msg[7] = c->dlct_phys;
-
-	return i2o_post_this(c, msg, sizeof(msg));
-}
-
-/*
- * Bring a controller online into OPERATIONAL state.
- */
-
-int i2o_online_controller(struct i2o_controller *iop)
-{
-	u32 v;
-
-	if (i2o_systab_send(iop) < 0)
-		return -1;
-
-	/* In READY state */
-
-	dprintk(KERN_INFO "%s: Attempting to enable...\n", iop->name);
-	if (i2o_enable_controller(iop) < 0)
-		return -1;
-
-	/* In OPERATIONAL state */
-
-	dprintk(KERN_INFO "%s: Attempting to get/parse lct...\n", iop->name);
-	if (i2o_lct_get(iop) < 0)
-		return -1;
-
-	/* Check battery status */
-
-	iop->battery = 0;
-	if(i2o_query_scalar(iop, ADAPTER_TID, 0x0000, 4, &v, 4)>=0)
-	{
-		if(v&16)
-			iop->battery = 1;
-	}
-
-	return 0;
-}
-
-/*
- * Build system table
- *
- * The system table contains information about all the IOPs in the
- * system (duh) and is used by the Executives on the IOPs to establish
- * peer2peer connections. We're not supporting peer2peer at the moment,
- * but this will be needed down the road for things like lan2lan forwarding.
- */
-static int i2o_build_sys_table(void)
-{
-	struct i2o_controller *iop = NULL;
-	struct i2o_controller *niop = NULL;
-	int count = 0;
-
-	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
-			(i2o_num_controllers) *
-				sizeof(struct i2o_sys_tbl_entry);
-
-	if(sys_tbl)
-		kfree(sys_tbl);
-
-	sys_tbl = kmalloc(sys_tbl_len, GFP_KERNEL);
-	if(!sys_tbl) {
-		printk(KERN_CRIT "SysTab Set failed. Out of memory.\n");
-		return -ENOMEM;
-	}
-	memset((void*)sys_tbl, 0, sys_tbl_len);
-
-	sys_tbl->num_entries = i2o_num_controllers;
-	sys_tbl->version = I2OVERSION; /* TODO: Version 2.0 */
-	sys_tbl->change_ind = sys_tbl_ind++;
-
-	for(iop = i2o_controller_chain; iop; iop = niop)
-	{
-		niop = iop->next;
-
-		/*
-		 * Get updated IOP state so we have the latest information
-		 *
-		 * We should delete the controller at this point if it
-		 * doesn't respond since if it's not on the system table
-		 * it is technically not part of the I2O subsystem...
-		 */
-		if(i2o_status_get(iop)) {
-			printk(KERN_ERR "%s: Deleting b/c could not get status while"
-				" attempting to build system table\n", iop->name);
-			i2o_delete_controller(iop);
-			sys_tbl->num_entries--;
-			continue; // try the next one
-		}
-
-		sys_tbl->iops[count].org_id = iop->status_block->org_id;
-		sys_tbl->iops[count].iop_id = iop->unit + 2;
-		sys_tbl->iops[count].seg_num = 0;
-		sys_tbl->iops[count].i2o_version =
-				iop->status_block->i2o_version;
-		sys_tbl->iops[count].iop_state =
-				iop->status_block->iop_state;
-		sys_tbl->iops[count].msg_type =
-				iop->status_block->msg_type;
-		sys_tbl->iops[count].frame_size =
-				iop->status_block->inbound_frame_size;
-		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
- sys_tbl->iops[count].iop_capabilities = - iop->status_block->iop_capabilities; - sys_tbl->iops[count].inbound_low = (u32)iop->post_port; - sys_tbl->iops[count].inbound_high = 0; // FIXME: 64-bit support - - count++; - } - -#ifdef DRIVERDEBUG -{ - u32 *table; - table = (u32*)sys_tbl; - for(count = 0; count < (sys_tbl_len >>2); count++) - printk(KERN_INFO "sys_tbl[%d] = %0#10x\n", count, table[count]); -} -#endif - - return 0; -} - - -/* - * Run time support routines - */ - -/* - * Generic "post and forget" helpers. This is less efficient - we do - * a memcpy for example that isnt strictly needed, but for most uses - * this is simply not worth optimising - */ - -int i2o_post_this(struct i2o_controller *c, u32 *data, int len) -{ - u32 m; - u32 *msg; - unsigned long t=jiffies; - - do - { - mb(); - m = I2O_POST_READ32(c); - } - while(m==0xFFFFFFFF && (jiffies-t)name); - return -ETIMEDOUT; - } - msg = (u32 *)(c->msg_virt + m); - memcpy_toio(msg, data, len); - i2o_post_message(c,m); - return 0; -} - -/** - * i2o_post_wait_mem - I2O query/reply with DMA buffers - * @c: controller - * @msg: message to send - * @len: length of message - * @timeout: time in seconds to wait - * @mem1: attached memory buffer 1 - * @mem2: attached memory buffer 2 - * @phys1: physical address of buffer 1 - * @phys2: physical address of buffer 2 - * @size1: size of buffer 1 - * @size2: size of buffer 2 - * - * This core API allows an OSM to post a message and then be told whether - * or not the system received a successful reply. - * - * If the message times out then the value '-ETIMEDOUT' is returned. This - * is a special case. In this situation the message may (should) complete - * at an indefinite time in the future. When it completes it will use the - * memory buffers attached to the request. If -ETIMEDOUT is returned then - * the memory buffers must not be freed. Instead the event completion will - * free them for you. In all other cases the buffers are your problem. - * - * Pass NULL for unneeded buffers. - */ - -int i2o_post_wait_mem(struct i2o_controller *c, u32 *msg, int len, int timeout, void *mem1, void *mem2, dma_addr_t phys1, dma_addr_t phys2, int size1, int size2) -{ - DECLARE_WAIT_QUEUE_HEAD(wq_i2o_post); - DECLARE_WAITQUEUE(wait, current); - int complete = 0; - int status; - unsigned long flags = 0; - struct i2o_post_wait_data *wait_data = - kmalloc(sizeof(struct i2o_post_wait_data), GFP_KERNEL); - - if(!wait_data) - return -ENOMEM; - - /* - * Create a new notification object - */ - wait_data->status = &status; - wait_data->complete = &complete; - wait_data->mem[0] = mem1; - wait_data->mem[1] = mem2; - wait_data->phys[0] = phys1; - wait_data->phys[1] = phys2; - wait_data->size[0] = size1; - wait_data->size[1] = size2; - - /* - * Queue the event with its unique id - */ - spin_lock_irqsave(&post_wait_lock, flags); - - wait_data->next = post_wait_queue; - post_wait_queue = wait_data; - wait_data->id = (++post_wait_id) & 0x7fff; - wait_data->wq = &wq_i2o_post; - - spin_unlock_irqrestore(&post_wait_lock, flags); - - /* - * Fill in the message id - */ - - msg[2] = 0x80000000|(u32)core_context|((u32)wait_data->id<<16); - - /* - * Post the message to the controller. At some point later it - * will return. If we time out before it returns then - * complete will be zero. From the point post_this returns - * the wait_data may have been deleted. 
- */ - - add_wait_queue(&wq_i2o_post, &wait); - set_current_state(TASK_INTERRUPTIBLE); - if ((status = i2o_post_this(c, msg, len))==0) { - schedule_timeout(HZ * timeout); - } - else - { - remove_wait_queue(&wq_i2o_post, &wait); - return -EIO; - } - remove_wait_queue(&wq_i2o_post, &wait); - - if(signal_pending(current)) - status = -EINTR; - - spin_lock_irqsave(&post_wait_lock, flags); - barrier(); /* Be sure we see complete as it is locked */ - if(!complete) - { - /* - * Mark the entry dead. We cannot remove it. This is important. - * When it does terminate (which it must do if the controller hasnt - * died..) then it will otherwise scribble on stuff. - * !complete lets us safely check if the entry is still - * allocated and thus we can write into it - */ - wait_data->wq = NULL; - status = -ETIMEDOUT; - } - else - { - /* Debugging check - remove me soon */ - if(status == -ETIMEDOUT) - { - printk("TIMEDOUT BUG!\n"); - status = -EIO; - } - } - /* And the wait_data is not leaked either! */ - spin_unlock_irqrestore(&post_wait_lock, flags); - return status; -} - -/** - * i2o_post_wait - I2O query/reply - * @c: controller - * @msg: message to send - * @len: length of message - * @timeout: time in seconds to wait - * - * This core API allows an OSM to post a message and then be told whether - * or not the system received a successful reply. - */ - -int i2o_post_wait(struct i2o_controller *c, u32 *msg, int len, int timeout) -{ - return i2o_post_wait_mem(c, msg, len, timeout, NULL, NULL, 0, 0, 0, 0); -} - -/* - * i2o_post_wait is completed and we want to wake up the - * sleeping proccess. Called by core's reply handler. - */ - -static void i2o_post_wait_complete(struct i2o_controller *c, u32 context, int status) -{ - struct i2o_post_wait_data **p1, *q; - unsigned long flags; - - /* - * We need to search through the post_wait - * queue to see if the given message is still - * outstanding. If not, it means that the IOP - * took longer to respond to the message than we - * had allowed and timer has already expired. - * Not much we can do about that except log - * it for debug purposes, increase timeout, and recompile - * - * Lock needed to keep anyone from moving queue pointers - * around while we're looking through them. - */ - - spin_lock_irqsave(&post_wait_lock, flags); - - for(p1 = &post_wait_queue; *p1!=NULL; p1 = &((*p1)->next)) - { - q = (*p1); - if(q->id == ((context >> 16) & 0x7fff)) { - /* - * Delete it - */ - - *p1 = q->next; - - /* - * Live or dead ? - */ - - if(q->wq) - { - /* Live entry - wakeup and set status */ - *q->status = status; - *q->complete = 1; - wake_up(q->wq); - } - else - { - /* - * Free resources. Caller is dead - */ - - if(q->mem[0]) - pci_free_consistent(c->pdev, q->size[0], q->mem[0], q->phys[0]); - if(q->mem[1]) - pci_free_consistent(c->pdev, q->size[1], q->mem[1], q->phys[1]); - - printk(KERN_WARNING "i2o_post_wait event completed after timeout.\n"); - } - kfree(q); - spin_unlock(&post_wait_lock); - return; - } - } - spin_unlock(&post_wait_lock); - - printk(KERN_DEBUG "i2o_post_wait: Bogus reply!\n"); -} - -/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET - * - * This function can be used for all UtilParamsGet/Set operations. - * The OperationList is given in oplist-buffer, - * and results are returned in reslist-buffer. - * Note that the minimum sized reslist is 8 bytes and contains - * ResultCount, ErrorInfoSize, BlockStatus and BlockSize. 
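
A hedged sketch of the buffer-ownership rule described above, using i2o_post_wait_mem() to read a block into a DMA buffer; the command code, SGL flags and timeout are placeholders borrowed from the HRT fetch earlier in this file, and the function name is hypothetical:

	/* Illustration only: honour the -ETIMEDOUT buffer-ownership rule */
	static int example_fetch_block(struct i2o_controller *c, void *out, int len)
	{
		u32 msg[6];
		dma_addr_t phys;
		void *buf;
		int rc;

		buf = pci_alloc_consistent(c->pdev, len, &phys);
		if (buf == NULL)
			return -ENOMEM;

		msg[0] = SIX_WORD_MSG_SIZE | SGL_OFFSET_4;
		msg[1] = I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[3] = 0;
		msg[4] = 0xD0000000 | len;	/* simple SGL element, end of buffer */
		msg[5] = phys;

		/* msg[2] (initiator context/id) is filled in by the post_wait core */
		rc = i2o_post_wait_mem(c, msg, sizeof(msg), 20, buf, NULL, phys, 0, len, 0);
		if (rc == -ETIMEDOUT)
			return rc;	/* buffer now belongs to the core, must NOT free it */

		if (rc == 0)
			memcpy(out, buf, len);
		pci_free_consistent(c->pdev, len, buf, phys);
		return rc;
	}
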
- */ - -int i2o_issue_params(int cmd, struct i2o_controller *iop, int tid, - void *oplist, int oplen, void *reslist, int reslen) -{ - u32 msg[9]; - u32 *res32 = (u32*)reslist; - u32 *restmp = (u32*)reslist; - int len = 0; - int i = 0; - int wait_status; - u32 *opmem, *resmem; - dma_addr_t opmem_phys, resmem_phys; - - /* Get DMAable memory */ - opmem = pci_alloc_consistent(iop->pdev, oplen, &opmem_phys); - if(opmem == NULL) - return -ENOMEM; - memcpy(opmem, oplist, oplen); - - resmem = pci_alloc_consistent(iop->pdev, reslen, &resmem_phys); - if(resmem == NULL) - { - pci_free_consistent(iop->pdev, oplen, opmem, opmem_phys); - return -ENOMEM; - } - - msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5; - msg[1] = cmd << 24 | HOST_TID << 12 | tid; - msg[3] = 0; - msg[4] = 0; - msg[5] = 0x54000000 | oplen; /* OperationList */ - msg[6] = opmem_phys; - msg[7] = 0xD0000000 | reslen; /* ResultList */ - msg[8] = resmem_phys; - - wait_status = i2o_post_wait_mem(iop, msg, sizeof(msg), 10, opmem, resmem, opmem_phys, resmem_phys, oplen, reslen); - - /* - * This only looks like a memory leak - don't "fix" it. - */ - if(wait_status == -ETIMEDOUT) - return wait_status; - - memcpy(reslist, resmem, reslen); - pci_free_consistent(iop->pdev, reslen, resmem, resmem_phys); - pci_free_consistent(iop->pdev, oplen, opmem, opmem_phys); - - /* Query failed */ - if(wait_status != 0) - return wait_status; - /* - * Calculate number of bytes of Result LIST - * We need to loop through each Result BLOCK and grab the length - */ - restmp = res32 + 1; - len = 1; - for(i = 0; i < (res32[0]&0X0000FFFF); i++) - { - if(restmp[0]&0x00FF0000) /* BlockStatus != SUCCESS */ - { - printk(KERN_WARNING "%s - Error:\n ErrorInfoSize = 0x%02x, " - "BlockStatus = 0x%02x, BlockSize = 0x%04x\n", - (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET" - : "PARAMS_GET", - res32[1]>>24, (res32[1]>>16)&0xFF, res32[1]&0xFFFF); - - /* - * If this is the only request,than we return an error - */ - if((res32[0]&0x0000FFFF) == 1) - { - return -((res32[1] >> 16) & 0xFF); /* -BlockStatus */ - } - } - len += restmp[0] & 0x0000FFFF; /* Length of res BLOCK */ - restmp += restmp[0] & 0x0000FFFF; /* Skip to next BLOCK */ - } - return (len << 2); /* bytes used by result list */ -} - -/* - * Query one scalar group value or a whole scalar group. - */ -int i2o_query_scalar(struct i2o_controller *iop, int tid, - int group, int field, void *buf, int buflen) -{ - u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field }; - u8 resblk[8+buflen]; /* 8 bytes for header */ - int size; - - if (field == -1) /* whole group */ - opblk[4] = -1; - - size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, iop, tid, - opblk, sizeof(opblk), resblk, sizeof(resblk)); - - memcpy(buf, resblk+8, buflen); /* cut off header */ - - if(size>buflen) - return buflen; - return size; -} - -/* - * Set a scalar group value or a whole group. 
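
For completeness, a small illustrative caller of i2o_query_scalar(); the group and field numbers are the DDM identity group already read by i2o_report_controller_unit() above, while the function name and the 64-byte whole-group buffer are assumptions for the example:

	/* Illustration only: single-field and whole-group scalar reads */
	static void example_dump_identity(struct i2o_controller *c, int tid)
	{
		char vendor[17];
		u8 group[64];

		/* One field: group 0xF100, field 3 = vendor string */
		if (i2o_query_scalar(c, tid, 0xF100, 3, vendor, 16) >= 0) {
			vendor[16] = 0;
			printk(KERN_INFO "%s: TID %d vendor %s\n", c->name, tid, vendor);
		}

		/* Whole group: field == -1 returns every field in one result block */
		i2o_query_scalar(c, tid, 0xF100, -1, group, sizeof(group));
	}
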
- */ -int i2o_set_scalar(struct i2o_controller *iop, int tid, - int group, int field, void *buf, int buflen) -{ - u16 *opblk; - u8 resblk[8+buflen]; /* 8 bytes for header */ - int size; - - opblk = kmalloc(buflen+64, GFP_KERNEL); - if (opblk == NULL) - { - printk(KERN_ERR "i2o: no memory for operation buffer.\n"); - return -ENOMEM; - } - - opblk[0] = 1; /* operation count */ - opblk[1] = 0; /* pad */ - opblk[2] = I2O_PARAMS_FIELD_SET; - opblk[3] = group; - - if(field == -1) { /* whole group */ - opblk[4] = -1; - memcpy(opblk+5, buf, buflen); - } - else /* single field */ - { - opblk[4] = 1; - opblk[5] = field; - memcpy(opblk+6, buf, buflen); - } - - size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_SET, iop, tid, - opblk, 12+buflen, resblk, sizeof(resblk)); - - kfree(opblk); - if(size>buflen) - return buflen; - return size; -} - -/* - * if oper == I2O_PARAMS_TABLE_GET, get from all rows - * if fieldcount == -1 return all fields - * ibuf and ibuflen are unused (use NULL, 0) - * else return specific fields - * ibuf contains fieldindexes - * - * if oper == I2O_PARAMS_LIST_GET, get from specific rows - * if fieldcount == -1 return all fields - * ibuf contains rowcount, keyvalues - * else return specific fields - * fieldcount is # of fieldindexes - * ibuf contains fieldindexes, rowcount, keyvalues - * - * You could also use directly function i2o_issue_params(). - */ -int i2o_query_table(int oper, struct i2o_controller *iop, int tid, int group, - int fieldcount, void *ibuf, int ibuflen, - void *resblk, int reslen) -{ - u16 *opblk; - int size; - - opblk = kmalloc(10 + ibuflen, GFP_KERNEL); - if (opblk == NULL) - { - printk(KERN_ERR "i2o: no memory for query buffer.\n"); - return -ENOMEM; - } - - opblk[0] = 1; /* operation count */ - opblk[1] = 0; /* pad */ - opblk[2] = oper; - opblk[3] = group; - opblk[4] = fieldcount; - memcpy(opblk+5, ibuf, ibuflen); /* other params */ - - size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET,iop, tid, - opblk, 10+ibuflen, resblk, reslen); - - kfree(opblk); - if(size>reslen) - return reslen; - return size; -} - -/* - * Clear table group, i.e. delete all rows. - */ -int i2o_clear_table(struct i2o_controller *iop, int tid, int group) -{ - u16 opblk[] = { 1, 0, I2O_PARAMS_TABLE_CLEAR, group }; - u8 resblk[32]; /* min 8 bytes for result header */ - - return i2o_issue_params(I2O_CMD_UTIL_PARAMS_SET, iop, tid, - opblk, sizeof(opblk), resblk, sizeof(resblk)); -} - -/* - * Add a new row into a table group. - * - * if fieldcount==-1 then we add whole rows - * buf contains rowcount, keyvalues - * else just specific fields are given, rest use defaults - * buf contains fieldindexes, rowcount, keyvalues - */ -int i2o_row_add_table(struct i2o_controller *iop, int tid, - int group, int fieldcount, void *buf, int buflen) -{ - u16 *opblk; - u8 resblk[32]; /* min 8 bytes for header */ - int size; - - opblk = kmalloc(buflen+64, GFP_KERNEL); - if (opblk == NULL) - { - printk(KERN_ERR "i2o: no memory for operation buffer.\n"); - return -ENOMEM; - } - - opblk[0] = 1; /* operation count */ - opblk[1] = 0; /* pad */ - opblk[2] = I2O_PARAMS_ROW_ADD; - opblk[3] = group; - opblk[4] = fieldcount; - memcpy(opblk+5, buf, buflen); - - size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_SET, iop, tid, - opblk, 10+buflen, resblk, sizeof(resblk)); - - kfree(opblk); - if(size>buflen) - return buflen; - return size; -} - - -/* - * Used for error reporting/debugging purposes. - * Following fail status are common to all classes. - * The preserved message must be handled in the reply handler. 
- */ -void i2o_report_fail_status(u8 req_status, u32* msg) -{ - static char *FAIL_STATUS[] = { - "0x80", /* not used */ - "SERVICE_SUSPENDED", /* 0x81 */ - "SERVICE_TERMINATED", /* 0x82 */ - "CONGESTION", - "FAILURE", - "STATE_ERROR", - "TIME_OUT", - "ROUTING_FAILURE", - "INVALID_VERSION", - "INVALID_OFFSET", - "INVALID_MSG_FLAGS", - "FRAME_TOO_SMALL", - "FRAME_TOO_LARGE", - "INVALID_TARGET_ID", - "INVALID_INITIATOR_ID", - "INVALID_INITIATOR_CONTEX", /* 0x8F */ - "UNKNOWN_FAILURE" /* 0xFF */ - }; - - if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE) - printk("TRANSPORT_UNKNOWN_FAILURE (%0#2x)\n.", req_status); - else - printk("TRANSPORT_%s.\n", FAIL_STATUS[req_status & 0x0F]); - - /* Dump some details */ - - printk(KERN_ERR " InitiatorId = %d, TargetId = %d\n", - (msg[1] >> 12) & 0xFFF, msg[1] & 0xFFF); - printk(KERN_ERR " LowestVersion = 0x%02X, HighestVersion = 0x%02X\n", - (msg[4] >> 8) & 0xFF, msg[4] & 0xFF); - printk(KERN_ERR " FailingHostUnit = 0x%04X, FailingIOP = 0x%03X\n", - msg[5] >> 16, msg[5] & 0xFFF); - - printk(KERN_ERR " Severity: 0x%02X ", (msg[4] >> 16) & 0xFF); - if (msg[4] & (1<<16)) - printk("(FormatError), " - "this msg can never be delivered/processed.\n"); - if (msg[4] & (1<<17)) - printk("(PathError), " - "this msg can no longer be delivered/processed.\n"); - if (msg[4] & (1<<18)) - printk("(PathState), " - "the system state does not allow delivery.\n"); - if (msg[4] & (1<<19)) - printk("(Congestion), resources temporarily not available;" - "do not retry immediately.\n"); -} - -/* - * Used for error reporting/debugging purposes. - * Following reply status are common to all classes. - */ -void i2o_report_common_status(u8 req_status) -{ - static char *REPLY_STATUS[] = { - "SUCCESS", - "ABORT_DIRTY", - "ABORT_NO_DATA_TRANSFER", - "ABORT_PARTIAL_TRANSFER", - "ERROR_DIRTY", - "ERROR_NO_DATA_TRANSFER", - "ERROR_PARTIAL_TRANSFER", - "PROCESS_ABORT_DIRTY", - "PROCESS_ABORT_NO_DATA_TRANSFER", - "PROCESS_ABORT_PARTIAL_TRANSFER", - "TRANSACTION_ERROR", - "PROGRESS_REPORT" - }; - - if (req_status >= ARRAY_SIZE(REPLY_STATUS)) - printk("RequestStatus = %0#2x", req_status); - else - printk("%s", REPLY_STATUS[req_status]); -} - -/* - * Used for error reporting/debugging purposes. - * Following detailed status are valid for executive class, - * utility class, DDM class and for transaction error replies. 
- */ -static void i2o_report_common_dsc(u16 detailed_status) -{ - static char *COMMON_DSC[] = { - "SUCCESS", - "0x01", // not used - "BAD_KEY", - "TCL_ERROR", - "REPLY_BUFFER_FULL", - "NO_SUCH_PAGE", - "INSUFFICIENT_RESOURCE_SOFT", - "INSUFFICIENT_RESOURCE_HARD", - "0x08", // not used - "CHAIN_BUFFER_TOO_LARGE", - "UNSUPPORTED_FUNCTION", - "DEVICE_LOCKED", - "DEVICE_RESET", - "INAPPROPRIATE_FUNCTION", - "INVALID_INITIATOR_ADDRESS", - "INVALID_MESSAGE_FLAGS", - "INVALID_OFFSET", - "INVALID_PARAMETER", - "INVALID_REQUEST", - "INVALID_TARGET_ADDRESS", - "MESSAGE_TOO_LARGE", - "MESSAGE_TOO_SMALL", - "MISSING_PARAMETER", - "TIMEOUT", - "UNKNOWN_ERROR", - "UNKNOWN_FUNCTION", - "UNSUPPORTED_VERSION", - "DEVICE_BUSY", - "DEVICE_NOT_AVAILABLE" - }; - - if (detailed_status > I2O_DSC_DEVICE_NOT_AVAILABLE) - printk(" / DetailedStatus = %0#4x.\n", detailed_status); - else - printk(" / %s.\n", COMMON_DSC[detailed_status]); -} - -/* - * Used for error reporting/debugging purposes - */ -static void i2o_report_lan_dsc(u16 detailed_status) -{ - static char *LAN_DSC[] = { // Lan detailed status code strings - "SUCCESS", - "DEVICE_FAILURE", - "DESTINATION_NOT_FOUND", - "TRANSMIT_ERROR", - "TRANSMIT_ABORTED", - "RECEIVE_ERROR", - "RECEIVE_ABORTED", - "DMA_ERROR", - "BAD_PACKET_DETECTED", - "OUT_OF_MEMORY", - "BUCKET_OVERRUN", - "IOP_INTERNAL_ERROR", - "CANCELED", - "INVALID_TRANSACTION_CONTEXT", - "DEST_ADDRESS_DETECTED", - "DEST_ADDRESS_OMITTED", - "PARTIAL_PACKET_RETURNED", - "TEMP_SUSPENDED_STATE", // last Lan detailed status code - "INVALID_REQUEST" // general detailed status code - }; - - if (detailed_status > I2O_DSC_INVALID_REQUEST) - printk(" / %0#4x.\n", detailed_status); - else - printk(" / %s.\n", LAN_DSC[detailed_status]); -} - -/* - * Used for error reporting/debugging purposes - */ -static void i2o_report_util_cmd(u8 cmd) -{ - switch (cmd) { - case I2O_CMD_UTIL_NOP: - printk("UTIL_NOP, "); - break; - case I2O_CMD_UTIL_ABORT: - printk("UTIL_ABORT, "); - break; - case I2O_CMD_UTIL_CLAIM: - printk("UTIL_CLAIM, "); - break; - case I2O_CMD_UTIL_RELEASE: - printk("UTIL_CLAIM_RELEASE, "); - break; - case I2O_CMD_UTIL_CONFIG_DIALOG: - printk("UTIL_CONFIG_DIALOG, "); - break; - case I2O_CMD_UTIL_DEVICE_RESERVE: - printk("UTIL_DEVICE_RESERVE, "); - break; - case I2O_CMD_UTIL_DEVICE_RELEASE: - printk("UTIL_DEVICE_RELEASE, "); - break; - case I2O_CMD_UTIL_EVT_ACK: - printk("UTIL_EVENT_ACKNOWLEDGE, "); - break; - case I2O_CMD_UTIL_EVT_REGISTER: - printk("UTIL_EVENT_REGISTER, "); - break; - case I2O_CMD_UTIL_LOCK: - printk("UTIL_LOCK, "); - break; - case I2O_CMD_UTIL_LOCK_RELEASE: - printk("UTIL_LOCK_RELEASE, "); - break; - case I2O_CMD_UTIL_PARAMS_GET: - printk("UTIL_PARAMS_GET, "); - break; - case I2O_CMD_UTIL_PARAMS_SET: - printk("UTIL_PARAMS_SET, "); - break; - case I2O_CMD_UTIL_REPLY_FAULT_NOTIFY: - printk("UTIL_REPLY_FAULT_NOTIFY, "); - break; - default: - printk("Cmd = %0#2x, ",cmd); - } -} - -/* - * Used for error reporting/debugging purposes - */ -static void i2o_report_exec_cmd(u8 cmd) -{ - switch (cmd) { - case I2O_CMD_ADAPTER_ASSIGN: - printk("EXEC_ADAPTER_ASSIGN, "); - break; - case I2O_CMD_ADAPTER_READ: - printk("EXEC_ADAPTER_READ, "); - break; - case I2O_CMD_ADAPTER_RELEASE: - printk("EXEC_ADAPTER_RELEASE, "); - break; - case I2O_CMD_BIOS_INFO_SET: - printk("EXEC_BIOS_INFO_SET, "); - break; - case I2O_CMD_BOOT_DEVICE_SET: - printk("EXEC_BOOT_DEVICE_SET, "); - break; - case I2O_CMD_CONFIG_VALIDATE: - printk("EXEC_CONFIG_VALIDATE, "); - break; - case I2O_CMD_CONN_SETUP: - printk("EXEC_CONN_SETUP, 
"); - break; - case I2O_CMD_DDM_DESTROY: - printk("EXEC_DDM_DESTROY, "); - break; - case I2O_CMD_DDM_ENABLE: - printk("EXEC_DDM_ENABLE, "); - break; - case I2O_CMD_DDM_QUIESCE: - printk("EXEC_DDM_QUIESCE, "); - break; - case I2O_CMD_DDM_RESET: - printk("EXEC_DDM_RESET, "); - break; - case I2O_CMD_DDM_SUSPEND: - printk("EXEC_DDM_SUSPEND, "); - break; - case I2O_CMD_DEVICE_ASSIGN: - printk("EXEC_DEVICE_ASSIGN, "); - break; - case I2O_CMD_DEVICE_RELEASE: - printk("EXEC_DEVICE_RELEASE, "); - break; - case I2O_CMD_HRT_GET: - printk("EXEC_HRT_GET, "); - break; - case I2O_CMD_ADAPTER_CLEAR: - printk("EXEC_IOP_CLEAR, "); - break; - case I2O_CMD_ADAPTER_CONNECT: - printk("EXEC_IOP_CONNECT, "); - break; - case I2O_CMD_ADAPTER_RESET: - printk("EXEC_IOP_RESET, "); - break; - case I2O_CMD_LCT_NOTIFY: - printk("EXEC_LCT_NOTIFY, "); - break; - case I2O_CMD_OUTBOUND_INIT: - printk("EXEC_OUTBOUND_INIT, "); - break; - case I2O_CMD_PATH_ENABLE: - printk("EXEC_PATH_ENABLE, "); - break; - case I2O_CMD_PATH_QUIESCE: - printk("EXEC_PATH_QUIESCE, "); - break; - case I2O_CMD_PATH_RESET: - printk("EXEC_PATH_RESET, "); - break; - case I2O_CMD_STATIC_MF_CREATE: - printk("EXEC_STATIC_MF_CREATE, "); - break; - case I2O_CMD_STATIC_MF_RELEASE: - printk("EXEC_STATIC_MF_RELEASE, "); - break; - case I2O_CMD_STATUS_GET: - printk("EXEC_STATUS_GET, "); - break; - case I2O_CMD_SW_DOWNLOAD: - printk("EXEC_SW_DOWNLOAD, "); - break; - case I2O_CMD_SW_UPLOAD: - printk("EXEC_SW_UPLOAD, "); - break; - case I2O_CMD_SW_REMOVE: - printk("EXEC_SW_REMOVE, "); - break; - case I2O_CMD_SYS_ENABLE: - printk("EXEC_SYS_ENABLE, "); - break; - case I2O_CMD_SYS_MODIFY: - printk("EXEC_SYS_MODIFY, "); - break; - case I2O_CMD_SYS_QUIESCE: - printk("EXEC_SYS_QUIESCE, "); - break; - case I2O_CMD_SYS_TAB_SET: - printk("EXEC_SYS_TAB_SET, "); - break; - default: - printk("Cmd = %#02x, ",cmd); - } -} - -/* - * Used for error reporting/debugging purposes - */ -static void i2o_report_lan_cmd(u8 cmd) -{ - switch (cmd) { - case LAN_PACKET_SEND: - printk("LAN_PACKET_SEND, "); - break; - case LAN_SDU_SEND: - printk("LAN_SDU_SEND, "); - break; - case LAN_RECEIVE_POST: - printk("LAN_RECEIVE_POST, "); - break; - case LAN_RESET: - printk("LAN_RESET, "); - break; - case LAN_SUSPEND: - printk("LAN_SUSPEND, "); - break; - default: - printk("Cmd = %0#2x, ",cmd); - } -} - -/* - * Used for error reporting/debugging purposes. - * Report Cmd name, Request status, Detailed Status. 
- */ -void i2o_report_status(const char *severity, const char *str, u32 *msg) -{ - u8 cmd = (msg[1]>>24)&0xFF; - u8 req_status = (msg[4]>>24)&0xFF; - u16 detailed_status = msg[4]&0xFFFF; - struct i2o_handler *h = i2o_handlers[msg[2] & (MAX_I2O_MODULES-1)]; - - if (cmd == I2O_CMD_UTIL_EVT_REGISTER) - return; // No status in this reply - - printk("%s%s: ", severity, str); - - if (cmd < 0x1F) // Utility cmd - i2o_report_util_cmd(cmd); - - else if (cmd >= 0xA0 && cmd <= 0xEF) // Executive cmd - i2o_report_exec_cmd(cmd); - - else if (h->class == I2O_CLASS_LAN && cmd >= 0x30 && cmd <= 0x3F) - i2o_report_lan_cmd(cmd); // LAN cmd - else - printk("Cmd = %0#2x, ", cmd); // Other cmds - - if (msg[0] & MSG_FAIL) { - i2o_report_fail_status(req_status, msg); - return; - } - - i2o_report_common_status(req_status); - - if (cmd < 0x1F || (cmd >= 0xA0 && cmd <= 0xEF)) - i2o_report_common_dsc(detailed_status); - else if (h->class == I2O_CLASS_LAN && cmd >= 0x30 && cmd <= 0x3F) - i2o_report_lan_dsc(detailed_status); - else - printk(" / DetailedStatus = %0#4x.\n", detailed_status); -} - -/* Used to dump a message to syslog during debugging */ -void i2o_dump_message(u32 *msg) -{ -#ifdef DRIVERDEBUG - int i; - printk(KERN_INFO "Dumping I2O message size %d @ %p\n", - msg[0]>>16&0xffff, msg); - for(i = 0; i < ((msg[0]>>16)&0xffff); i++) - printk(KERN_INFO " msg[%d] = %0#10x\n", i, msg[i]); -#endif -} - -/* - * I2O reboot/shutdown notification. - * - * - Call each OSM's reboot notifier (if one exists) - * - Quiesce each IOP in the system - * - * Each IOP has to be quiesced before we can ensure that the system - * can be properly shutdown as a transaction that has already been - * acknowledged still needs to be placed in permanent store on the IOP. - * The SysQuiesce causes the IOP to force all HDMs to complete their - * transactions before returning, so only at that point is it safe - * - */ -static int i2o_reboot_event(struct notifier_block *n, unsigned long code, void -*p) -{ - int i = 0; - struct i2o_controller *c = NULL; - - if(code != SYS_RESTART && code != SYS_HALT && code != SYS_POWER_OFF) - return NOTIFY_DONE; - - printk(KERN_INFO "Shutting down I2O system.\n"); - printk(KERN_INFO - " This could take a few minutes if there are many devices attached\n"); - - for(i = 0; i < MAX_I2O_MODULES; i++) - { - if(i2o_handlers[i] && i2o_handlers[i]->reboot_notify) - i2o_handlers[i]->reboot_notify(); - } - - for(c = i2o_controller_chain; c; c = c->next) - { - if(i2o_quiesce_controller(c)) - { - printk(KERN_WARNING "i2o: Could not quiesce %s.\n" - "Verify setup on next system power up.\n", - c->name); - } - } - - printk(KERN_INFO "I2O system down.\n"); - return NOTIFY_DONE; -} - - - - -/** - * i2o_pci_dispose - Free bus specific resources - * @c: I2O controller - * - * Disable interrupts and then free interrupt, I/O and mtrr resources - * used by this controller. Called by the I2O core on unload. - */ - -static void i2o_pci_dispose(struct i2o_controller *c) -{ - I2O_IRQ_WRITE32(c,0xFFFFFFFF); - if(c->irq > 0) - free_irq(c->irq, c); - iounmap(c->base_virt); - if(c->raptor) - iounmap(c->msg_virt); - -#ifdef CONFIG_MTRR - if(c->mtrr_reg0 > 0) - mtrr_del(c->mtrr_reg0, 0, 0); - if(c->mtrr_reg1 > 0) - mtrr_del(c->mtrr_reg1, 0, 0); -#endif -} - -/** - * i2o_pci_interrupt - Bus specific interrupt handler - * @irq: interrupt line - * @dev_id: cookie - * - * Handle an interrupt from a PCI based I2O controller. This turns out - * to be rather simple. We keep the controller pointer in the cookie. 
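 * The cookie is established when the interrupt line is requested in
 * i2o_pci_install() below:
 *
 *	request_irq(dev->irq, i2o_pci_interrupt, SA_SHIRQ, c->name, c);
 *
 * so the handler can recover the owning i2o_controller directly from
 * @dev_id.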
- */ - -static irqreturn_t i2o_pci_interrupt(int irq, void *dev_id, struct pt_regs *r) -{ - struct i2o_controller *c = dev_id; - i2o_run_queue(c); - return IRQ_HANDLED; -} - -/** - * i2o_pci_install - Install a PCI i2o controller - * @dev: PCI device of the I2O controller - * - * Install a PCI (or in theory AGP) i2o controller. Devices are - * initialized, configured and registered with the i2o core subsystem. Be - * very careful with ordering. There may be pending interrupts. - * - * To Do: Add support for polled controllers - */ - -int __init i2o_pci_install(struct pci_dev *dev) -{ - struct i2o_controller *c=kmalloc(sizeof(struct i2o_controller), - GFP_KERNEL); - void *bar0_virt; - void *bar1_virt; - unsigned long bar0_phys = 0; - unsigned long bar1_phys = 0; - unsigned long bar0_size = 0; - unsigned long bar1_size = 0; - - int i; - - if(c==NULL) - { - printk(KERN_ERR "i2o: Insufficient memory to add controller.\n"); - return -ENOMEM; - } - memset(c, 0, sizeof(*c)); - - c->irq = -1; - c->dpt = 0; - c->raptor = 0; - c->short_req = 0; - c->pdev = dev; - -#if BITS_PER_LONG == 64 - c->context_list_lock = SPIN_LOCK_UNLOCKED; -#endif - - /* - * Cards that fall apart if you hit them with large I/O - * loads... - */ - - if(dev->vendor == PCI_VENDOR_ID_NCR && dev->device == 0x0630) - { - c->short_req = 1; - printk(KERN_INFO "I2O: Symbios FC920 workarounds activated.\n"); - } - - if(dev->subsystem_vendor == PCI_VENDOR_ID_PROMISE) - { - c->promise = 1; - printk(KERN_INFO "I2O: Promise workarounds activated.\n"); - } - - /* - * Cards that go bananas if you quiesce them before you reset - * them - */ - - if(dev->vendor == PCI_VENDOR_ID_DPT) { - c->dpt=1; - if(dev->device == 0xA511) - c->raptor=1; - } - - for(i=0; i<6; i++) - { - /* Skip I/O spaces */ - if(!(pci_resource_flags(dev, i) & IORESOURCE_IO)) - { - if(!bar0_phys) - { - bar0_phys = pci_resource_start(dev, i); - bar0_size = pci_resource_len(dev, i); - if(!c->raptor) - break; - } - else - { - bar1_phys = pci_resource_start(dev, i); - bar1_size = pci_resource_len(dev, i); - break; - } - } - } - - if(i==6) - { - printk(KERN_ERR "i2o: I2O controller has no memory regions defined.\n"); - kfree(c); - return -EINVAL; - } - - - /* Map the I2O controller */ - if(!c->raptor) - printk(KERN_INFO "i2o: PCI I2O controller at %08lX size=%ld\n", bar0_phys, bar0_size); - else - printk(KERN_INFO "i2o: PCI I2O controller\n BAR0 at 0x%08lX size=%ld\n BAR1 at 0x%08lX size=%ld\n", bar0_phys, bar0_size, bar1_phys, bar1_size); - - bar0_virt = ioremap(bar0_phys, bar0_size); - if(bar0_virt==0) - { - printk(KERN_ERR "i2o: Unable to map controller.\n"); - kfree(c); - return -EINVAL; - } - - if(c->raptor) - { - bar1_virt = ioremap(bar1_phys, bar1_size); - if(bar1_virt==0) - { - printk(KERN_ERR "i2o: Unable to map controller.\n"); - kfree(c); - iounmap(bar0_virt); - return -EINVAL; - } - } else { - bar1_virt = bar0_virt; - bar1_phys = bar0_phys; - bar1_size = bar0_size; - } - - c->irq_mask = bar0_virt+0x34; - c->post_port = bar0_virt+0x40; - c->reply_port = bar0_virt+0x44; - - c->base_phys = bar0_phys; - c->base_virt = bar0_virt; - c->msg_phys = bar1_phys; - c->msg_virt = bar1_virt; - - /* - * Enable Write Combining MTRR for IOP's memory region - */ -#ifdef CONFIG_MTRR - c->mtrr_reg0 = mtrr_add(c->base_phys, bar0_size, MTRR_TYPE_WRCOMB, 1); - /* - * If it is an INTEL i960 I/O processor then set the first 64K to - * Uncacheable since the region contains the Messaging unit which - * shouldn't be cached. 
- */ - c->mtrr_reg1 = -1; - if(dev->vendor == PCI_VENDOR_ID_INTEL || dev->vendor == PCI_VENDOR_ID_DPT) - { - printk(KERN_INFO "I2O: MTRR workaround for Intel i960 processor\n"); - c->mtrr_reg1 = mtrr_add(c->base_phys, 65536, MTRR_TYPE_UNCACHABLE, 1); - if(c->mtrr_reg1< 0) - { - printk(KERN_INFO "i2o_pci: Error in setting MTRR_TYPE_UNCACHABLE\n"); - mtrr_del(c->mtrr_reg0, c->msg_phys, bar1_size); - c->mtrr_reg0 = -1; - } - } - if(c->raptor) - c->mtrr_reg1 = mtrr_add(c->msg_phys, bar1_size, MTRR_TYPE_WRCOMB, 1); - -#endif - - I2O_IRQ_WRITE32(c,0xFFFFFFFF); - - i = i2o_install_controller(c); - - if(i<0) - { - printk(KERN_ERR "i2o: Unable to install controller.\n"); - kfree(c); - iounmap(bar0_virt); - if(c->raptor) - iounmap(bar1_virt); - return i; - } - - c->irq = dev->irq; - if(c->irq) - { - i=request_irq(dev->irq, i2o_pci_interrupt, SA_SHIRQ, - c->name, c); - if(i<0) - { - printk(KERN_ERR "%s: unable to allocate interrupt %d.\n", - c->name, dev->irq); - c->irq = -1; - i2o_delete_controller(c); - iounmap(bar0_virt); - if(c->raptor) - iounmap(bar1_virt); - return -EBUSY; - } - } - - printk(KERN_INFO "%s: Installed at IRQ%d\n", c->name, dev->irq); - I2O_IRQ_WRITE32(c,0x0); - c->enabled = 1; - return 0; -} - -/** - * i2o_pci_scan - Scan the pci bus for controllers - * - * Scan the PCI devices on the system looking for any device which is a - * memory of the Intelligent, I2O class. We attempt to set up each such device - * and register it with the core. - * - * Returns the number of controllers registered - * - * Note; Do not change this to a hot plug interface. I2O 1.5 itself - * does not support hot plugging. - */ - -int __init i2o_pci_scan(void) -{ - struct pci_dev *dev = NULL; - int count=0; - - printk(KERN_INFO "i2o: Checking for PCI I2O controllers...\n"); - - while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) - { - if((dev->class>>8)!=PCI_CLASS_INTELLIGENT_I2O && - (dev->vendor!=PCI_VENDOR_ID_DPT || dev->device!=0xA511)) - continue; - - if((dev->class>>8)==PCI_CLASS_INTELLIGENT_I2O && - (dev->class&0xFF)>1) - { - printk(KERN_INFO "i2o: I2O Controller found but does not support I2O 1.5 (skipping).\n"); - continue; - } - if (pci_enable_device(dev)) - continue; - printk(KERN_INFO "i2o: I2O controller on bus %d at %d.\n", - dev->bus->number, dev->devfn); - if(pci_set_dma_mask(dev, 0xffffffff)) - { - printk(KERN_WARNING "I2O controller on bus %d at %d : No suitable DMA available\n", dev->bus->number, dev->devfn); - continue; - } - pci_set_master(dev); - if(i2o_pci_install(dev)==0) - count++; - } - if(count) - printk(KERN_INFO "i2o: %d I2O controller%s found and installed.\n", count, - count==1?"":"s"); - return count?count:-ENODEV; -} - -static int i2o_core_init(void) -{ - printk(KERN_INFO "I2O Core - (C) Copyright 1999 Red Hat Software\n"); - if (i2o_install_handler(&i2o_core_handler) < 0) - { - printk(KERN_ERR "i2o_core: Unable to install core handler.\nI2O stack not loaded!"); - return 0; - } - - core_context = i2o_core_handler.context; - - /* - * Initialize event handling thread - */ - - init_MUTEX_LOCKED(&evt_sem); - evt_pid = kernel_thread(i2o_core_evt, &evt_reply, CLONE_SIGHAND); - if(evt_pid < 0) - { - printk(KERN_ERR "I2O: Could not create event handler kernel thread\n"); - i2o_remove_handler(&i2o_core_handler); - return 0; - } - else - printk(KERN_INFO "I2O: Event thread created as pid %d\n", evt_pid); - - i2o_pci_scan(); - if(i2o_num_controllers) - i2o_sys_init(); - - register_reboot_notifier(&i2o_reboot_notifier); - - return 0; -} - -static void 
i2o_core_exit(void) -{ - int stat; - - unregister_reboot_notifier(&i2o_reboot_notifier); - - if(i2o_num_controllers) - i2o_sys_shutdown(); - - /* - * If this is shutdown time, the thread has already been killed - */ - if(evt_running) { - printk("Terminating i2o threads..."); - stat = kill_proc(evt_pid, SIGKILL, 1); - if(!stat) { - printk("waiting...\n"); - wait_for_completion(&evt_dead); - } - printk("done.\n"); - } - i2o_remove_handler(&i2o_core_handler); -} - -module_init(i2o_core_init); -module_exit(i2o_core_exit); - -MODULE_PARM(verbose, "i"); -MODULE_PARM_DESC(verbose, "Verbose diagnostics"); - -MODULE_AUTHOR("Red Hat Software"); -MODULE_DESCRIPTION("I2O Core"); -MODULE_LICENSE("GPL"); - -EXPORT_SYMBOL(i2o_controller_chain); -EXPORT_SYMBOL(i2o_num_controllers); -EXPORT_SYMBOL(i2o_find_controller); -EXPORT_SYMBOL(i2o_unlock_controller); -EXPORT_SYMBOL(i2o_status_get); -EXPORT_SYMBOL(i2o_install_handler); -EXPORT_SYMBOL(i2o_remove_handler); -EXPORT_SYMBOL(i2o_install_controller); -EXPORT_SYMBOL(i2o_delete_controller); -EXPORT_SYMBOL(i2o_run_queue); -EXPORT_SYMBOL(i2o_claim_device); -EXPORT_SYMBOL(i2o_release_device); -EXPORT_SYMBOL(i2o_device_notify_on); -EXPORT_SYMBOL(i2o_device_notify_off); -EXPORT_SYMBOL(i2o_post_this); -EXPORT_SYMBOL(i2o_post_wait); -EXPORT_SYMBOL(i2o_post_wait_mem); -EXPORT_SYMBOL(i2o_query_scalar); -EXPORT_SYMBOL(i2o_set_scalar); -EXPORT_SYMBOL(i2o_query_table); -EXPORT_SYMBOL(i2o_clear_table); -EXPORT_SYMBOL(i2o_row_add_table); -EXPORT_SYMBOL(i2o_issue_params); -EXPORT_SYMBOL(i2o_event_register); -EXPORT_SYMBOL(i2o_event_ack); -EXPORT_SYMBOL(i2o_report_status); -EXPORT_SYMBOL(i2o_dump_message); -EXPORT_SYMBOL(i2o_get_class_name); -EXPORT_SYMBOL(i2o_context_list_add); -EXPORT_SYMBOL(i2o_context_list_get); -EXPORT_SYMBOL(i2o_context_list_remove); diff -L drivers/message/i2o/i2o_lan.h -puN drivers/message/i2o/i2o_lan.h~i2o-build_99 /dev/null --- 25/drivers/message/i2o/i2o_lan.h +++ /dev/null Thu Apr 11 07:25:15 2002 @@ -1,159 +0,0 @@ -/* - * i2o_lan.h I2O LAN Class definitions - * - * I2O LAN CLASS OSM May 26th 2000 - * - * (C) Copyright 1999, 2000 University of Helsinki, - * Department of Computer Science - * - * This code is still under development / test. 
- * - * Author: Auvo Häkkinen - * Juha Sievänen - * Taneli Vähäkangas - */ - -#ifndef _I2O_LAN_H -#define _I2O_LAN_H - -/* Default values for tunable parameters first */ - -#define I2O_LAN_MAX_BUCKETS_OUT 96 -#define I2O_LAN_BUCKET_THRESH 18 /* 9 buckets in one message */ -#define I2O_LAN_RX_COPYBREAK 200 -#define I2O_LAN_TX_TIMEOUT (1*HZ) -#define I2O_LAN_TX_BATCH_MODE 2 /* 2=automatic, 1=on, 0=off */ -#define I2O_LAN_EVENT_MASK 0 /* 0=None, 0xFFC00002=All */ - -/* LAN types */ -#define I2O_LAN_ETHERNET 0x0030 -#define I2O_LAN_100VG 0x0040 -#define I2O_LAN_TR 0x0050 -#define I2O_LAN_FDDI 0x0060 -#define I2O_LAN_FIBRE_CHANNEL 0x0070 -#define I2O_LAN_UNKNOWN 0x00000000 - -/* Connector types */ - -/* Ethernet */ -#define I2O_LAN_AUI (I2O_LAN_ETHERNET << 4) + 0x00000001 -#define I2O_LAN_10BASE5 (I2O_LAN_ETHERNET << 4) + 0x00000002 -#define I2O_LAN_FIORL (I2O_LAN_ETHERNET << 4) + 0x00000003 -#define I2O_LAN_10BASE2 (I2O_LAN_ETHERNET << 4) + 0x00000004 -#define I2O_LAN_10BROAD36 (I2O_LAN_ETHERNET << 4) + 0x00000005 -#define I2O_LAN_10BASE_T (I2O_LAN_ETHERNET << 4) + 0x00000006 -#define I2O_LAN_10BASE_FP (I2O_LAN_ETHERNET << 4) + 0x00000007 -#define I2O_LAN_10BASE_FB (I2O_LAN_ETHERNET << 4) + 0x00000008 -#define I2O_LAN_10BASE_FL (I2O_LAN_ETHERNET << 4) + 0x00000009 -#define I2O_LAN_100BASE_TX (I2O_LAN_ETHERNET << 4) + 0x0000000A -#define I2O_LAN_100BASE_FX (I2O_LAN_ETHERNET << 4) + 0x0000000B -#define I2O_LAN_100BASE_T4 (I2O_LAN_ETHERNET << 4) + 0x0000000C -#define I2O_LAN_1000BASE_SX (I2O_LAN_ETHERNET << 4) + 0x0000000D -#define I2O_LAN_1000BASE_LX (I2O_LAN_ETHERNET << 4) + 0x0000000E -#define I2O_LAN_1000BASE_CX (I2O_LAN_ETHERNET << 4) + 0x0000000F -#define I2O_LAN_1000BASE_T (I2O_LAN_ETHERNET << 4) + 0x00000010 - -/* AnyLAN */ -#define I2O_LAN_100VG_ETHERNET (I2O_LAN_100VG << 4) + 0x00000001 -#define I2O_LAN_100VG_TR (I2O_LAN_100VG << 4) + 0x00000002 - -/* Token Ring */ -#define I2O_LAN_4MBIT (I2O_LAN_TR << 4) + 0x00000001 -#define I2O_LAN_16MBIT (I2O_LAN_TR << 4) + 0x00000002 - -/* FDDI */ -#define I2O_LAN_125MBAUD (I2O_LAN_FDDI << 4) + 0x00000001 - -/* Fibre Channel */ -#define I2O_LAN_POINT_POINT (I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000001 -#define I2O_LAN_ARB_LOOP (I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000002 -#define I2O_LAN_PUBLIC_LOOP (I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000003 -#define I2O_LAN_FABRIC (I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000004 - -#define I2O_LAN_EMULATION 0x00000F00 -#define I2O_LAN_OTHER 0x00000F01 -#define I2O_LAN_DEFAULT 0xFFFFFFFF - -/* LAN class functions */ - -#define LAN_PACKET_SEND 0x3B -#define LAN_SDU_SEND 0x3D -#define LAN_RECEIVE_POST 0x3E -#define LAN_RESET 0x35 -#define LAN_SUSPEND 0x37 - -/* LAN DetailedStatusCode defines */ -#define I2O_LAN_DSC_SUCCESS 0x00 -#define I2O_LAN_DSC_DEVICE_FAILURE 0x01 -#define I2O_LAN_DSC_DESTINATION_NOT_FOUND 0x02 -#define I2O_LAN_DSC_TRANSMIT_ERROR 0x03 -#define I2O_LAN_DSC_TRANSMIT_ABORTED 0x04 -#define I2O_LAN_DSC_RECEIVE_ERROR 0x05 -#define I2O_LAN_DSC_RECEIVE_ABORTED 0x06 -#define I2O_LAN_DSC_DMA_ERROR 0x07 -#define I2O_LAN_DSC_BAD_PACKET_DETECTED 0x08 -#define I2O_LAN_DSC_OUT_OF_MEMORY 0x09 -#define I2O_LAN_DSC_BUCKET_OVERRUN 0x0A -#define I2O_LAN_DSC_IOP_INTERNAL_ERROR 0x0B -#define I2O_LAN_DSC_CANCELED 0x0C -#define I2O_LAN_DSC_INVALID_TRANSACTION_CONTEXT 0x0D -#define I2O_LAN_DSC_DEST_ADDRESS_DETECTED 0x0E -#define I2O_LAN_DSC_DEST_ADDRESS_OMITTED 0x0F -#define I2O_LAN_DSC_PARTIAL_PACKET_RETURNED 0x10 -#define I2O_LAN_DSC_SUSPENDED 0x11 - -struct i2o_packet_info { - u32 offset : 24; - u32 flags : 8; - u32 len : 24; - u32 
status : 8; -}; - -struct i2o_bucket_descriptor { - u32 context; /* FIXME: 64bit support */ - struct i2o_packet_info packet_info[1]; -}; - -/* Event Indicator Mask Flags for LAN OSM */ - -#define I2O_LAN_EVT_LINK_DOWN 0x01 -#define I2O_LAN_EVT_LINK_UP 0x02 -#define I2O_LAN_EVT_MEDIA_CHANGE 0x04 - -#include -#include - -struct i2o_lan_local { - u8 unit; - struct i2o_device *i2o_dev; - - struct fddi_statistics stats; /* see also struct net_device_stats */ - unsigned short (*type_trans)(struct sk_buff *, struct net_device *); - atomic_t buckets_out; /* nbr of unused buckets on DDM */ - atomic_t tx_out; /* outstanding TXes */ - u8 tx_count; /* packets in one TX message frame */ - u16 tx_max_out; /* DDM's Tx queue len */ - u8 sgl_max; /* max SGLs in one message frame */ - u32 m; /* IOP address of the batch msg frame */ - - struct work_struct i2o_batch_send_task; - int send_active; - struct sk_buff **i2o_fbl; /* Free bucket list (to reuse skbs) */ - int i2o_fbl_tail; - spinlock_t fbl_lock; - - spinlock_t tx_lock; - - u32 max_size_mc_table; /* max number of multicast addresses */ - - /* LAN OSM configurable parameters are here: */ - - u16 max_buckets_out; /* max nbr of buckets to send to DDM */ - u16 bucket_thresh; /* send more when this many used */ - u16 rx_copybreak; - - u8 tx_batch_mode; /* Set when using batch mode sends */ - u32 i2o_event_mask; /* To turn on interesting event flags */ -}; - -#endif /* _I2O_LAN_H */ diff -L drivers/message/i2o/i2o_proc.c -puN drivers/message/i2o/i2o_proc.c~i2o-build_99 /dev/null --- 25/drivers/message/i2o/i2o_proc.c +++ /dev/null Thu Apr 11 07:25:15 2002 @@ -1,3409 +0,0 @@ -/* - * procfs handler for Linux I2O subsystem - * - * (c) Copyright 1999 Deepak Saxena - * - * Originally written by Deepak Saxena(deepak@plexity.net) - * - * This program is free software. You can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - * This is an initial test release. The code is based on the design - * of the ide procfs system (drivers/block/ide-proc.c). Some code - * taken from i2o-core module by Alan Cox. - * - * DISCLAIMER: This code is still under development/test and may cause - * your system to behave unpredictably. Use at your own discretion. - * - * LAN entries by Juha Sievänen (Juha.Sievanen@cs.Helsinki.FI), - * Auvo Häkkinen (Auvo.Hakkinen@cs.Helsinki.FI) - * University of Helsinki, Department of Computer Science - */ - -/* - * set tabstop=3 - */ - -/* - * TODO List - * - * - Add support for any version 2.0 spec changes once 2.0 IRTOS is - * is available to test with - * - Clean up code to use official structure definitions - */ - -// FIXME! 
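/*
 * The two macros below print a u64 as a pair of 32-bit halves
 * ("0x%08x%08x", with the word at offset 4 emitted first), which only
 * produces the expected hex value on little-endian hosts; presumably
 * that limitation is what the FIXME above refers to.
 */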
-#define FMT_U64_HEX "0x%08x%08x" -#define U64_VAL(pu64) *((u32*)(pu64)+1), *((u32*)(pu64)) - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "i2o_lan.h" - -/* - * Structure used to define /proc entries - */ -typedef struct _i2o_proc_entry_t -{ - char *name; /* entry name */ - mode_t mode; /* mode */ - read_proc_t *read_proc; /* read func */ - write_proc_t *write_proc; /* write func */ - struct file_operations *fops_proc; /* file operations func */ -} i2o_proc_entry; - -// #define DRIVERDEBUG - -static int i2o_seq_show_lct(struct seq_file *, void *); -static int i2o_seq_show_hrt(struct seq_file *, void *); -static int i2o_seq_show_status(struct seq_file *, void *); - -static int i2o_proc_read_hw(char *, char **, off_t, int, int *, void *); -static int i2o_proc_read_ddm_table(char *, char **, off_t, int, int *, void *); -static int i2o_proc_read_driver_store(char *, char **, off_t, int, int *, void *); -static int i2o_proc_read_drivers_stored(char *, char **, off_t, int, int *, void *); - -static int i2o_proc_read_groups(char *, char **, off_t, int, int *, void *); -static int i2o_proc_read_phys_device(char *, char **, off_t, int, int *, void *); -static int i2o_proc_read_claimed(char *, char **, off_t, int, int *, void *); -static int i2o_proc_read_users(char *, char **, off_t, int, int *, void *); -static int i2o_proc_read_priv_msgs(char *, char **, off_t, int, int *, void *); -static int i2o_proc_read_authorized_users(char *, char **, off_t, int, int *, void *); - -static int i2o_proc_read_dev_name(char *, char **, off_t, int, int *, void *); -static int i2o_proc_read_dev_identity(char *, char **, off_t, int, int *, void *); -static int i2o_proc_read_ddm_identity(char *, char **, off_t, int, int *, void *); -static int i2o_proc_read_uinfo(char *, char **, off_t, int, int *, void *); -static int i2o_proc_read_sgl_limits(char *, char **, off_t, int, int *, void *); - -static int i2o_proc_read_sensors(char *, char **, off_t, int, int *, void *); - -static int print_serial_number(char *, int, u8 *, int); - -static int i2o_proc_create_entries(void *, i2o_proc_entry *, - struct proc_dir_entry *); -static void i2o_proc_remove_entries(i2o_proc_entry *, struct proc_dir_entry *); -static int i2o_proc_add_controller(struct i2o_controller *, - struct proc_dir_entry * ); -static void i2o_proc_remove_controller(struct i2o_controller *, - struct proc_dir_entry * ); -static void i2o_proc_add_device(struct i2o_device *, struct proc_dir_entry *); -static void i2o_proc_remove_device(struct i2o_device *); -static int create_i2o_procfs(void); -static int destroy_i2o_procfs(void); -static void i2o_proc_new_dev(struct i2o_controller *, struct i2o_device *); -static void i2o_proc_dev_del(struct i2o_controller *, struct i2o_device *); - -static int i2o_proc_read_lan_dev_info(char *, char **, off_t, int, int *, - void *); -static int i2o_proc_read_lan_mac_addr(char *, char **, off_t, int, int *, - void *); -static int i2o_proc_read_lan_mcast_addr(char *, char **, off_t, int, int *, - void *); -static int i2o_proc_read_lan_batch_control(char *, char **, off_t, int, int *, - void *); -static int i2o_proc_read_lan_operation(char *, char **, off_t, int, int *, - void *); -static int i2o_proc_read_lan_media_operation(char *, char **, off_t, int, - int *, void *); -static int i2o_proc_read_lan_alt_addr(char *, char **, off_t, int, int *, - void *); -static int i2o_proc_read_lan_tx_info(char *, char **, off_t, int, 
int *, - void *); -static int i2o_proc_read_lan_rx_info(char *, char **, off_t, int, int *, - void *); -static int i2o_proc_read_lan_hist_stats(char *, char **, off_t, int, int *, - void *); -static int i2o_proc_read_lan_eth_stats(char *, char **, off_t, int, - int *, void *); -static int i2o_proc_read_lan_tr_stats(char *, char **, off_t, int, int *, - void *); -static int i2o_proc_read_lan_fddi_stats(char *, char **, off_t, int, int *, - void *); - -static struct proc_dir_entry *i2o_proc_dir_root; - -/* - * I2O OSM descriptor - */ -static struct i2o_handler i2o_proc_handler = -{ - NULL, - i2o_proc_new_dev, - i2o_proc_dev_del, - NULL, - "I2O procfs Layer", - 0, - 0xffffffff // All classes -}; - -static int i2o_seq_open_hrt(struct inode *inode, struct file *file) -{ - return single_open(file, i2o_seq_show_hrt, PDE(inode)->data); -}; - -struct file_operations i2o_seq_fops_hrt = { - .open = i2o_seq_open_hrt, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release -}; - -static int i2o_seq_open_lct(struct inode *inode, struct file *file) -{ - return single_open(file, i2o_seq_show_lct, PDE(inode)->data); -}; - -struct file_operations i2o_seq_fops_lct = { - .open = i2o_seq_open_lct, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release -}; - -static int i2o_seq_open_status(struct inode *inode, struct file *file) -{ - return single_open(file, i2o_seq_show_status, PDE(inode)->data); -}; - -struct file_operations i2o_seq_fops_status = { - .open = i2o_seq_open_status, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release -}; - -/* - * IOP specific entries...write field just in case someone - * ever wants one. - */ -static i2o_proc_entry generic_iop_entries[] = -{ - {"hrt", S_IFREG|S_IRUGO, NULL, NULL, &i2o_seq_fops_hrt}, - {"lct", S_IFREG|S_IRUGO, NULL, NULL, &i2o_seq_fops_lct}, - {"status", S_IFREG|S_IRUGO, NULL, NULL, &i2o_seq_fops_status}, - {"hw", S_IFREG|S_IRUGO, i2o_proc_read_hw, NULL, NULL}, - {"ddm_table", S_IFREG|S_IRUGO, i2o_proc_read_ddm_table, NULL, NULL}, - {"driver_store", S_IFREG|S_IRUGO, i2o_proc_read_driver_store, NULL, NULL}, - {"drivers_stored", S_IFREG|S_IRUGO, i2o_proc_read_drivers_stored, NULL, NULL}, - {NULL, 0, NULL, NULL, NULL} -}; - -/* - * Device specific entries - */ -static i2o_proc_entry generic_dev_entries[] = -{ - {"groups", S_IFREG|S_IRUGO, i2o_proc_read_groups, NULL, NULL}, - {"phys_dev", S_IFREG|S_IRUGO, i2o_proc_read_phys_device, NULL, NULL}, - {"claimed", S_IFREG|S_IRUGO, i2o_proc_read_claimed, NULL, NULL}, - {"users", S_IFREG|S_IRUGO, i2o_proc_read_users, NULL, NULL}, - {"priv_msgs", S_IFREG|S_IRUGO, i2o_proc_read_priv_msgs, NULL, NULL}, - {"authorized_users", S_IFREG|S_IRUGO, i2o_proc_read_authorized_users, NULL, NULL}, - {"dev_identity", S_IFREG|S_IRUGO, i2o_proc_read_dev_identity, NULL, NULL}, - {"ddm_identity", S_IFREG|S_IRUGO, i2o_proc_read_ddm_identity, NULL, NULL}, - {"user_info", S_IFREG|S_IRUGO, i2o_proc_read_uinfo, NULL, NULL}, - {"sgl_limits", S_IFREG|S_IRUGO, i2o_proc_read_sgl_limits, NULL, NULL}, - {"sensors", S_IFREG|S_IRUGO, i2o_proc_read_sensors, NULL, NULL}, - {NULL, 0, NULL, NULL, NULL} -}; - -/* - * Storage unit specific entries (SCSI Periph, BS) with device names - */ -static i2o_proc_entry rbs_dev_entries[] = -{ - {"dev_name", S_IFREG|S_IRUGO, i2o_proc_read_dev_name, NULL, NULL}, - {NULL, 0, NULL, NULL} -}; - -#define SCSI_TABLE_SIZE 13 -static char *scsi_devices[] = -{ - "Direct-Access Read/Write", - "Sequential-Access Storage", - "Printer", - "Processor", - "WORM Device", - "CD-ROM Device", 
- "Scanner Device", - "Optical Memory Device", - "Medium Changer Device", - "Communications Device", - "Graphics Art Pre-Press Device", - "Graphics Art Pre-Press Device", - "Array Controller Device" -}; - -/* private */ - -/* - * Generic LAN specific entries - * - * Should groups with r/w entries have their own subdirectory? - * - */ -static i2o_proc_entry lan_entries[] = -{ - {"lan_dev_info", S_IFREG|S_IRUGO, i2o_proc_read_lan_dev_info, NULL, NULL}, - {"lan_mac_addr", S_IFREG|S_IRUGO, i2o_proc_read_lan_mac_addr, NULL, NULL}, - {"lan_mcast_addr", S_IFREG|S_IRUGO|S_IWUSR, - i2o_proc_read_lan_mcast_addr, NULL, NULL}, - {"lan_batch_ctrl", S_IFREG|S_IRUGO|S_IWUSR, - i2o_proc_read_lan_batch_control, NULL, NULL}, - {"lan_operation", S_IFREG|S_IRUGO, i2o_proc_read_lan_operation, NULL, NULL}, - {"lan_media_operation", S_IFREG|S_IRUGO, - i2o_proc_read_lan_media_operation, NULL, NULL}, - {"lan_alt_addr", S_IFREG|S_IRUGO, i2o_proc_read_lan_alt_addr, NULL, NULL}, - {"lan_tx_info", S_IFREG|S_IRUGO, i2o_proc_read_lan_tx_info, NULL, NULL}, - {"lan_rx_info", S_IFREG|S_IRUGO, i2o_proc_read_lan_rx_info, NULL, NULL}, - - {"lan_hist_stats", S_IFREG|S_IRUGO, i2o_proc_read_lan_hist_stats, NULL, NULL}, - {NULL, 0, NULL, NULL, NULL} -}; - -/* - * Port specific LAN entries - * - */ -static i2o_proc_entry lan_eth_entries[] = -{ - {"lan_eth_stats", S_IFREG|S_IRUGO, i2o_proc_read_lan_eth_stats, NULL, NULL}, - {NULL, 0, NULL, NULL, NULL} -}; - -static i2o_proc_entry lan_tr_entries[] = -{ - {"lan_tr_stats", S_IFREG|S_IRUGO, i2o_proc_read_lan_tr_stats, NULL, NULL}, - {NULL, 0, NULL, NULL, NULL} -}; - -static i2o_proc_entry lan_fddi_entries[] = -{ - {"lan_fddi_stats", S_IFREG|S_IRUGO, i2o_proc_read_lan_fddi_stats, NULL, NULL}, - {NULL, 0, NULL, NULL, NULL} -}; - - -static char *chtostr(u8 *chars, int n) -{ - char tmp[256]; - tmp[0] = 0; - return strncat(tmp, (char *)chars, n); -} - -static int i2o_report_query_status(char *buf, int block_status, char *group) -{ - switch (block_status) - { - case -ETIMEDOUT: - return sprintf(buf, "Timeout reading group %s.\n",group); - case -ENOMEM: - return sprintf(buf, "No free memory to read the table.\n"); - case -I2O_PARAMS_STATUS_INVALID_GROUP_ID: - return sprintf(buf, "Group %s not supported.\n", group); - default: - return sprintf(buf, "Error reading group %s. 
BlockStatus 0x%02X\n", - group, -block_status); - } -} - -static char* bus_strings[] = -{ - "Local Bus", - "ISA", - "EISA", - "MCA", - "PCI", - "PCMCIA", - "NUBUS", - "CARDBUS" -}; - -static spinlock_t i2o_proc_lock = SPIN_LOCK_UNLOCKED; - -int i2o_seq_show_hrt(struct seq_file *seq, void *v) -{ - struct i2o_controller *c = (struct i2o_controller *)seq->private; - i2o_hrt *hrt = (i2o_hrt *)c->hrt; - u32 bus; - int i; - - if(hrt->hrt_version) - { - seq_printf(seq, "HRT table for controller is too new a version.\n"); - return 0; - } - - seq_printf(seq, "HRT has %d entries of %d bytes each.\n", - hrt->num_entries, hrt->entry_len << 2); - - for(i = 0; i < hrt->num_entries; i++) - { - seq_printf(seq, "Entry %d:\n", i); - seq_printf(seq, " Adapter ID: %0#10x\n", - hrt->hrt_entry[i].adapter_id); - seq_printf(seq, " Controlling tid: %0#6x\n", - hrt->hrt_entry[i].parent_tid); - - if(hrt->hrt_entry[i].bus_type != 0x80) - { - bus = hrt->hrt_entry[i].bus_type; - seq_printf(seq, " %s Information\n", bus_strings[bus]); - - switch(bus) - { - case I2O_BUS_LOCAL: - seq_printf(seq, " IOBase: %0#6x,", - hrt->hrt_entry[i].bus.local_bus.LbBaseIOPort); - seq_printf(seq, " MemoryBase: %0#10x\n", - hrt->hrt_entry[i].bus.local_bus.LbBaseMemoryAddress); - break; - - case I2O_BUS_ISA: - seq_printf(seq, " IOBase: %0#6x,", - hrt->hrt_entry[i].bus.isa_bus.IsaBaseIOPort); - seq_printf(seq, " MemoryBase: %0#10x,", - hrt->hrt_entry[i].bus.isa_bus.IsaBaseMemoryAddress); - seq_printf(seq, " CSN: %0#4x,", - hrt->hrt_entry[i].bus.isa_bus.CSN); - break; - - case I2O_BUS_EISA: - seq_printf(seq, " IOBase: %0#6x,", - hrt->hrt_entry[i].bus.eisa_bus.EisaBaseIOPort); - seq_printf(seq, " MemoryBase: %0#10x,", - hrt->hrt_entry[i].bus.eisa_bus.EisaBaseMemoryAddress); - seq_printf(seq, " Slot: %0#4x,", - hrt->hrt_entry[i].bus.eisa_bus.EisaSlotNumber); - break; - - case I2O_BUS_MCA: - seq_printf(seq, " IOBase: %0#6x,", - hrt->hrt_entry[i].bus.mca_bus.McaBaseIOPort); - seq_printf(seq, " MemoryBase: %0#10x,", - hrt->hrt_entry[i].bus.mca_bus.McaBaseMemoryAddress); - seq_printf(seq, " Slot: %0#4x,", - hrt->hrt_entry[i].bus.mca_bus.McaSlotNumber); - break; - - case I2O_BUS_PCI: - seq_printf(seq, " Bus: %0#4x", - hrt->hrt_entry[i].bus.pci_bus.PciBusNumber); - seq_printf(seq, " Dev: %0#4x", - hrt->hrt_entry[i].bus.pci_bus.PciDeviceNumber); - seq_printf(seq, " Func: %0#4x", - hrt->hrt_entry[i].bus.pci_bus.PciFunctionNumber); - seq_printf(seq, " Vendor: %0#6x", - hrt->hrt_entry[i].bus.pci_bus.PciVendorID); - seq_printf(seq, " Device: %0#6x\n", - hrt->hrt_entry[i].bus.pci_bus.PciDeviceID); - break; - - default: - seq_printf(seq, " Unsupported Bus Type\n"); - } - } - else - seq_printf(seq, " Unknown Bus Type\n"); - } - - return 0; -} - -int i2o_seq_show_lct(struct seq_file *seq, void *v) -{ - struct i2o_controller *c = (struct i2o_controller*)seq->private; - i2o_lct *lct = (i2o_lct *)c->lct; - int entries; - int i; - -#define BUS_TABLE_SIZE 3 - static char *bus_ports[] = - { - "Generic Bus", - "SCSI Bus", - "Fibre Channel Bus" - }; - - entries = (lct->table_size - 3)/9; - - seq_printf(seq, "LCT contains %d %s\n", entries, - entries == 1 ? 
"entry" : "entries"); - if(lct->boot_tid) - seq_printf(seq, "Boot Device @ ID %d\n", lct->boot_tid); - - seq_printf(seq, "Current Change Indicator: %#10x\n", lct->change_ind); - - for(i = 0; i < entries; i++) - { - seq_printf(seq, "Entry %d\n", i); - seq_printf(seq, " Class, SubClass : %s", i2o_get_class_name(lct->lct_entry[i].class_id)); - - /* - * Classes which we'll print subclass info for - */ - switch(lct->lct_entry[i].class_id & 0xFFF) - { - case I2O_CLASS_RANDOM_BLOCK_STORAGE: - switch(lct->lct_entry[i].sub_class) - { - case 0x00: - seq_printf(seq, ", Direct-Access Read/Write"); - break; - - case 0x04: - seq_printf(seq, ", WORM Drive"); - break; - - case 0x05: - seq_printf(seq, ", CD-ROM Drive"); - break; - - case 0x07: - seq_printf(seq, ", Optical Memory Device"); - break; - - default: - seq_printf(seq, ", Unknown (0x%02x)", - lct->lct_entry[i].sub_class); - break; - } - break; - - case I2O_CLASS_LAN: - switch(lct->lct_entry[i].sub_class & 0xFF) - { - case 0x30: - seq_printf(seq, ", Ethernet"); - break; - - case 0x40: - seq_printf(seq, ", 100base VG"); - break; - - case 0x50: - seq_printf(seq, ", IEEE 802.5/Token-Ring"); - break; - - case 0x60: - seq_printf(seq, ", ANSI X3T9.5 FDDI"); - break; - - case 0x70: - seq_printf(seq, ", Fibre Channel"); - break; - - default: - seq_printf(seq, ", Unknown Sub-Class (0x%02x)", - lct->lct_entry[i].sub_class & 0xFF); - break; - } - break; - - case I2O_CLASS_SCSI_PERIPHERAL: - if(lct->lct_entry[i].sub_class < SCSI_TABLE_SIZE) - seq_printf(seq, ", %s", - scsi_devices[lct->lct_entry[i].sub_class]); - else - seq_printf(seq, ", Unknown Device Type"); - break; - - case I2O_CLASS_BUS_ADAPTER_PORT: - if(lct->lct_entry[i].sub_class < BUS_TABLE_SIZE) - seq_printf(seq, ", %s", - bus_ports[lct->lct_entry[i].sub_class]); - else - seq_printf(seq, ", Unknown Bus Type"); - break; - } - seq_printf(seq, "\n"); - - seq_printf(seq, " Local TID : 0x%03x\n", lct->lct_entry[i].tid); - seq_printf(seq, " User TID : 0x%03x\n", lct->lct_entry[i].user_tid); - seq_printf(seq, " Parent TID : 0x%03x\n", - lct->lct_entry[i].parent_tid); - seq_printf(seq, " Identity Tag : 0x%x%x%x%x%x%x%x%x\n", - lct->lct_entry[i].identity_tag[0], - lct->lct_entry[i].identity_tag[1], - lct->lct_entry[i].identity_tag[2], - lct->lct_entry[i].identity_tag[3], - lct->lct_entry[i].identity_tag[4], - lct->lct_entry[i].identity_tag[5], - lct->lct_entry[i].identity_tag[6], - lct->lct_entry[i].identity_tag[7]); - seq_printf(seq, " Change Indicator : %0#10x\n", - lct->lct_entry[i].change_ind); - seq_printf(seq, " Event Capab Mask : %0#10x\n", - lct->lct_entry[i].device_flags); - } - - return 0; -} - -int i2o_seq_show_status(struct seq_file *seq, void *v) -{ - struct i2o_controller *c = (struct i2o_controller*)seq->private; - char prodstr[25]; - int version; - - i2o_status_get(c); // reread the status block - - seq_printf(seq, "Organization ID : %0#6x\n", - c->status_block->org_id); - - version = c->status_block->i2o_version; - -/* FIXME for Spec 2.0 - if (version == 0x02) { - seq_printf(seq, "Lowest I2O version supported: "); - switch(workspace[2]) { - case 0x00: - seq_printf(seq, "1.0\n"); - break; - case 0x01: - seq_printf(seq, "1.5\n"); - break; - case 0x02: - seq_printf(seq, "2.0\n"); - break; - } - - seq_printf(seq, "Highest I2O version supported: "); - switch(workspace[3]) { - case 0x00: - seq_printf(seq, "1.0\n"); - break; - case 0x01: - seq_printf(seq, "1.5\n"); - break; - case 0x02: - seq_printf(seq, "2.0\n"); - break; - } - } -*/ - seq_printf(seq, "IOP ID : %0#5x\n", - 
c->status_block->iop_id); - seq_printf(seq, "Host Unit ID : %0#6x\n", - c->status_block->host_unit_id); - seq_printf(seq, "Segment Number : %0#5x\n", - c->status_block->segment_number); - - seq_printf(seq, "I2O version : "); - switch (version) { - case 0x00: - seq_printf(seq, "1.0\n"); - break; - case 0x01: - seq_printf(seq, "1.5\n"); - break; - case 0x02: - seq_printf(seq, "2.0\n"); - break; - default: - seq_printf(seq, "Unknown version\n"); - } - - seq_printf(seq, "IOP State : "); - switch (c->status_block->iop_state) { - case 0x01: - seq_printf(seq, "INIT\n"); - break; - - case 0x02: - seq_printf(seq, "RESET\n"); - break; - - case 0x04: - seq_printf(seq, "HOLD\n"); - break; - - case 0x05: - seq_printf(seq, "READY\n"); - break; - - case 0x08: - seq_printf(seq, "OPERATIONAL\n"); - break; - - case 0x10: - seq_printf(seq, "FAILED\n"); - break; - - case 0x11: - seq_printf(seq, "FAULTED\n"); - break; - - default: - seq_printf(seq, "Unknown\n"); - break; - } - - seq_printf(seq, "Messenger Type : "); - switch (c->status_block->msg_type) { - case 0x00: - seq_printf(seq, "Memory mapped\n"); - break; - case 0x01: - seq_printf(seq, "Memory mapped only\n"); - break; - case 0x02: - seq_printf(seq,"Remote only\n"); - break; - case 0x03: - seq_printf(seq, "Memory mapped and remote\n"); - break; - default: - seq_printf(seq, "Unknown\n"); - } - - seq_printf(seq, "Inbound Frame Size : %d bytes\n", - c->status_block->inbound_frame_size<<2); - seq_printf(seq, "Max Inbound Frames : %d\n", - c->status_block->max_inbound_frames); - seq_printf(seq, "Current Inbound Frames : %d\n", - c->status_block->cur_inbound_frames); - seq_printf(seq, "Max Outbound Frames : %d\n", - c->status_block->max_outbound_frames); - - /* Spec doesn't say if NULL terminated or not... */ - memcpy(prodstr, c->status_block->product_id, 24); - prodstr[24] = '\0'; - seq_printf(seq, "Product ID : %s\n", prodstr); - seq_printf(seq, "Expected LCT Size : %d bytes\n", - c->status_block->expected_lct_size); - - seq_printf(seq, "IOP Capabilities\n"); - seq_printf(seq, " Context Field Size Support : "); - switch (c->status_block->iop_capabilities & 0x0000003) { - case 0: - seq_printf(seq, "Supports only 32-bit context fields\n"); - break; - case 1: - seq_printf(seq, "Supports only 64-bit context fields\n"); - break; - case 2: - seq_printf(seq, "Supports 32-bit and 64-bit context fields, " - "but not concurrently\n"); - break; - case 3: - seq_printf(seq, "Supports 32-bit and 64-bit context fields " - "concurrently\n"); - break; - default: - seq_printf(seq, "0x%08x\n",c->status_block->iop_capabilities); - } - seq_printf(seq, " Current Context Field Size : "); - switch (c->status_block->iop_capabilities & 0x0000000C) { - case 0: - seq_printf(seq, "not configured\n"); - break; - case 4: - seq_printf(seq, "Supports only 32-bit context fields\n"); - break; - case 8: - seq_printf(seq, "Supports only 64-bit context fields\n"); - break; - case 12: - seq_printf(seq, "Supports both 32-bit or 64-bit context fields " - "concurrently\n"); - break; - default: - seq_printf(seq, "\n"); - } - seq_printf(seq, " Inbound Peer Support : %s\n", - (c->status_block->iop_capabilities & 0x00000010) ? "Supported" : "Not supported"); - seq_printf(seq, " Outbound Peer Support : %s\n", - (c->status_block->iop_capabilities & 0x00000020) ? "Supported" : "Not supported"); - seq_printf(seq, " Peer to Peer Support : %s\n", - (c->status_block->iop_capabilities & 0x00000040) ? 
"Supported" : "Not supported"); - - seq_printf(seq, "Desired private memory size : %d kB\n", - c->status_block->desired_mem_size>>10); - seq_printf(seq, "Allocated private memory size : %d kB\n", - c->status_block->current_mem_size>>10); - seq_printf(seq, "Private memory base address : %0#10x\n", - c->status_block->current_mem_base); - seq_printf(seq, "Desired private I/O size : %d kB\n", - c->status_block->desired_io_size>>10); - seq_printf(seq, "Allocated private I/O size : %d kB\n", - c->status_block->current_io_size>>10); - seq_printf(seq, "Private I/O base address : %0#10x\n", - c->status_block->current_io_base); - - return 0; -} - -int i2o_proc_read_hw(char *buf, char **start, off_t offset, int len, - int *eof, void *data) -{ - struct i2o_controller *c = (struct i2o_controller*)data; - static u32 work32[5]; - static u8 *work8 = (u8*)work32; - static u16 *work16 = (u16*)work32; - int token; - u32 hwcap; - - static char *cpu_table[] = - { - "Intel 80960 series", - "AMD2900 series", - "Motorola 68000 series", - "ARM series", - "MIPS series", - "Sparc series", - "PowerPC series", - "Intel x86 series" - }; - - spin_lock(&i2o_proc_lock); - - len = 0; - - token = i2o_query_scalar(c, ADAPTER_TID, 0x0000, -1, &work32, sizeof(work32)); - - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0x0000 IOP Hardware"); - spin_unlock(&i2o_proc_lock); - return len; - } - - len += sprintf(buf+len, "I2O Vendor ID : %0#6x\n", work16[0]); - len += sprintf(buf+len, "Product ID : %0#6x\n", work16[1]); - len += sprintf(buf+len, "CPU : "); - if(work8[16] > 8) - len += sprintf(buf+len, "Unknown\n"); - else - len += sprintf(buf+len, "%s\n", cpu_table[work8[16]]); - /* Anyone using ProcessorVersion? */ - - len += sprintf(buf+len, "RAM : %dkB\n", work32[1]>>10); - len += sprintf(buf+len, "Non-Volatile Mem : %dkB\n", work32[2]>>10); - - hwcap = work32[3]; - len += sprintf(buf+len, "Capabilities : 0x%08x\n", hwcap); - len += sprintf(buf+len, " [%s] Self booting\n", - (hwcap&0x00000001) ? "+" : "-"); - len += sprintf(buf+len, " [%s] Upgradable IRTOS\n", - (hwcap&0x00000002) ? "+" : "-"); - len += sprintf(buf+len, " [%s] Supports downloading DDMs\n", - (hwcap&0x00000004) ? "+" : "-"); - len += sprintf(buf+len, " [%s] Supports installing DDMs\n", - (hwcap&0x00000008) ? "+" : "-"); - len += sprintf(buf+len, " [%s] Battery-backed RAM\n", - (hwcap&0x00000010) ? 
"+" : "-"); - - spin_unlock(&i2o_proc_lock); - - return len; -} - - -/* Executive group 0003h - Executing DDM List (table) */ -int i2o_proc_read_ddm_table(char *buf, char **start, off_t offset, int len, - int *eof, void *data) -{ - struct i2o_controller *c = (struct i2o_controller*)data; - int token; - int i; - - typedef struct _i2o_exec_execute_ddm_table { - u16 ddm_tid; - u8 module_type; - u8 reserved; - u16 i2o_vendor_id; - u16 module_id; - u8 module_name_version[28]; - u32 data_size; - u32 code_size; - } i2o_exec_execute_ddm_table; - - struct - { - u16 result_count; - u16 pad; - u16 block_size; - u8 block_status; - u8 error_info_size; - u16 row_count; - u16 more_flag; - i2o_exec_execute_ddm_table ddm_table[MAX_I2O_MODULES]; - } *result; - - i2o_exec_execute_ddm_table ddm_table; - - result = kmalloc(sizeof(*result), GFP_KERNEL); - if(!result) - return -ENOMEM; - - spin_lock(&i2o_proc_lock); - len = 0; - - token = i2o_query_table(I2O_PARAMS_TABLE_GET, - c, ADAPTER_TID, - 0x0003, -1, - NULL, 0, - result, sizeof(*result)); - - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0x0003 Executing DDM List"); - goto out; - } - - len += sprintf(buf+len, "Tid Module_type Vendor Mod_id Module_name Vrs Data_size Code_size\n"); - ddm_table=result->ddm_table[0]; - - for(i=0; i < result->row_count; ddm_table=result->ddm_table[++i]) - { - len += sprintf(buf+len, "0x%03x ", ddm_table.ddm_tid & 0xFFF); - - switch(ddm_table.module_type) - { - case 0x01: - len += sprintf(buf+len, "Downloaded DDM "); - break; - case 0x22: - len += sprintf(buf+len, "Embedded DDM "); - break; - default: - len += sprintf(buf+len, " "); - } - - len += sprintf(buf+len, "%-#7x", ddm_table.i2o_vendor_id); - len += sprintf(buf+len, "%-#8x", ddm_table.module_id); - len += sprintf(buf+len, "%-29s", chtostr(ddm_table.module_name_version, 28)); - len += sprintf(buf+len, "%9d ", ddm_table.data_size); - len += sprintf(buf+len, "%8d", ddm_table.code_size); - - len += sprintf(buf+len, "\n"); - } -out: - spin_unlock(&i2o_proc_lock); - kfree(result); - return len; -} - - -/* Executive group 0004h - Driver Store (scalar) */ -int i2o_proc_read_driver_store(char *buf, char **start, off_t offset, int len, - int *eof, void *data) -{ - struct i2o_controller *c = (struct i2o_controller*)data; - u32 work32[8]; - int token; - - spin_lock(&i2o_proc_lock); - - len = 0; - - token = i2o_query_scalar(c, ADAPTER_TID, 0x0004, -1, &work32, sizeof(work32)); - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0x0004 Driver Store"); - spin_unlock(&i2o_proc_lock); - return len; - } - - len += sprintf(buf+len, "Module limit : %d\n" - "Module count : %d\n" - "Current space : %d kB\n" - "Free space : %d kB\n", - work32[0], work32[1], work32[2]>>10, work32[3]>>10); - - spin_unlock(&i2o_proc_lock); - - return len; -} - - -/* Executive group 0005h - Driver Store Table (table) */ -int i2o_proc_read_drivers_stored(char *buf, char **start, off_t offset, - int len, int *eof, void *data) -{ - typedef struct _i2o_driver_store { - u16 stored_ddm_index; - u8 module_type; - u8 reserved; - u16 i2o_vendor_id; - u16 module_id; - u8 module_name_version[28]; - u8 date[8]; - u32 module_size; - u32 mpb_size; - u32 module_flags; - } i2o_driver_store_table; - - struct i2o_controller *c = (struct i2o_controller*)data; - int token; - int i; - - typedef struct - { - u16 result_count; - u16 pad; - u16 block_size; - u8 block_status; - u8 error_info_size; - u16 row_count; - u16 more_flag; - i2o_driver_store_table dst[MAX_I2O_MODULES]; - } 
i2o_driver_result_table; - - i2o_driver_result_table *result; - i2o_driver_store_table *dst; - - - len = 0; - - result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL); - if(result == NULL) - return -ENOMEM; - - spin_lock(&i2o_proc_lock); - - token = i2o_query_table(I2O_PARAMS_TABLE_GET, - c, ADAPTER_TID, 0x0005, -1, NULL, 0, - result, sizeof(*result)); - - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0x0005 DRIVER STORE TABLE"); - spin_unlock(&i2o_proc_lock); - kfree(result); - return len; - } - - len += sprintf(buf+len, "# Module_type Vendor Mod_id Module_name Vrs" - "Date Mod_size Par_size Flags\n"); - for(i=0, dst=&result->dst[0]; i < result->row_count; dst=&result->dst[++i]) - { - len += sprintf(buf+len, "%-3d", dst->stored_ddm_index); - switch(dst->module_type) - { - case 0x01: - len += sprintf(buf+len, "Downloaded DDM "); - break; - case 0x22: - len += sprintf(buf+len, "Embedded DDM "); - break; - default: - len += sprintf(buf+len, " "); - } - -#if 0 - if(c->i2oversion == 0x02) - len += sprintf(buf+len, "%-d", dst->module_state); -#endif - - len += sprintf(buf+len, "%-#7x", dst->i2o_vendor_id); - len += sprintf(buf+len, "%-#8x", dst->module_id); - len += sprintf(buf+len, "%-29s", chtostr(dst->module_name_version,28)); - len += sprintf(buf+len, "%-9s", chtostr(dst->date,8)); - len += sprintf(buf+len, "%8d ", dst->module_size); - len += sprintf(buf+len, "%8d ", dst->mpb_size); - len += sprintf(buf+len, "0x%04x", dst->module_flags); -#if 0 - if(c->i2oversion == 0x02) - len += sprintf(buf+len, "%d", - dst->notification_level); -#endif - len += sprintf(buf+len, "\n"); - } - - spin_unlock(&i2o_proc_lock); - kfree(result); - return len; -} - - -/* Generic group F000h - Params Descriptor (table) */ -int i2o_proc_read_groups(char *buf, char **start, off_t offset, int len, - int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - int token; - int i; - u8 properties; - - typedef struct _i2o_group_info - { - u16 group_number; - u16 field_count; - u16 row_count; - u8 properties; - u8 reserved; - } i2o_group_info; - - struct - { - u16 result_count; - u16 pad; - u16 block_size; - u8 block_status; - u8 error_info_size; - u16 row_count; - u16 more_flag; - i2o_group_info group[256]; - } *result; - - result = kmalloc(sizeof(*result), GFP_KERNEL); - if(!result) - return -ENOMEM; - - spin_lock(&i2o_proc_lock); - - len = 0; - - token = i2o_query_table(I2O_PARAMS_TABLE_GET, - d->controller, d->lct_data.tid, 0xF000, -1, NULL, 0, - result, sizeof(*result)); - - if (token < 0) { - len = i2o_report_query_status(buf+len, token, "0xF000 Params Descriptor"); - goto out; - } - - len += sprintf(buf+len, "# Group FieldCount RowCount Type Add Del Clear\n"); - - for (i=0; i < result->row_count; i++) - { - len += sprintf(buf+len, "%-3d", i); - len += sprintf(buf+len, "0x%04X ", result->group[i].group_number); - len += sprintf(buf+len, "%10d ", result->group[i].field_count); - len += sprintf(buf+len, "%8d ", result->group[i].row_count); - - properties = result->group[i].properties; - if (properties & 0x1) len += sprintf(buf+len, "Table "); - else len += sprintf(buf+len, "Scalar "); - if (properties & 0x2) len += sprintf(buf+len, " + "); - else len += sprintf(buf+len, " - "); - if (properties & 0x4) len += sprintf(buf+len, " + "); - else len += sprintf(buf+len, " - "); - if (properties & 0x8) len += sprintf(buf+len, " + "); - else len += sprintf(buf+len, " - "); - - len += sprintf(buf+len, "\n"); - } - - if (result->more_flag) - len += sprintf(buf+len, "There is 
more...\n"); -out: - spin_unlock(&i2o_proc_lock); - kfree(result); - return len; -} - - -/* Generic group F001h - Physical Device Table (table) */ -int i2o_proc_read_phys_device(char *buf, char **start, off_t offset, int len, - int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - int token; - int i; - - struct - { - u16 result_count; - u16 pad; - u16 block_size; - u8 block_status; - u8 error_info_size; - u16 row_count; - u16 more_flag; - u32 adapter_id[64]; - } result; - - spin_lock(&i2o_proc_lock); - len = 0; - - token = i2o_query_table(I2O_PARAMS_TABLE_GET, - d->controller, d->lct_data.tid, - 0xF001, -1, NULL, 0, - &result, sizeof(result)); - - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0xF001 Physical Device Table"); - spin_unlock(&i2o_proc_lock); - return len; - } - - if (result.row_count) - len += sprintf(buf+len, "# AdapterId\n"); - - for (i=0; i < result.row_count; i++) - { - len += sprintf(buf+len, "%-2d", i); - len += sprintf(buf+len, "%#7x\n", result.adapter_id[i]); - } - - if (result.more_flag) - len += sprintf(buf+len, "There is more...\n"); - - spin_unlock(&i2o_proc_lock); - return len; -} - -/* Generic group F002h - Claimed Table (table) */ -int i2o_proc_read_claimed(char *buf, char **start, off_t offset, int len, - int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - int token; - int i; - - struct { - u16 result_count; - u16 pad; - u16 block_size; - u8 block_status; - u8 error_info_size; - u16 row_count; - u16 more_flag; - u16 claimed_tid[64]; - } result; - - spin_lock(&i2o_proc_lock); - len = 0; - - token = i2o_query_table(I2O_PARAMS_TABLE_GET, - d->controller, d->lct_data.tid, - 0xF002, -1, NULL, 0, - &result, sizeof(result)); - - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0xF002 Claimed Table"); - spin_unlock(&i2o_proc_lock); - return len; - } - - if (result.row_count) - len += sprintf(buf+len, "# ClaimedTid\n"); - - for (i=0; i < result.row_count; i++) - { - len += sprintf(buf+len, "%-2d", i); - len += sprintf(buf+len, "%#7x\n", result.claimed_tid[i]); - } - - if (result.more_flag) - len += sprintf(buf+len, "There is more...\n"); - - spin_unlock(&i2o_proc_lock); - return len; -} - -/* Generic group F003h - User Table (table) */ -int i2o_proc_read_users(char *buf, char **start, off_t offset, int len, - int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - int token; - int i; - - typedef struct _i2o_user_table - { - u16 instance; - u16 user_tid; - u8 claim_type; - u8 reserved1; - u16 reserved2; - } i2o_user_table; - - struct - { - u16 result_count; - u16 pad; - u16 block_size; - u8 block_status; - u8 error_info_size; - u16 row_count; - u16 more_flag; - i2o_user_table user[64]; - } *result; - - result = kmalloc(sizeof(*result), GFP_KERNEL); - if(!result) - return -ENOMEM; - - spin_lock(&i2o_proc_lock); - len = 0; - - token = i2o_query_table(I2O_PARAMS_TABLE_GET, - d->controller, d->lct_data.tid, - 0xF003, -1, NULL, 0, - result, sizeof(*result)); - - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0xF003 User Table"); - goto out; - } - - len += sprintf(buf+len, "# Instance UserTid ClaimType\n"); - - for(i=0; i < result->row_count; i++) - { - len += sprintf(buf+len, "%-3d", i); - len += sprintf(buf+len, "%#8x ", result->user[i].instance); - len += sprintf(buf+len, "%#7x ", result->user[i].user_tid); - len += sprintf(buf+len, "%#9x\n", result->user[i].claim_type); - } - - if (result->more_flag) - len += sprintf(buf+len, "There is 
more...\n"); -out: - spin_unlock(&i2o_proc_lock); - kfree(result); - return len; -} - -/* Generic group F005h - Private message extensions (table) (optional) */ -int i2o_proc_read_priv_msgs(char *buf, char **start, off_t offset, int len, - int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - int token; - int i; - - typedef struct _i2o_private - { - u16 ext_instance; - u16 organization_id; - u16 x_function_code; - } i2o_private; - - struct - { - u16 result_count; - u16 pad; - u16 block_size; - u8 block_status; - u8 error_info_size; - u16 row_count; - u16 more_flag; - i2o_private extension[64]; - } result; - - spin_lock(&i2o_proc_lock); - - len = 0; - - token = i2o_query_table(I2O_PARAMS_TABLE_GET, - d->controller, d->lct_data.tid, - 0xF000, -1, - NULL, 0, - &result, sizeof(result)); - - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0xF005 Private Message Extensions (optional)"); - spin_unlock(&i2o_proc_lock); - return len; - } - - len += sprintf(buf+len, "Instance# OrgId FunctionCode\n"); - - for(i=0; i < result.row_count; i++) - { - len += sprintf(buf+len, "%0#9x ", result.extension[i].ext_instance); - len += sprintf(buf+len, "%0#6x ", result.extension[i].organization_id); - len += sprintf(buf+len, "%0#6x", result.extension[i].x_function_code); - - len += sprintf(buf+len, "\n"); - } - - if(result.more_flag) - len += sprintf(buf+len, "There is more...\n"); - - spin_unlock(&i2o_proc_lock); - - return len; -} - - -/* Generic group F006h - Authorized User Table (table) */ -int i2o_proc_read_authorized_users(char *buf, char **start, off_t offset, int len, - int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - int token; - int i; - - struct - { - u16 result_count; - u16 pad; - u16 block_size; - u8 block_status; - u8 error_info_size; - u16 row_count; - u16 more_flag; - u32 alternate_tid[64]; - } result; - - spin_lock(&i2o_proc_lock); - len = 0; - - token = i2o_query_table(I2O_PARAMS_TABLE_GET, - d->controller, d->lct_data.tid, - 0xF006, -1, - NULL, 0, - &result, sizeof(result)); - - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0xF006 Autohorized User Table"); - spin_unlock(&i2o_proc_lock); - return len; - } - - if (result.row_count) - len += sprintf(buf+len, "# AlternateTid\n"); - - for(i=0; i < result.row_count; i++) - { - len += sprintf(buf+len, "%-2d", i); - len += sprintf(buf+len, "%#7x ", result.alternate_tid[i]); - } - - if (result.more_flag) - len += sprintf(buf+len, "There is more...\n"); - - spin_unlock(&i2o_proc_lock); - return len; -} - - -/* Generic group F100h - Device Identity (scalar) */ -int i2o_proc_read_dev_identity(char *buf, char **start, off_t offset, int len, - int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number - // == (allow) 512d bytes (max) - static u16 *work16 = (u16*)work32; - int token; - - spin_lock(&i2o_proc_lock); - - len = 0; - - token = i2o_query_scalar(d->controller, d->lct_data.tid, - 0xF100, -1, - &work32, sizeof(work32)); - - if (token < 0) { - len += i2o_report_query_status(buf+len, token ,"0xF100 Device Identity"); - spin_unlock(&i2o_proc_lock); - return len; - } - - len += sprintf(buf, "Device Class : %s\n", i2o_get_class_name(work16[0])); - len += sprintf(buf+len, "Owner TID : %0#5x\n", work16[2]); - len += sprintf(buf+len, "Parent TID : %0#5x\n", work16[3]); - len += sprintf(buf+len, "Vendor info : %s\n", chtostr((u8 *)(work32+2), 16)); - len += 
sprintf(buf+len, "Product info : %s\n", chtostr((u8 *)(work32+6), 16)); - len += sprintf(buf+len, "Description : %s\n", chtostr((u8 *)(work32+10), 16)); - len += sprintf(buf+len, "Product rev. : %s\n", chtostr((u8 *)(work32+14), 8)); - - len += sprintf(buf+len, "Serial number : "); - len = print_serial_number(buf, len, - (u8*)(work32+16), - /* allow for SNLen plus - * possible trailing '\0' - */ - sizeof(work32)-(16*sizeof(u32))-2 - ); - len += sprintf(buf+len, "\n"); - - spin_unlock(&i2o_proc_lock); - - return len; -} - - -int i2o_proc_read_dev_name(char *buf, char **start, off_t offset, int len, - int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - - if ( d->dev_name[0] == '\0' ) - return 0; - - len = sprintf(buf, "%s\n", d->dev_name); - - return len; -} - - -/* Generic group F101h - DDM Identity (scalar) */ -int i2o_proc_read_ddm_identity(char *buf, char **start, off_t offset, int len, - int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - int token; - - struct - { - u16 ddm_tid; - u8 module_name[24]; - u8 module_rev[8]; - u8 sn_format; - u8 serial_number[12]; - u8 pad[256]; // allow up to 256 byte (max) serial number - } result; - - spin_lock(&i2o_proc_lock); - - len = 0; - - token = i2o_query_scalar(d->controller, d->lct_data.tid, - 0xF101, -1, - &result, sizeof(result)); - - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0xF101 DDM Identity"); - spin_unlock(&i2o_proc_lock); - return len; - } - - len += sprintf(buf, "Registering DDM TID : 0x%03x\n", result.ddm_tid); - len += sprintf(buf+len, "Module name : %s\n", chtostr(result.module_name, 24)); - len += sprintf(buf+len, "Module revision : %s\n", chtostr(result.module_rev, 8)); - - len += sprintf(buf+len, "Serial number : "); - len = print_serial_number(buf, len, result.serial_number, sizeof(result)-36); - /* allow for SNLen plus possible trailing '\0' */ - - len += sprintf(buf+len, "\n"); - - spin_unlock(&i2o_proc_lock); - - return len; -} - -/* Generic group F102h - User Information (scalar) */ -int i2o_proc_read_uinfo(char *buf, char **start, off_t offset, int len, - int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - int token; - - struct - { - u8 device_name[64]; - u8 service_name[64]; - u8 physical_location[64]; - u8 instance_number[4]; - } result; - - spin_lock(&i2o_proc_lock); - len = 0; - - token = i2o_query_scalar(d->controller, d->lct_data.tid, - 0xF102, -1, - &result, sizeof(result)); - - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0xF102 User Information"); - spin_unlock(&i2o_proc_lock); - return len; - } - - len += sprintf(buf, "Device name : %s\n", chtostr(result.device_name, 64)); - len += sprintf(buf+len, "Service name : %s\n", chtostr(result.service_name, 64)); - len += sprintf(buf+len, "Physical name : %s\n", chtostr(result.physical_location, 64)); - len += sprintf(buf+len, "Instance number : %s\n", chtostr(result.instance_number, 4)); - - spin_unlock(&i2o_proc_lock); - return len; -} - -/* Generic group F103h - SGL Operating Limits (scalar) */ -int i2o_proc_read_sgl_limits(char *buf, char **start, off_t offset, int len, - int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - static u32 work32[12]; - static u16 *work16 = (u16 *)work32; - static u8 *work8 = (u8 *)work32; - int token; - - spin_lock(&i2o_proc_lock); - - len = 0; - - token = i2o_query_scalar(d->controller, d->lct_data.tid, - 0xF103, -1, - &work32, sizeof(work32)); - - if (token < 0) { - len += 
i2o_report_query_status(buf+len, token,"0xF103 SGL Operating Limits"); - spin_unlock(&i2o_proc_lock); - return len; - } - - len += sprintf(buf, "SGL chain size : %d\n", work32[0]); - len += sprintf(buf+len, "Max SGL chain size : %d\n", work32[1]); - len += sprintf(buf+len, "SGL chain size target : %d\n", work32[2]); - len += sprintf(buf+len, "SGL frag count : %d\n", work16[6]); - len += sprintf(buf+len, "Max SGL frag count : %d\n", work16[7]); - len += sprintf(buf+len, "SGL frag count target : %d\n", work16[8]); - - if (d->i2oversion == 0x02) - { - len += sprintf(buf+len, "SGL data alignment : %d\n", work16[8]); - len += sprintf(buf+len, "SGL addr limit : %d\n", work8[20]); - len += sprintf(buf+len, "SGL addr sizes supported : "); - if (work8[21] & 0x01) - len += sprintf(buf+len, "32 bit "); - if (work8[21] & 0x02) - len += sprintf(buf+len, "64 bit "); - if (work8[21] & 0x04) - len += sprintf(buf+len, "96 bit "); - if (work8[21] & 0x08) - len += sprintf(buf+len, "128 bit "); - len += sprintf(buf+len, "\n"); - } - - spin_unlock(&i2o_proc_lock); - - return len; -} - -/* Generic group F200h - Sensors (scalar) */ -int i2o_proc_read_sensors(char *buf, char **start, off_t offset, int len, - int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - int token; - - struct - { - u16 sensor_instance; - u8 component; - u16 component_instance; - u8 sensor_class; - u8 sensor_type; - u8 scaling_exponent; - u32 actual_reading; - u32 minimum_reading; - u32 low2lowcat_treshold; - u32 lowcat2low_treshold; - u32 lowwarn2low_treshold; - u32 low2lowwarn_treshold; - u32 norm2lowwarn_treshold; - u32 lowwarn2norm_treshold; - u32 nominal_reading; - u32 hiwarn2norm_treshold; - u32 norm2hiwarn_treshold; - u32 high2hiwarn_treshold; - u32 hiwarn2high_treshold; - u32 hicat2high_treshold; - u32 hi2hicat_treshold; - u32 maximum_reading; - u8 sensor_state; - u16 event_enable; - } result; - - spin_lock(&i2o_proc_lock); - len = 0; - - token = i2o_query_scalar(d->controller, d->lct_data.tid, - 0xF200, -1, - &result, sizeof(result)); - - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0xF200 Sensors (optional)"); - spin_unlock(&i2o_proc_lock); - return len; - } - - len += sprintf(buf+len, "Sensor instance : %d\n", result.sensor_instance); - - len += sprintf(buf+len, "Component : %d = ", result.component); - switch (result.component) - { - case 0: len += sprintf(buf+len, "Other"); - break; - case 1: len += sprintf(buf+len, "Planar logic Board"); - break; - case 2: len += sprintf(buf+len, "CPU"); - break; - case 3: len += sprintf(buf+len, "Chassis"); - break; - case 4: len += sprintf(buf+len, "Power Supply"); - break; - case 5: len += sprintf(buf+len, "Storage"); - break; - case 6: len += sprintf(buf+len, "External"); - break; - } - len += sprintf(buf+len,"\n"); - - len += sprintf(buf+len, "Component instance : %d\n", result.component_instance); - len += sprintf(buf+len, "Sensor class : %s\n", - result.sensor_class ? 
"Analog" : "Digital"); - - len += sprintf(buf+len, "Sensor type : %d = ",result.sensor_type); - switch (result.sensor_type) - { - case 0: len += sprintf(buf+len, "Other\n"); - break; - case 1: len += sprintf(buf+len, "Thermal\n"); - break; - case 2: len += sprintf(buf+len, "DC voltage (DC volts)\n"); - break; - case 3: len += sprintf(buf+len, "AC voltage (AC volts)\n"); - break; - case 4: len += sprintf(buf+len, "DC current (DC amps)\n"); - break; - case 5: len += sprintf(buf+len, "AC current (AC volts)\n"); - break; - case 6: len += sprintf(buf+len, "Door open\n"); - break; - case 7: len += sprintf(buf+len, "Fan operational\n"); - break; - } - - len += sprintf(buf+len, "Scaling exponent : %d\n", result.scaling_exponent); - len += sprintf(buf+len, "Actual reading : %d\n", result.actual_reading); - len += sprintf(buf+len, "Minimum reading : %d\n", result.minimum_reading); - len += sprintf(buf+len, "Low2LowCat treshold : %d\n", result.low2lowcat_treshold); - len += sprintf(buf+len, "LowCat2Low treshold : %d\n", result.lowcat2low_treshold); - len += sprintf(buf+len, "LowWarn2Low treshold : %d\n", result.lowwarn2low_treshold); - len += sprintf(buf+len, "Low2LowWarn treshold : %d\n", result.low2lowwarn_treshold); - len += sprintf(buf+len, "Norm2LowWarn treshold : %d\n", result.norm2lowwarn_treshold); - len += sprintf(buf+len, "LowWarn2Norm treshold : %d\n", result.lowwarn2norm_treshold); - len += sprintf(buf+len, "Nominal reading : %d\n", result.nominal_reading); - len += sprintf(buf+len, "HiWarn2Norm treshold : %d\n", result.hiwarn2norm_treshold); - len += sprintf(buf+len, "Norm2HiWarn treshold : %d\n", result.norm2hiwarn_treshold); - len += sprintf(buf+len, "High2HiWarn treshold : %d\n", result.high2hiwarn_treshold); - len += sprintf(buf+len, "HiWarn2High treshold : %d\n", result.hiwarn2high_treshold); - len += sprintf(buf+len, "HiCat2High treshold : %d\n", result.hicat2high_treshold); - len += sprintf(buf+len, "High2HiCat treshold : %d\n", result.hi2hicat_treshold); - len += sprintf(buf+len, "Maximum reading : %d\n", result.maximum_reading); - - len += sprintf(buf+len, "Sensor state : %d = ", result.sensor_state); - switch (result.sensor_state) - { - case 0: len += sprintf(buf+len, "Normal\n"); - break; - case 1: len += sprintf(buf+len, "Abnormal\n"); - break; - case 2: len += sprintf(buf+len, "Unknown\n"); - break; - case 3: len += sprintf(buf+len, "Low Catastrophic (LoCat)\n"); - break; - case 4: len += sprintf(buf+len, "Low (Low)\n"); - break; - case 5: len += sprintf(buf+len, "Low Warning (LoWarn)\n"); - break; - case 6: len += sprintf(buf+len, "High Warning (HiWarn)\n"); - break; - case 7: len += sprintf(buf+len, "High (High)\n"); - break; - case 8: len += sprintf(buf+len, "High Catastrophic (HiCat)\n"); - break; - } - - len += sprintf(buf+len, "Event_enable : 0x%02X\n", result.event_enable); - len += sprintf(buf+len, " [%s] Operational state change. \n", - (result.event_enable & 0x01) ? "+" : "-" ); - len += sprintf(buf+len, " [%s] Low catastrophic. \n", - (result.event_enable & 0x02) ? "+" : "-" ); - len += sprintf(buf+len, " [%s] Low reading. \n", - (result.event_enable & 0x04) ? "+" : "-" ); - len += sprintf(buf+len, " [%s] Low warning. \n", - (result.event_enable & 0x08) ? "+" : "-" ); - len += sprintf(buf+len, " [%s] Change back to normal from out of range state. \n", - (result.event_enable & 0x10) ? "+" : "-" ); - len += sprintf(buf+len, " [%s] High warning. \n", - (result.event_enable & 0x20) ? "+" : "-" ); - len += sprintf(buf+len, " [%s] High reading. 
\n", - (result.event_enable & 0x40) ? "+" : "-" ); - len += sprintf(buf+len, " [%s] High catastrophic. \n", - (result.event_enable & 0x80) ? "+" : "-" ); - - spin_unlock(&i2o_proc_lock); - return len; -} - - -static int print_serial_number(char *buff, int pos, u8 *serialno, int max_len) -{ - int i; - - /* 19990419 -sralston - * The I2O v1.5 (and v2.0 so far) "official specification" - * got serial numbers WRONG! - * Apparently, and despite what Section 3.4.4 says and - * Figure 3-35 shows (pg 3-39 in the pdf doc), - * the convention / consensus seems to be: - * + First byte is SNFormat - * + Second byte is SNLen (but only if SNFormat==7 (?)) - * + (v2.0) SCSI+BS may use IEEE Registered (64 or 128 bit) format - */ - switch(serialno[0]) - { - case I2O_SNFORMAT_BINARY: /* Binary */ - pos += sprintf(buff+pos, "0x"); - for(i = 0; i < serialno[1]; i++) - { - pos += sprintf(buff+pos, "%02X", serialno[2+i]); - } - break; - - case I2O_SNFORMAT_ASCII: /* ASCII */ - if ( serialno[1] < ' ' ) /* printable or SNLen? */ - { - /* sanity */ - max_len = (max_len < serialno[1]) ? max_len : serialno[1]; - serialno[1+max_len] = '\0'; - - /* just print it */ - pos += sprintf(buff+pos, "%s", &serialno[2]); - } - else - { - /* print chars for specified length */ - for(i = 0; i < serialno[1]; i++) - { - pos += sprintf(buff+pos, "%c", serialno[2+i]); - } - } - break; - - case I2O_SNFORMAT_UNICODE: /* UNICODE */ - pos += sprintf(buff+pos, "UNICODE Format. Can't Display\n"); - break; - - case I2O_SNFORMAT_LAN48_MAC: /* LAN-48 MAC Address */ - pos += sprintf(buff+pos, - "LAN-48 MAC address @ %02X:%02X:%02X:%02X:%02X:%02X", - serialno[2], serialno[3], - serialno[4], serialno[5], - serialno[6], serialno[7]); - break; - - case I2O_SNFORMAT_WAN: /* WAN MAC Address */ - /* FIXME: Figure out what a WAN access address looks like?? */ - pos += sprintf(buff+pos, "WAN Access Address"); - break; - -/* plus new in v2.0 */ - case I2O_SNFORMAT_LAN64_MAC: /* LAN-64 MAC Address */ - /* FIXME: Figure out what a LAN-64 address really looks like?? */ - pos += sprintf(buff+pos, - "LAN-64 MAC address @ [?:%02X:%02X:?] %02X:%02X:%02X:%02X:%02X:%02X", - serialno[8], serialno[9], - serialno[2], serialno[3], - serialno[4], serialno[5], - serialno[6], serialno[7]); - break; - - - case I2O_SNFORMAT_DDM: /* I2O DDM */ - pos += sprintf(buff+pos, - "DDM: Tid=%03Xh, Rsvd=%04Xh, OrgId=%04Xh", - *(u16*)&serialno[2], - *(u16*)&serialno[4], - *(u16*)&serialno[6]); - break; - - case I2O_SNFORMAT_IEEE_REG64: /* IEEE Registered (64-bit) */ - case I2O_SNFORMAT_IEEE_REG128: /* IEEE Registered (128-bit) */ - /* FIXME: Figure if this is even close?? 
*/ - pos += sprintf(buff+pos, - "IEEE NodeName(hi,lo)=(%08Xh:%08Xh), PortName(hi,lo)=(%08Xh:%08Xh)\n", - *(u32*)&serialno[2], - *(u32*)&serialno[6], - *(u32*)&serialno[10], - *(u32*)&serialno[14]); - break; - - - case I2O_SNFORMAT_UNKNOWN: /* Unknown 0 */ - case I2O_SNFORMAT_UNKNOWN2: /* Unknown 0xff */ - default: - pos += sprintf(buff+pos, "Unknown data format (0x%02x)", - serialno[0]); - break; - } - - return pos; -} - -const char * i2o_get_connector_type(int conn) -{ - int idx = 16; - static char *i2o_connector_type[] = { - "OTHER", - "UNKNOWN", - "AUI", - "UTP", - "BNC", - "RJ45", - "STP DB9", - "FIBER MIC", - "APPLE AUI", - "MII", - "DB9", - "HSSDC", - "DUPLEX SC FIBER", - "DUPLEX ST FIBER", - "TNC/BNC", - "HW DEFAULT" - }; - - switch(conn) - { - case 0x00000000: - idx = 0; - break; - case 0x00000001: - idx = 1; - break; - case 0x00000002: - idx = 2; - break; - case 0x00000003: - idx = 3; - break; - case 0x00000004: - idx = 4; - break; - case 0x00000005: - idx = 5; - break; - case 0x00000006: - idx = 6; - break; - case 0x00000007: - idx = 7; - break; - case 0x00000008: - idx = 8; - break; - case 0x00000009: - idx = 9; - break; - case 0x0000000A: - idx = 10; - break; - case 0x0000000B: - idx = 11; - break; - case 0x0000000C: - idx = 12; - break; - case 0x0000000D: - idx = 13; - break; - case 0x0000000E: - idx = 14; - break; - case 0xFFFFFFFF: - idx = 15; - break; - } - - return i2o_connector_type[idx]; -} - - -const char * i2o_get_connection_type(int conn) -{ - int idx = 0; - static char *i2o_connection_type[] = { - "Unknown", - "AUI", - "10BASE5", - "FIORL", - "10BASE2", - "10BROAD36", - "10BASE-T", - "10BASE-FP", - "10BASE-FB", - "10BASE-FL", - "100BASE-TX", - "100BASE-FX", - "100BASE-T4", - "1000BASE-SX", - "1000BASE-LX", - "1000BASE-CX", - "1000BASE-T", - "100VG-ETHERNET", - "100VG-TOKEN RING", - "4MBIT TOKEN RING", - "16 Mb Token Ring", - "125 MBAUD FDDI", - "Point-to-point", - "Arbitrated loop", - "Public loop", - "Fabric", - "Emulation", - "Other", - "HW default" - }; - - switch(conn) - { - case I2O_LAN_UNKNOWN: - idx = 0; - break; - case I2O_LAN_AUI: - idx = 1; - break; - case I2O_LAN_10BASE5: - idx = 2; - break; - case I2O_LAN_FIORL: - idx = 3; - break; - case I2O_LAN_10BASE2: - idx = 4; - break; - case I2O_LAN_10BROAD36: - idx = 5; - break; - case I2O_LAN_10BASE_T: - idx = 6; - break; - case I2O_LAN_10BASE_FP: - idx = 7; - break; - case I2O_LAN_10BASE_FB: - idx = 8; - break; - case I2O_LAN_10BASE_FL: - idx = 9; - break; - case I2O_LAN_100BASE_TX: - idx = 10; - break; - case I2O_LAN_100BASE_FX: - idx = 11; - break; - case I2O_LAN_100BASE_T4: - idx = 12; - break; - case I2O_LAN_1000BASE_SX: - idx = 13; - break; - case I2O_LAN_1000BASE_LX: - idx = 14; - break; - case I2O_LAN_1000BASE_CX: - idx = 15; - break; - case I2O_LAN_1000BASE_T: - idx = 16; - break; - case I2O_LAN_100VG_ETHERNET: - idx = 17; - break; - case I2O_LAN_100VG_TR: - idx = 18; - break; - case I2O_LAN_4MBIT: - idx = 19; - break; - case I2O_LAN_16MBIT: - idx = 20; - break; - case I2O_LAN_125MBAUD: - idx = 21; - break; - case I2O_LAN_POINT_POINT: - idx = 22; - break; - case I2O_LAN_ARB_LOOP: - idx = 23; - break; - case I2O_LAN_PUBLIC_LOOP: - idx = 24; - break; - case I2O_LAN_FABRIC: - idx = 25; - break; - case I2O_LAN_EMULATION: - idx = 26; - break; - case I2O_LAN_OTHER: - idx = 27; - break; - case I2O_LAN_DEFAULT: - idx = 28; - break; - } - - return i2o_connection_type[idx]; -} - - -/* LAN group 0000h - Device info (scalar) */ -int i2o_proc_read_lan_dev_info(char *buf, char **start, off_t offset, int len, - int 
*eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - static u32 work32[56]; - static u8 *work8 = (u8*)work32; - static u16 *work16 = (u16*)work32; - static u64 *work64 = (u64*)work32; - int token; - - spin_lock(&i2o_proc_lock); - len = 0; - - token = i2o_query_scalar(d->controller, d->lct_data.tid, - 0x0000, -1, &work32, 56*4); - if (token < 0) { - len += i2o_report_query_status(buf+len, token, "0x0000 LAN Device Info"); - spin_unlock(&i2o_proc_lock); - return len; - } - - len += sprintf(buf, "LAN Type : "); - switch (work16[0]) - { - case 0x0030: - len += sprintf(buf+len, "Ethernet, "); - break; - case 0x0040: - len += sprintf(buf+len, "100Base VG, "); - break; - case 0x0050: - len += sprintf(buf+len, "Token Ring, "); - break; - case 0x0060: - len += sprintf(buf+len, "FDDI, "); - break; - case 0x0070: - len += sprintf(buf+len, "Fibre Channel, "); - break; - default: - len += sprintf(buf+len, "Unknown type (0x%04x), ", work16[0]); - break; - } - - if (work16[1]&0x00000001) - len += sprintf(buf+len, "emulated LAN, "); - else - len += sprintf(buf+len, "physical LAN port, "); - - if (work16[1]&0x00000002) - len += sprintf(buf+len, "full duplex\n"); - else - len += sprintf(buf+len, "simplex\n"); - - len += sprintf(buf+len, "Address format : "); - switch(work8[4]) { - case 0x00: - len += sprintf(buf+len, "IEEE 48bit\n"); - break; - case 0x01: - len += sprintf(buf+len, "FC IEEE\n"); - break; - default: - len += sprintf(buf+len, "Unknown (0x%02x)\n", work8[4]); - break; - } - - len += sprintf(buf+len, "State : "); - switch(work8[5]) - { - case 0x00: - len += sprintf(buf+len, "Unknown\n"); - break; - case 0x01: - len += sprintf(buf+len, "Unclaimed\n"); - break; - case 0x02: - len += sprintf(buf+len, "Operational\n"); - break; - case 0x03: - len += sprintf(buf+len, "Suspended\n"); - break; - case 0x04: - len += sprintf(buf+len, "Resetting\n"); - break; - case 0x05: - len += sprintf(buf+len, "ERROR: "); - if(work16[3]&0x0001) - len += sprintf(buf+len, "TxCU inoperative "); - if(work16[3]&0x0002) - len += sprintf(buf+len, "RxCU inoperative "); - if(work16[3]&0x0004) - len += sprintf(buf+len, "Local mem alloc "); - len += sprintf(buf+len, "\n"); - break; - case 0x06: - len += sprintf(buf+len, "Operational no Rx\n"); - break; - case 0x07: - len += sprintf(buf+len, "Suspended no Rx\n"); - break; - default: - len += sprintf(buf+len, "Unspecified\n"); - break; - } - - len += sprintf(buf+len, "Min packet size : %d\n", work32[2]); - len += sprintf(buf+len, "Max packet size : %d\n", work32[3]); - len += sprintf(buf+len, "HW address : " - "%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", - work8[16],work8[17],work8[18],work8[19], - work8[20],work8[21],work8[22],work8[23]); - - len += sprintf(buf+len, "Max Tx wire speed : %d bps\n", (int)work64[3]); - len += sprintf(buf+len, "Max Rx wire speed : %d bps\n", (int)work64[4]); - - len += sprintf(buf+len, "Min SDU packet size : 0x%08x\n", work32[10]); - len += sprintf(buf+len, "Max SDU packet size : 0x%08x\n", work32[11]); - - spin_unlock(&i2o_proc_lock); - return len; -} - -/* LAN group 0001h - MAC address table (scalar) */ -int i2o_proc_read_lan_mac_addr(char *buf, char **start, off_t offset, int len, - int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - static u32 work32[48]; - static u8 *work8 = (u8*)work32; - int token; - - spin_lock(&i2o_proc_lock); - len = 0; - - token = i2o_query_scalar(d->controller, d->lct_data.tid, - 0x0001, -1, &work32, 48*4); - if (token < 0) { - len += i2o_report_query_status(buf+len, 
token,"0x0001 LAN MAC Address"); - spin_unlock(&i2o_proc_lock); - return len; - } - - len += sprintf(buf, "Active address : " - "%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", - work8[0],work8[1],work8[2],work8[3], - work8[4],work8[5],work8[6],work8[7]); - len += sprintf(buf+len, "Current address : " - "%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", - work8[8],work8[9],work8[10],work8[11], - work8[12],work8[13],work8[14],work8[15]); - len += sprintf(buf+len, "Functional address mask : " - "%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", - work8[16],work8[17],work8[18],work8[19], - work8[20],work8[21],work8[22],work8[23]); - - len += sprintf(buf+len,"HW/DDM capabilities : 0x%08x\n", work32[7]); - len += sprintf(buf+len," [%s] Unicast packets supported\n", - (work32[7]&0x00000001)?"+":"-"); - len += sprintf(buf+len," [%s] Promiscuous mode supported\n", - (work32[7]&0x00000002)?"+":"-"); - len += sprintf(buf+len," [%s] Promiscuous multicast mode supported\n", - (work32[7]&0x00000004)?"+":"-"); - len += sprintf(buf+len," [%s] Broadcast reception disabling supported\n", - (work32[7]&0x00000100)?"+":"-"); - len += sprintf(buf+len," [%s] Multicast reception disabling supported\n", - (work32[7]&0x00000200)?"+":"-"); - len += sprintf(buf+len," [%s] Functional address disabling supported\n", - (work32[7]&0x00000400)?"+":"-"); - len += sprintf(buf+len," [%s] MAC reporting supported\n", - (work32[7]&0x00000800)?"+":"-"); - - len += sprintf(buf+len,"Filter mask : 0x%08x\n", work32[6]); - len += sprintf(buf+len," [%s] Unicast packets disable\n", - (work32[6]&0x00000001)?"+":"-"); - len += sprintf(buf+len," [%s] Promiscuous mode enable\n", - (work32[6]&0x00000002)?"+":"-"); - len += sprintf(buf+len," [%s] Promiscuous multicast mode enable\n", - (work32[6]&0x00000004)?"+":"-"); - len += sprintf(buf+len," [%s] Broadcast packets disable\n", - (work32[6]&0x00000100)?"+":"-"); - len += sprintf(buf+len," [%s] Multicast packets disable\n", - (work32[6]&0x00000200)?"+":"-"); - len += sprintf(buf+len," [%s] Functional address disable\n", - (work32[6]&0x00000400)?"+":"-"); - - if (work32[7]&0x00000800) { - len += sprintf(buf+len, " MAC reporting mode : "); - if (work32[6]&0x00000800) - len += sprintf(buf+len, "Pass only priority MAC packets to user\n"); - else if (work32[6]&0x00001000) - len += sprintf(buf+len, "Pass all MAC packets to user\n"); - else if (work32[6]&0x00001800) - len += sprintf(buf+len, "Pass all MAC packets (promiscuous) to user\n"); - else - len += sprintf(buf+len, "Do not pass MAC packets to user\n"); - } - len += sprintf(buf+len, "Number of multicast addresses : %d\n", work32[8]); - len += sprintf(buf+len, "Perfect filtering for max %d multicast addresses\n", - work32[9]); - len += sprintf(buf+len, "Imperfect filtering for max %d multicast addresses\n", - work32[10]); - - spin_unlock(&i2o_proc_lock); - - return len; -} - -/* LAN group 0002h - Multicast MAC address table (table) */ -int i2o_proc_read_lan_mcast_addr(char *buf, char **start, off_t offset, - int len, int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - int token; - int i; - u8 mc_addr[8]; - - struct - { - u16 result_count; - u16 pad; - u16 block_size; - u8 block_status; - u8 error_info_size; - u16 row_count; - u16 more_flag; - u8 mc_addr[256][8]; - } *result; - - result = kmalloc(sizeof(*result), GFP_KERNEL); - if(!result) - return -ENOMEM; - - spin_lock(&i2o_proc_lock); - len = 0; - - token = i2o_query_table(I2O_PARAMS_TABLE_GET, - d->controller, d->lct_data.tid, 0x0002, -1, - NULL, 0, result, sizeof(*result)); - - 
if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0x002 LAN Multicast MAC Address"); - goto out; - } - - for (i = 0; i < result->row_count; i++) - { - memcpy(mc_addr, result->mc_addr[i], 8); - - len += sprintf(buf+len, "MC MAC address[%d]: " - "%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", - i, mc_addr[0], mc_addr[1], mc_addr[2], - mc_addr[3], mc_addr[4], mc_addr[5], - mc_addr[6], mc_addr[7]); - } -out: - spin_unlock(&i2o_proc_lock); - kfree(result); - return len; -} - -/* LAN group 0003h - Batch Control (scalar) */ -int i2o_proc_read_lan_batch_control(char *buf, char **start, off_t offset, - int len, int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - static u32 work32[9]; - int token; - - spin_lock(&i2o_proc_lock); - len = 0; - - token = i2o_query_scalar(d->controller, d->lct_data.tid, - 0x0003, -1, &work32, 9*4); - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0x0003 LAN Batch Control"); - spin_unlock(&i2o_proc_lock); - return len; - } - - len += sprintf(buf, "Batch mode "); - if (work32[0]&0x00000001) - len += sprintf(buf+len, "disabled"); - else - len += sprintf(buf+len, "enabled"); - if (work32[0]&0x00000002) - len += sprintf(buf+len, " (current setting)"); - if (work32[0]&0x00000004) - len += sprintf(buf+len, ", forced"); - else - len += sprintf(buf+len, ", toggle"); - len += sprintf(buf+len, "\n"); - - len += sprintf(buf+len, "Max Rx batch count : %d\n", work32[5]); - len += sprintf(buf+len, "Max Rx batch delay : %d\n", work32[6]); - len += sprintf(buf+len, "Max Tx batch delay : %d\n", work32[7]); - len += sprintf(buf+len, "Max Tx batch count : %d\n", work32[8]); - - spin_unlock(&i2o_proc_lock); - return len; -} - -/* LAN group 0004h - LAN Operation (scalar) */ -int i2o_proc_read_lan_operation(char *buf, char **start, off_t offset, int len, - int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - static u32 work32[5]; - int token; - - spin_lock(&i2o_proc_lock); - len = 0; - - token = i2o_query_scalar(d->controller, d->lct_data.tid, - 0x0004, -1, &work32, 20); - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0x0004 LAN Operation"); - spin_unlock(&i2o_proc_lock); - return len; - } - - len += sprintf(buf, "Packet prepadding (32b words) : %d\n", work32[0]); - len += sprintf(buf+len, "Transmission error reporting : %s\n", - (work32[1]&1)?"on":"off"); - len += sprintf(buf+len, "Bad packet handling : %s\n", - (work32[1]&0x2)?"by host":"by DDM"); - len += sprintf(buf+len, "Packet orphan limit : %d\n", work32[2]); - - len += sprintf(buf+len, "Tx modes : 0x%08x\n", work32[3]); - len += sprintf(buf+len, " [%s] HW CRC suppression\n", - (work32[3]&0x00000004) ? "+" : "-"); - len += sprintf(buf+len, " [%s] HW IPv4 checksum\n", - (work32[3]&0x00000100) ? "+" : "-"); - len += sprintf(buf+len, " [%s] HW TCP checksum\n", - (work32[3]&0x00000200) ? "+" : "-"); - len += sprintf(buf+len, " [%s] HW UDP checksum\n", - (work32[3]&0x00000400) ? "+" : "-"); - len += sprintf(buf+len, " [%s] HW RSVP checksum\n", - (work32[3]&0x00000800) ? "+" : "-"); - len += sprintf(buf+len, " [%s] HW ICMP checksum\n", - (work32[3]&0x00001000) ? "+" : "-"); - len += sprintf(buf+len, " [%s] Loopback suppression enable\n", - (work32[3]&0x00002000) ? "+" : "-"); - - len += sprintf(buf+len, "Rx modes : 0x%08x\n", work32[4]); - len += sprintf(buf+len, " [%s] FCS in payload\n", - (work32[4]&0x00000004) ? "+" : "-"); - len += sprintf(buf+len, " [%s] HW IPv4 checksum validation\n", - (work32[4]&0x00000100) ? 
"+" : "-"); - len += sprintf(buf+len, " [%s] HW TCP checksum validation\n", - (work32[4]&0x00000200) ? "+" : "-"); - len += sprintf(buf+len, " [%s] HW UDP checksum validation\n", - (work32[4]&0x00000400) ? "+" : "-"); - len += sprintf(buf+len, " [%s] HW RSVP checksum validation\n", - (work32[4]&0x00000800) ? "+" : "-"); - len += sprintf(buf+len, " [%s] HW ICMP checksum validation\n", - (work32[4]&0x00001000) ? "+" : "-"); - - spin_unlock(&i2o_proc_lock); - return len; -} - -/* LAN group 0005h - Media operation (scalar) */ -int i2o_proc_read_lan_media_operation(char *buf, char **start, off_t offset, - int len, int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - int token; - - struct - { - u32 connector_type; - u32 connection_type; - u64 current_tx_wire_speed; - u64 current_rx_wire_speed; - u8 duplex_mode; - u8 link_status; - u8 reserved; - u8 duplex_mode_target; - u32 connector_type_target; - u32 connection_type_target; - } result; - - spin_lock(&i2o_proc_lock); - len = 0; - - token = i2o_query_scalar(d->controller, d->lct_data.tid, - 0x0005, -1, &result, sizeof(result)); - if (token < 0) { - len += i2o_report_query_status(buf+len, token, "0x0005 LAN Media Operation"); - spin_unlock(&i2o_proc_lock); - return len; - } - - len += sprintf(buf, "Connector type : %s\n", - i2o_get_connector_type(result.connector_type)); - len += sprintf(buf+len, "Connection type : %s\n", - i2o_get_connection_type(result.connection_type)); - - len += sprintf(buf+len, "Current Tx wire speed : %d bps\n", (int)result.current_tx_wire_speed); - len += sprintf(buf+len, "Current Rx wire speed : %d bps\n", (int)result.current_rx_wire_speed); - len += sprintf(buf+len, "Duplex mode : %s duplex\n", - (result.duplex_mode)?"Full":"Half"); - - len += sprintf(buf+len, "Link status : "); - switch (result.link_status) - { - case 0x00: - len += sprintf(buf+len, "Unknown\n"); - break; - case 0x01: - len += sprintf(buf+len, "Normal\n"); - break; - case 0x02: - len += sprintf(buf+len, "Failure\n"); - break; - case 0x03: - len += sprintf(buf+len, "Reset\n"); - break; - default: - len += sprintf(buf+len, "Unspecified\n"); - } - - len += sprintf(buf+len, "Duplex mode target : "); - switch (result.duplex_mode_target){ - case 0: - len += sprintf(buf+len, "Half duplex\n"); - break; - case 1: - len += sprintf(buf+len, "Full duplex\n"); - break; - default: - len += sprintf(buf+len, "\n"); - } - - len += sprintf(buf+len, "Connector type target : %s\n", - i2o_get_connector_type(result.connector_type_target)); - len += sprintf(buf+len, "Connection type target : %s\n", - i2o_get_connection_type(result.connection_type_target)); - - spin_unlock(&i2o_proc_lock); - return len; -} - -/* LAN group 0006h - Alternate address (table) (optional) */ -int i2o_proc_read_lan_alt_addr(char *buf, char **start, off_t offset, int len, - int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - int token; - int i; - u8 alt_addr[8]; - struct - { - u16 result_count; - u16 pad; - u16 block_size; - u8 block_status; - u8 error_info_size; - u16 row_count; - u16 more_flag; - u8 alt_addr[256][8]; - } *result; - - result = kmalloc(sizeof(*result), GFP_KERNEL); - if(!result) - return -ENOMEM; - - spin_lock(&i2o_proc_lock); - len = 0; - - token = i2o_query_table(I2O_PARAMS_TABLE_GET, - d->controller, d->lct_data.tid, - 0x0006, -1, NULL, 0, result, sizeof(*result)); - - if (token < 0) { - len += i2o_report_query_status(buf+len, token, "0x0006 LAN Alternate Address (optional)"); - goto out; - } - - for (i=0; i < result->row_count; 
i++) - { - memcpy(alt_addr,result->alt_addr[i],8); - len += sprintf(buf+len, "Alternate address[%d]: " - "%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", - i, alt_addr[0], alt_addr[1], alt_addr[2], - alt_addr[3], alt_addr[4], alt_addr[5], - alt_addr[6], alt_addr[7]); - } -out: - spin_unlock(&i2o_proc_lock); - kfree(result); - return len; -} - - -/* LAN group 0007h - Transmit info (scalar) */ -int i2o_proc_read_lan_tx_info(char *buf, char **start, off_t offset, int len, - int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - static u32 work32[8]; - int token; - - spin_lock(&i2o_proc_lock); - len = 0; - - token = i2o_query_scalar(d->controller, d->lct_data.tid, - 0x0007, -1, &work32, 8*4); - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0x0007 LAN Transmit Info"); - spin_unlock(&i2o_proc_lock); - return len; - } - - len += sprintf(buf, "Tx Max SG elements per packet : %d\n", work32[0]); - len += sprintf(buf+len, "Tx Max SG elements per chain : %d\n", work32[1]); - len += sprintf(buf+len, "Tx Max outstanding packets : %d\n", work32[2]); - len += sprintf(buf+len, "Tx Max packets per request : %d\n", work32[3]); - - len += sprintf(buf+len, "Tx modes : 0x%08x\n", work32[4]); - len += sprintf(buf+len, " [%s] No DA in SGL\n", - (work32[4]&0x00000002) ? "+" : "-"); - len += sprintf(buf+len, " [%s] CRC suppression\n", - (work32[4]&0x00000004) ? "+" : "-"); - len += sprintf(buf+len, " [%s] MAC insertion\n", - (work32[4]&0x00000010) ? "+" : "-"); - len += sprintf(buf+len, " [%s] RIF insertion\n", - (work32[4]&0x00000020) ? "+" : "-"); - len += sprintf(buf+len, " [%s] IPv4 checksum generation\n", - (work32[4]&0x00000100) ? "+" : "-"); - len += sprintf(buf+len, " [%s] TCP checksum generation\n", - (work32[4]&0x00000200) ? "+" : "-"); - len += sprintf(buf+len, " [%s] UDP checksum generation\n", - (work32[4]&0x00000400) ? "+" : "-"); - len += sprintf(buf+len, " [%s] RSVP checksum generation\n", - (work32[4]&0x00000800) ? "+" : "-"); - len += sprintf(buf+len, " [%s] ICMP checksum generation\n", - (work32[4]&0x00001000) ? "+" : "-"); - len += sprintf(buf+len, " [%s] Loopback enabled\n", - (work32[4]&0x00010000) ? "+" : "-"); - len += sprintf(buf+len, " [%s] Loopback suppression enabled\n", - (work32[4]&0x00020000) ? "+" : "-"); - - spin_unlock(&i2o_proc_lock); - return len; -} - -/* LAN group 0008h - Receive info (scalar) */ -int i2o_proc_read_lan_rx_info(char *buf, char **start, off_t offset, int len, - int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - static u32 work32[8]; - int token; - - spin_lock(&i2o_proc_lock); - len = 0; - - token = i2o_query_scalar(d->controller, d->lct_data.tid, - 0x0008, -1, &work32, 8*4); - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0x0008 LAN Receive Info"); - spin_unlock(&i2o_proc_lock); - return len; - } - - len += sprintf(buf ,"Rx Max size of chain element : %d\n", work32[0]); - len += sprintf(buf+len, "Rx Max Buckets : %d\n", work32[1]); - len += sprintf(buf+len, "Rx Max Buckets in Reply : %d\n", work32[3]); - len += sprintf(buf+len, "Rx Max Packets in Bucket : %d\n", work32[4]); - len += sprintf(buf+len, "Rx Max Buckets in Post : %d\n", work32[5]); - - len += sprintf(buf+len, "Rx Modes : 0x%08x\n", work32[2]); - len += sprintf(buf+len, " [%s] FCS reception\n", - (work32[2]&0x00000004) ? "+" : "-"); - len += sprintf(buf+len, " [%s] IPv4 checksum validation \n", - (work32[2]&0x00000100) ? 
"+" : "-"); - len += sprintf(buf+len, " [%s] TCP checksum validation \n", - (work32[2]&0x00000200) ? "+" : "-"); - len += sprintf(buf+len, " [%s] UDP checksum validation \n", - (work32[2]&0x00000400) ? "+" : "-"); - len += sprintf(buf+len, " [%s] RSVP checksum validation \n", - (work32[2]&0x00000800) ? "+" : "-"); - len += sprintf(buf+len, " [%s] ICMP checksum validation \n", - (work32[2]&0x00001000) ? "+" : "-"); - - spin_unlock(&i2o_proc_lock); - return len; -} - -static int i2o_report_opt_field(char *buf, char *field_name, - int field_nbr, int supp_fields, u64 *value) -{ - if (supp_fields & (1 << field_nbr)) - return sprintf(buf, "%-24s : " FMT_U64_HEX "\n", field_name, U64_VAL(value)); - else - return sprintf(buf, "%-24s : Not supported\n", field_name); -} - -/* LAN group 0100h - LAN Historical statistics (scalar) */ -/* LAN group 0180h - Supported Optional Historical Statistics (scalar) */ -/* LAN group 0182h - Optional Non Media Specific Transmit Historical Statistics (scalar) */ -/* LAN group 0183h - Optional Non Media Specific Receive Historical Statistics (scalar) */ - -int i2o_proc_read_lan_hist_stats(char *buf, char **start, off_t offset, int len, - int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - int token; - - struct - { - u64 tx_packets; - u64 tx_bytes; - u64 rx_packets; - u64 rx_bytes; - u64 tx_errors; - u64 rx_errors; - u64 rx_dropped; - u64 adapter_resets; - u64 adapter_suspends; - } stats; // 0x0100 - - static u64 supp_groups[4]; // 0x0180 - - struct - { - u64 tx_retries; - u64 tx_directed_bytes; - u64 tx_directed_packets; - u64 tx_multicast_bytes; - u64 tx_multicast_packets; - u64 tx_broadcast_bytes; - u64 tx_broadcast_packets; - u64 tx_group_addr_packets; - u64 tx_short_packets; - } tx_stats; // 0x0182 - - struct - { - u64 rx_crc_errors; - u64 rx_directed_bytes; - u64 rx_directed_packets; - u64 rx_multicast_bytes; - u64 rx_multicast_packets; - u64 rx_broadcast_bytes; - u64 rx_broadcast_packets; - u64 rx_group_addr_packets; - u64 rx_short_packets; - u64 rx_long_packets; - u64 rx_runt_packets; - } rx_stats; // 0x0183 - - struct - { - u64 ipv4_generate; - u64 ipv4_validate_success; - u64 ipv4_validate_errors; - u64 tcp_generate; - u64 tcp_validate_success; - u64 tcp_validate_errors; - u64 udp_generate; - u64 udp_validate_success; - u64 udp_validate_errors; - u64 rsvp_generate; - u64 rsvp_validate_success; - u64 rsvp_validate_errors; - u64 icmp_generate; - u64 icmp_validate_success; - u64 icmp_validate_errors; - } chksum_stats; // 0x0184 - - spin_lock(&i2o_proc_lock); - len = 0; - - token = i2o_query_scalar(d->controller, d->lct_data.tid, - 0x0100, -1, &stats, sizeof(stats)); - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0x100 LAN Statistics"); - spin_unlock(&i2o_proc_lock); - return len; - } - - len += sprintf(buf+len, "Tx packets : " FMT_U64_HEX "\n", - U64_VAL(&stats.tx_packets)); - len += sprintf(buf+len, "Tx bytes : " FMT_U64_HEX "\n", - U64_VAL(&stats.tx_bytes)); - len += sprintf(buf+len, "Rx packets : " FMT_U64_HEX "\n", - U64_VAL(&stats.rx_packets)); - len += sprintf(buf+len, "Rx bytes : " FMT_U64_HEX "\n", - U64_VAL(&stats.rx_bytes)); - len += sprintf(buf+len, "Tx errors : " FMT_U64_HEX "\n", - U64_VAL(&stats.tx_errors)); - len += sprintf(buf+len, "Rx errors : " FMT_U64_HEX "\n", - U64_VAL(&stats.rx_errors)); - len += sprintf(buf+len, "Rx dropped : " FMT_U64_HEX "\n", - U64_VAL(&stats.rx_dropped)); - len += sprintf(buf+len, "Adapter resets : " FMT_U64_HEX "\n", - U64_VAL(&stats.adapter_resets)); - len += 
sprintf(buf+len, "Adapter suspends : " FMT_U64_HEX "\n", - U64_VAL(&stats.adapter_suspends)); - - /* Optional statistics follows */ - /* Get 0x0180 to see which optional groups/fields are supported */ - - token = i2o_query_scalar(d->controller, d->lct_data.tid, - 0x0180, -1, &supp_groups, sizeof(supp_groups)); - - if (token < 0) { - len += i2o_report_query_status(buf+len, token, "0x180 LAN Supported Optional Statistics"); - spin_unlock(&i2o_proc_lock); - return len; - } - - if (supp_groups[1]) /* 0x0182 */ - { - token = i2o_query_scalar(d->controller, d->lct_data.tid, - 0x0182, -1, &tx_stats, sizeof(tx_stats)); - - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0x182 LAN Optional Tx Historical Statistics"); - spin_unlock(&i2o_proc_lock); - return len; - } - - len += sprintf(buf+len, "==== Optional TX statistics (group 0182h)\n"); - - len += i2o_report_opt_field(buf+len, "Tx RetryCount", - 0, supp_groups[1], &tx_stats.tx_retries); - len += i2o_report_opt_field(buf+len, "Tx DirectedBytes", - 1, supp_groups[1], &tx_stats.tx_directed_bytes); - len += i2o_report_opt_field(buf+len, "Tx DirectedPackets", - 2, supp_groups[1], &tx_stats.tx_directed_packets); - len += i2o_report_opt_field(buf+len, "Tx MulticastBytes", - 3, supp_groups[1], &tx_stats.tx_multicast_bytes); - len += i2o_report_opt_field(buf+len, "Tx MulticastPackets", - 4, supp_groups[1], &tx_stats.tx_multicast_packets); - len += i2o_report_opt_field(buf+len, "Tx BroadcastBytes", - 5, supp_groups[1], &tx_stats.tx_broadcast_bytes); - len += i2o_report_opt_field(buf+len, "Tx BroadcastPackets", - 6, supp_groups[1], &tx_stats.tx_broadcast_packets); - len += i2o_report_opt_field(buf+len, "Tx TotalGroupAddrPackets", - 7, supp_groups[1], &tx_stats.tx_group_addr_packets); - len += i2o_report_opt_field(buf+len, "Tx TotalPacketsTooShort", - 8, supp_groups[1], &tx_stats.tx_short_packets); - } - - if (supp_groups[2]) /* 0x0183 */ - { - token = i2o_query_scalar(d->controller, d->lct_data.tid, - 0x0183, -1, &rx_stats, sizeof(rx_stats)); - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0x183 LAN Optional Rx Historical Stats"); - spin_unlock(&i2o_proc_lock); - return len; - } - - len += sprintf(buf+len, "==== Optional RX statistics (group 0183h)\n"); - - len += i2o_report_opt_field(buf+len, "Rx CRCErrorCount", - 0, supp_groups[2], &rx_stats.rx_crc_errors); - len += i2o_report_opt_field(buf+len, "Rx DirectedBytes", - 1, supp_groups[2], &rx_stats.rx_directed_bytes); - len += i2o_report_opt_field(buf+len, "Rx DirectedPackets", - 2, supp_groups[2], &rx_stats.rx_directed_packets); - len += i2o_report_opt_field(buf+len, "Rx MulticastBytes", - 3, supp_groups[2], &rx_stats.rx_multicast_bytes); - len += i2o_report_opt_field(buf+len, "Rx MulticastPackets", - 4, supp_groups[2], &rx_stats.rx_multicast_packets); - len += i2o_report_opt_field(buf+len, "Rx BroadcastBytes", - 5, supp_groups[2], &rx_stats.rx_broadcast_bytes); - len += i2o_report_opt_field(buf+len, "Rx BroadcastPackets", - 6, supp_groups[2], &rx_stats.rx_broadcast_packets); - len += i2o_report_opt_field(buf+len, "Rx TotalGroupAddrPackets", - 7, supp_groups[2], &rx_stats.rx_group_addr_packets); - len += i2o_report_opt_field(buf+len, "Rx TotalPacketsTooShort", - 8, supp_groups[2], &rx_stats.rx_short_packets); - len += i2o_report_opt_field(buf+len, "Rx TotalPacketsTooLong", - 9, supp_groups[2], &rx_stats.rx_long_packets); - len += i2o_report_opt_field(buf+len, "Rx TotalPacketsRunt", - 10, supp_groups[2], &rx_stats.rx_runt_packets); - } - - if (supp_groups[3]) /* 0x0184 
*/ - { - token = i2o_query_scalar(d->controller, d->lct_data.tid, - 0x0184, -1, &chksum_stats, sizeof(chksum_stats)); - - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0x184 LAN Optional Chksum Historical Stats"); - spin_unlock(&i2o_proc_lock); - return len; - } - - len += sprintf(buf+len, "==== Optional CHKSUM statistics (group 0x0184)\n"); - - len += i2o_report_opt_field(buf+len, "IPv4 Generate", - 0, supp_groups[3], &chksum_stats.ipv4_generate); - len += i2o_report_opt_field(buf+len, "IPv4 ValidateSuccess", - 1, supp_groups[3], &chksum_stats.ipv4_validate_success); - len += i2o_report_opt_field(buf+len, "IPv4 ValidateError", - 2, supp_groups[3], &chksum_stats.ipv4_validate_errors); - len += i2o_report_opt_field(buf+len, "TCP Generate", - 3, supp_groups[3], &chksum_stats.tcp_generate); - len += i2o_report_opt_field(buf+len, "TCP ValidateSuccess", - 4, supp_groups[3], &chksum_stats.tcp_validate_success); - len += i2o_report_opt_field(buf+len, "TCP ValidateError", - 5, supp_groups[3], &chksum_stats.tcp_validate_errors); - len += i2o_report_opt_field(buf+len, "UDP Generate", - 6, supp_groups[3], &chksum_stats.udp_generate); - len += i2o_report_opt_field(buf+len, "UDP ValidateSuccess", - 7, supp_groups[3], &chksum_stats.udp_validate_success); - len += i2o_report_opt_field(buf+len, "UDP ValidateError", - 8, supp_groups[3], &chksum_stats.udp_validate_errors); - len += i2o_report_opt_field(buf+len, "RSVP Generate", - 9, supp_groups[3], &chksum_stats.rsvp_generate); - len += i2o_report_opt_field(buf+len, "RSVP ValidateSuccess", - 10, supp_groups[3], &chksum_stats.rsvp_validate_success); - len += i2o_report_opt_field(buf+len, "RSVP ValidateError", - 11, supp_groups[3], &chksum_stats.rsvp_validate_errors); - len += i2o_report_opt_field(buf+len, "ICMP Generate", - 12, supp_groups[3], &chksum_stats.icmp_generate); - len += i2o_report_opt_field(buf+len, "ICMP ValidateSuccess", - 13, supp_groups[3], &chksum_stats.icmp_validate_success); - len += i2o_report_opt_field(buf+len, "ICMP ValidateError", - 14, supp_groups[3], &chksum_stats.icmp_validate_errors); - } - - spin_unlock(&i2o_proc_lock); - return len; -} - -/* LAN group 0200h - Required Ethernet Statistics (scalar) */ -/* LAN group 0280h - Optional Ethernet Statistics Supported (scalar) */ -/* LAN group 0281h - Optional Ethernet Historical Statistics (scalar) */ -int i2o_proc_read_lan_eth_stats(char *buf, char **start, off_t offset, - int len, int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - int token; - - struct - { - u64 rx_align_errors; - u64 tx_one_collisions; - u64 tx_multiple_collisions; - u64 tx_deferred; - u64 tx_late_collisions; - u64 tx_max_collisions; - u64 tx_carrier_lost; - u64 tx_excessive_deferrals; - } stats; - - static u64 supp_fields; - struct - { - u64 rx_overrun; - u64 tx_underrun; - u64 tx_heartbeat_failure; - } hist_stats; - - spin_lock(&i2o_proc_lock); - len = 0; - - token = i2o_query_scalar(d->controller, d->lct_data.tid, - 0x0200, -1, &stats, sizeof(stats)); - - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0x0200 LAN Ethernet Statistics"); - spin_unlock(&i2o_proc_lock); - return len; - } - - len += sprintf(buf+len, "Rx alignment errors : " FMT_U64_HEX "\n", - U64_VAL(&stats.rx_align_errors)); - len += sprintf(buf+len, "Tx one collisions : " FMT_U64_HEX "\n", - U64_VAL(&stats.tx_one_collisions)); - len += sprintf(buf+len, "Tx multicollisions : " FMT_U64_HEX "\n", - U64_VAL(&stats.tx_multiple_collisions)); - len += sprintf(buf+len, "Tx deferred : " 
FMT_U64_HEX "\n", - U64_VAL(&stats.tx_deferred)); - len += sprintf(buf+len, "Tx late collisions : " FMT_U64_HEX "\n", - U64_VAL(&stats.tx_late_collisions)); - len += sprintf(buf+len, "Tx max collisions : " FMT_U64_HEX "\n", - U64_VAL(&stats.tx_max_collisions)); - len += sprintf(buf+len, "Tx carrier lost : " FMT_U64_HEX "\n", - U64_VAL(&stats.tx_carrier_lost)); - len += sprintf(buf+len, "Tx excessive deferrals : " FMT_U64_HEX "\n", - U64_VAL(&stats.tx_excessive_deferrals)); - - /* Optional Ethernet statistics follows */ - /* Get 0x0280 to see which optional fields are supported */ - - token = i2o_query_scalar(d->controller, d->lct_data.tid, - 0x0280, -1, &supp_fields, sizeof(supp_fields)); - - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0x0280 LAN Supported Optional Ethernet Statistics"); - spin_unlock(&i2o_proc_lock); - return len; - } - - if (supp_fields) /* 0x0281 */ - { - token = i2o_query_scalar(d->controller, d->lct_data.tid, - 0x0281, -1, &stats, sizeof(stats)); - - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0x0281 LAN Optional Ethernet Statistics"); - spin_unlock(&i2o_proc_lock); - return len; - } - - len += sprintf(buf+len, "==== Optional ETHERNET statistics (group 0x0281)\n"); - - len += i2o_report_opt_field(buf+len, "Rx Overrun", - 0, supp_fields, &hist_stats.rx_overrun); - len += i2o_report_opt_field(buf+len, "Tx Underrun", - 1, supp_fields, &hist_stats.tx_underrun); - len += i2o_report_opt_field(buf+len, "Tx HeartbeatFailure", - 2, supp_fields, &hist_stats.tx_heartbeat_failure); - } - - spin_unlock(&i2o_proc_lock); - return len; -} - -/* LAN group 0300h - Required Token Ring Statistics (scalar) */ -/* LAN group 0380h, 0381h - Optional Statistics not yet defined (TODO) */ -int i2o_proc_read_lan_tr_stats(char *buf, char **start, off_t offset, - int len, int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - static u64 work64[13]; - int token; - - static char *ring_status[] = - { - "", - "", - "", - "", - "", - "Ring Recovery", - "Single Station", - "Counter Overflow", - "Remove Received", - "", - "Auto-Removal Error 1", - "Lobe Wire Fault", - "Transmit Beacon", - "Soft Error", - "Hard Error", - "Signal Loss" - }; - - spin_lock(&i2o_proc_lock); - len = 0; - - token = i2o_query_scalar(d->controller, d->lct_data.tid, - 0x0300, -1, &work64, sizeof(work64)); - - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0x0300 Token Ring Statistics"); - spin_unlock(&i2o_proc_lock); - return len; - } - - len += sprintf(buf, "LineErrors : " FMT_U64_HEX "\n", - U64_VAL(&work64[0])); - len += sprintf(buf+len, "LostFrames : " FMT_U64_HEX "\n", - U64_VAL(&work64[1])); - len += sprintf(buf+len, "ACError : " FMT_U64_HEX "\n", - U64_VAL(&work64[2])); - len += sprintf(buf+len, "TxAbortDelimiter : " FMT_U64_HEX "\n", - U64_VAL(&work64[3])); - len += sprintf(buf+len, "BursErrors : " FMT_U64_HEX "\n", - U64_VAL(&work64[4])); - len += sprintf(buf+len, "FrameCopiedErrors : " FMT_U64_HEX "\n", - U64_VAL(&work64[5])); - len += sprintf(buf+len, "FrequencyErrors : " FMT_U64_HEX "\n", - U64_VAL(&work64[6])); - len += sprintf(buf+len, "InternalErrors : " FMT_U64_HEX "\n", - U64_VAL(&work64[7])); - len += sprintf(buf+len, "LastRingStatus : %s\n", ring_status[work64[8]]); - len += sprintf(buf+len, "TokenError : " FMT_U64_HEX "\n", - U64_VAL(&work64[9])); - len += sprintf(buf+len, "UpstreamNodeAddress : " FMT_U64_HEX "\n", - U64_VAL(&work64[10])); - len += sprintf(buf+len, "LastRingID : " FMT_U64_HEX "\n", - U64_VAL(&work64[11])); - 
len += sprintf(buf+len, "LastBeaconType : " FMT_U64_HEX "\n", - U64_VAL(&work64[12])); - - spin_unlock(&i2o_proc_lock); - return len; -} - -/* LAN group 0400h - Required FDDI Statistics (scalar) */ -/* LAN group 0480h, 0481h - Optional Statistics, not yet defined (TODO) */ -int i2o_proc_read_lan_fddi_stats(char *buf, char **start, off_t offset, - int len, int *eof, void *data) -{ - struct i2o_device *d = (struct i2o_device*)data; - static u64 work64[11]; - int token; - - static char *conf_state[] = - { - "Isolated", - "Local a", - "Local b", - "Local ab", - "Local s", - "Wrap a", - "Wrap b", - "Wrap ab", - "Wrap s", - "C-Wrap a", - "C-Wrap b", - "C-Wrap s", - "Through", - }; - - static char *ring_state[] = - { - "Isolated", - "Non-op", - "Rind-op", - "Detect", - "Non-op-Dup", - "Ring-op-Dup", - "Directed", - "Trace" - }; - - static char *link_state[] = - { - "Off", - "Break", - "Trace", - "Connect", - "Next", - "Signal", - "Join", - "Verify", - "Active", - "Maintenance" - }; - - spin_lock(&i2o_proc_lock); - len = 0; - - token = i2o_query_scalar(d->controller, d->lct_data.tid, - 0x0400, -1, &work64, sizeof(work64)); - - if (token < 0) { - len += i2o_report_query_status(buf+len, token,"0x0400 FDDI Required Statistics"); - spin_unlock(&i2o_proc_lock); - return len; - } - - len += sprintf(buf+len, "ConfigurationState : %s\n", conf_state[work64[0]]); - len += sprintf(buf+len, "UpstreamNode : " FMT_U64_HEX "\n", - U64_VAL(&work64[1])); - len += sprintf(buf+len, "DownStreamNode : " FMT_U64_HEX "\n", - U64_VAL(&work64[2])); - len += sprintf(buf+len, "FrameErrors : " FMT_U64_HEX "\n", - U64_VAL(&work64[3])); - len += sprintf(buf+len, "FramesLost : " FMT_U64_HEX "\n", - U64_VAL(&work64[4])); - len += sprintf(buf+len, "RingMgmtState : %s\n", ring_state[work64[5]]); - len += sprintf(buf+len, "LCTFailures : " FMT_U64_HEX "\n", - U64_VAL(&work64[6])); - len += sprintf(buf+len, "LEMRejects : " FMT_U64_HEX "\n", - U64_VAL(&work64[7])); - len += sprintf(buf+len, "LEMCount : " FMT_U64_HEX "\n", - U64_VAL(&work64[8])); - len += sprintf(buf+len, "LConnectionState : %s\n", - link_state[work64[9]]); - - spin_unlock(&i2o_proc_lock); - return len; -} - -static int i2o_proc_create_entries(void *data, i2o_proc_entry *pentry, - struct proc_dir_entry *parent) -{ - struct proc_dir_entry *ent; - - while(pentry->name != NULL) - { - ent = create_proc_entry(pentry->name, pentry->mode, parent); - if(!ent) return -1; - - ent->data = data; - ent->read_proc = pentry->read_proc; - ent->write_proc = pentry->write_proc; - if(pentry->fops_proc) - ent->proc_fops = pentry->fops_proc; - - ent->nlink = 1; - - pentry++; - } - - return 0; -} - -static void i2o_proc_remove_entries(i2o_proc_entry *pentry, - struct proc_dir_entry *parent) -{ - while(pentry->name != NULL) - { - remove_proc_entry(pentry->name, parent); - pentry++; - } -} - -static int i2o_proc_add_controller(struct i2o_controller *pctrl, - struct proc_dir_entry *root ) -{ - struct proc_dir_entry *dir, *dir1; - struct i2o_device *dev; - char buff[10]; - - sprintf(buff, "iop%d", pctrl->unit); - - dir = proc_mkdir(buff, root); - if(!dir) - return -1; - - pctrl->proc_entry = dir; - - i2o_proc_create_entries(pctrl, generic_iop_entries, dir); - - for(dev = pctrl->devices; dev; dev = dev->next) - { - sprintf(buff, "%0#5x", dev->lct_data.tid); - - dir1 = proc_mkdir(buff, dir); - dev->proc_entry = dir1; - - if(!dir1) - printk(KERN_INFO "i2o_proc: Could not allocate proc dir\n"); - - i2o_proc_add_device(dev, dir1); - } - - return 0; -} - -void i2o_proc_new_dev(struct i2o_controller 
*c, struct i2o_device *d) -{ - char buff[10]; - -#ifdef DRIVERDEBUG - printk(KERN_INFO "Adding new device to /proc/i2o/iop%d\n", c->unit); -#endif - sprintf(buff, "%0#5x", d->lct_data.tid); - - d->proc_entry = proc_mkdir(buff, c->proc_entry); - - if(!d->proc_entry) - { - printk(KERN_WARNING "i2o: Could not allocate procdir!\n"); - return; - } - - i2o_proc_add_device(d, d->proc_entry); -} - -void i2o_proc_add_device(struct i2o_device *dev, struct proc_dir_entry *dir) -{ - i2o_proc_create_entries(dev, generic_dev_entries, dir); - - /* Inform core that we want updates about this device's status */ - i2o_device_notify_on(dev, &i2o_proc_handler); - switch(dev->lct_data.class_id) - { - case I2O_CLASS_SCSI_PERIPHERAL: - case I2O_CLASS_RANDOM_BLOCK_STORAGE: - i2o_proc_create_entries(dev, rbs_dev_entries, dir); - break; - case I2O_CLASS_LAN: - i2o_proc_create_entries(dev, lan_entries, dir); - switch(dev->lct_data.sub_class) - { - case I2O_LAN_ETHERNET: - i2o_proc_create_entries(dev, lan_eth_entries, dir); - break; - case I2O_LAN_FDDI: - i2o_proc_create_entries(dev, lan_fddi_entries, dir); - break; - case I2O_LAN_TR: - i2o_proc_create_entries(dev, lan_tr_entries, dir); - break; - default: - break; - } - break; - default: - break; - } -} - -static void i2o_proc_remove_controller(struct i2o_controller *pctrl, - struct proc_dir_entry *parent) -{ - char buff[10]; - struct i2o_device *dev; - - /* Remove unused device entries */ - for(dev=pctrl->devices; dev; dev=dev->next) - i2o_proc_remove_device(dev); - - if(!atomic_read(&pctrl->proc_entry->count)) - { - sprintf(buff, "iop%d", pctrl->unit); - - i2o_proc_remove_entries(generic_iop_entries, pctrl->proc_entry); - remove_proc_entry(buff, parent); - pctrl->proc_entry = NULL; - } -} - -void i2o_proc_remove_device(struct i2o_device *dev) -{ - struct proc_dir_entry *de=dev->proc_entry; - char dev_id[10]; - - sprintf(dev_id, "%0#5x", dev->lct_data.tid); - - i2o_device_notify_off(dev, &i2o_proc_handler); - /* Would it be safe to remove _files_ even if they are in use? 
*/ - if((de) && (!atomic_read(&de->count))) - { - i2o_proc_remove_entries(generic_dev_entries, de); - switch(dev->lct_data.class_id) - { - case I2O_CLASS_SCSI_PERIPHERAL: - case I2O_CLASS_RANDOM_BLOCK_STORAGE: - i2o_proc_remove_entries(rbs_dev_entries, de); - break; - case I2O_CLASS_LAN: - { - i2o_proc_remove_entries(lan_entries, de); - switch(dev->lct_data.sub_class) - { - case I2O_LAN_ETHERNET: - i2o_proc_remove_entries(lan_eth_entries, de); - break; - case I2O_LAN_FDDI: - i2o_proc_remove_entries(lan_fddi_entries, de); - break; - case I2O_LAN_TR: - i2o_proc_remove_entries(lan_tr_entries, de); - break; - } - } - } - remove_proc_entry(dev_id, dev->controller->proc_entry); - } -} - -void i2o_proc_dev_del(struct i2o_controller *c, struct i2o_device *d) -{ -#ifdef DRIVERDEBUG - printk(KERN_INFO "Deleting device %d from iop%d\n", - d->lct_data.tid, c->unit); -#endif - - i2o_proc_remove_device(d); -} - -static int create_i2o_procfs(void) -{ - struct i2o_controller *pctrl = NULL; - int i; - - i2o_proc_dir_root = proc_mkdir("i2o", NULL); - if(!i2o_proc_dir_root) - return -1; - i2o_proc_dir_root->owner = THIS_MODULE; - - for(i = 0; i < MAX_I2O_CONTROLLERS; i++) - { - pctrl = i2o_find_controller(i); - if(pctrl) - { - i2o_proc_add_controller(pctrl, i2o_proc_dir_root); - i2o_unlock_controller(pctrl); - } - }; - - return 0; -} - -static int __exit destroy_i2o_procfs(void) -{ - struct i2o_controller *pctrl = NULL; - int i; - - for(i = 0; i < MAX_I2O_CONTROLLERS; i++) - { - pctrl = i2o_find_controller(i); - if(pctrl) - { - i2o_proc_remove_controller(pctrl, i2o_proc_dir_root); - i2o_unlock_controller(pctrl); - } - } - - if(!atomic_read(&i2o_proc_dir_root->count)) - remove_proc_entry("i2o", NULL); - else - return -1; - - return 0; -} - -int __init i2o_proc_init(void) -{ - if (i2o_install_handler(&i2o_proc_handler) < 0) - { - printk(KERN_ERR "i2o_proc: Unable to install PROC handler.\n"); - return 0; - } - - if(create_i2o_procfs()) - return -EBUSY; - - return 0; -} - -MODULE_AUTHOR("Deepak Saxena"); -MODULE_DESCRIPTION("I2O procfs Handler"); -MODULE_LICENSE("GPL"); - -static void __exit i2o_proc_exit(void) -{ - destroy_i2o_procfs(); - i2o_remove_handler(&i2o_proc_handler); -} - -#ifdef MODULE -module_init(i2o_proc_init); -#endif -module_exit(i2o_proc_exit); - diff -L drivers/message/i2o/i2o_scsi.c -puN drivers/message/i2o/i2o_scsi.c~i2o-build_99 /dev/null --- 25/drivers/message/i2o/i2o_scsi.c +++ /dev/null Thu Apr 11 07:25:15 2002 @@ -1,1047 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2, or (at your option) any - * later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * For the avoidance of doubt the "preferred form" of this code is one which - * is in an open non patent encumbered format. Where cryptographic key signing - * forms part of the process of creating an executable the information - * including keys needed to generate an equivalently functional executable - * are deemed to be part of the source code. - * - * Complications for I2O scsi - * - * o Each (bus,lun) is a logical device in I2O. We keep a map - * table. We spoof failed selection for unmapped units - * o Request sense buffers can come back for free. 
- * o Scatter gather is a bit dynamic. We have to investigate at - * setup time. - * o Some of our resources are dynamically shared. The i2o core - * needs a message reservation protocol to avoid swap v net - * deadlocking. We need to back off queue requests. - * - * In general the firmware wants to help. Where its help isn't performance - * useful we just ignore the aid. Its not worth the code in truth. - * - * Fixes/additions: - * Steve Ralston: - * Scatter gather now works - * Markus Lidel : - * Minor fixes for 2.6. - * - * To Do: - * 64bit cleanups - * Fix the resource management problems. - */ - - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - - -#define VERSION_STRING "Version 0.1.2" - -//#define DRIVERDEBUG - -#ifdef DRIVERDEBUG -#define dprintk(s, args...) printk(s, ## args) -#else -#define dprintk(s, args...) -#endif - -#define I2O_SCSI_CAN_QUEUE 4 -#define MAXHOSTS 32 - -struct i2o_scsi_host -{ - struct i2o_controller *controller; - s16 task[16][8]; /* Allow 16 devices for now */ - unsigned long tagclock[16][8]; /* Tag clock for queueing */ - s16 bus_task; /* The adapter TID */ -}; - -static int scsi_context; -static int lun_done; -static int i2o_scsi_hosts; - -static u32 *retry[32]; -static struct i2o_controller *retry_ctrl[32]; -static struct timer_list retry_timer; -static spinlock_t retry_lock = SPIN_LOCK_UNLOCKED; -static int retry_ct = 0; - -static atomic_t queue_depth; - -/* - * SG Chain buffer support... - */ - -#define SG_MAX_FRAGS 64 - -/* - * FIXME: we should allocate one of these per bus we find as we - * locate them not in a lump at boot. - */ - -typedef struct _chain_buf -{ - u32 sg_flags_cnt[SG_MAX_FRAGS]; - u32 sg_buf[SG_MAX_FRAGS]; -} chain_buf; - -#define SG_CHAIN_BUF_SZ sizeof(chain_buf) - -#define SG_MAX_BUFS (i2o_num_controllers * I2O_SCSI_CAN_QUEUE) -#define SG_CHAIN_POOL_SZ (SG_MAX_BUFS * SG_CHAIN_BUF_SZ) - -static int max_sg_len = 0; -static chain_buf *sg_chain_pool = NULL; -static int sg_chain_tag = 0; -static int sg_max_frags = SG_MAX_FRAGS; - -/** - * i2o_retry_run - retry on timeout - * @f: unused - * - * Retry congested frames. This actually needs pushing down into - * i2o core. We should only bother the OSM with this when we can't - * queue and retry the frame. Or perhaps we should call the OSM - * and its default handler should be this in the core, and this - * call a 2nd "I give up" handler in the OSM ? - */ - -static void i2o_retry_run(unsigned long f) -{ - int i; - unsigned long flags; - - spin_lock_irqsave(&retry_lock, flags); - for(i=0;i>12)&0xFFF, - m[1]&0xFFF, - m[1]>>24); - printk("Failure Code %d.\n", m[4]>>24); - if(m[4]&(1<<16)) - printk("Format error.\n"); - if(m[4]&(1<<17)) - printk("Path error.\n"); - if(m[4]&(1<<18)) - printk("Path State.\n"); - if(m[4]&(1<<18)) - printk("Congestion.\n"); - - m=(u32 *)bus_to_virt(m[7]); - printk("Failing message is %p.\n", m); - - /* This isnt a fast path .. 
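
A hedged aside on the retry machinery above: frames the IOP bounced for congestion are parked in the fixed retry[] array under retry_lock and reposted later from retry_timer via i2o_retry_run(). The stand-alone sketch below shows only the park-and-drain idea; the spinlock, the timer and the real i2o_post_message() call are left out, and park_frame()/drain_frames()/repost_frame() are made-up names.

#include <stdio.h>

#define RETRY_SLOTS 32

static void *retry_slot[RETRY_SLOTS];	/* parked (congested) frames */
static int retry_ct;			/* number of parked frames */

/* Park a congested frame; returns 0 if the stash is already full. */
static int park_frame(void *frame)
{
	if (retry_ct >= RETRY_SLOTS)
		return 0;
	retry_slot[retry_ct++] = frame;
	return 1;
}

/* Drain the stash, reposting every parked frame (the timer callback's job). */
static void drain_frames(void (*repost)(void *frame))
{
	int i;

	for (i = 0; i < retry_ct; i++)
		repost(retry_slot[i]);
	retry_ct = 0;
}

static void repost_frame(void *frame)
{
	printf("reposting frame %p\n", frame);
}

int main(void)
{
	int dummy[2];

	park_frame(&dummy[0]);
	park_frame(&dummy[1]);
	drain_frames(repost_frame);
	return 0;
}
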
*/ - spin_lock_irqsave(&retry_lock, flags); - - if((m[4]&(1<<18)) && retry_ct < 32) - { - retry_ctrl[retry_ct]=c; - retry[retry_ct]=m; - if(!retry_ct++) - { - retry_timer.expires=jiffies+1; - add_timer(&retry_timer); - } - spin_unlock_irqrestore(&retry_lock, flags); - } - else - { - spin_unlock_irqrestore(&retry_lock, flags); - /* Create a scsi error for this */ - current_command = (struct scsi_cmnd *)i2o_context_list_get(m[3], c); - if(!current_command) - return; - - lock = current_command->device->host->host_lock; - printk("Aborted %ld\n", current_command->serial_number); - - spin_lock_irqsave(lock, flags); - current_command->result = DID_ERROR << 16; - current_command->scsi_done(current_command); - spin_unlock_irqrestore(lock, flags); - - /* Now flush the message by making it a NOP */ - m[0]&=0x00FFFFFF; - m[0]|=(I2O_CMD_UTIL_NOP)<<24; - i2o_post_message(c,virt_to_bus(m)); - } - return; - } - - prefetchw(&queue_depth); - - - /* - * Low byte is device status, next is adapter status, - * (then one byte reserved), then request status. - */ - ds=(u8)le32_to_cpu(m[4]); - as=(u8)le32_to_cpu(m[4]>>8); - st=(u8)le32_to_cpu(m[4]>>24); - - dprintk(KERN_INFO "i2o got a scsi reply %08X: ", m[0]); - dprintk(KERN_INFO "m[2]=%08X: ", m[2]); - dprintk(KERN_INFO "m[4]=%08X\n", m[4]); - - if(m[2]&0x80000000) - { - if(m[2]&0x40000000) - { - dprintk(KERN_INFO "Event.\n"); - lun_done=1; - return; - } - printk(KERN_INFO "i2o_scsi: bus reset completed.\n"); - return; - } - - current_command = (struct scsi_cmnd *)i2o_context_list_get(m[3], c); - - /* - * Is this a control request coming back - eg an abort ? - */ - - atomic_dec(&queue_depth); - - if(current_command==NULL) - { - if(st) - dprintk(KERN_WARNING "SCSI abort: %08X", m[4]); - dprintk(KERN_INFO "SCSI abort completed.\n"); - return; - } - - dprintk(KERN_INFO "Completed %ld\n", current_command->serial_number); - - if(st == 0x06) - { - if(le32_to_cpu(m[5]) < current_command->underflow) - { - int i; - printk(KERN_ERR "SCSI: underflow 0x%08X 0x%08X\n", - le32_to_cpu(m[5]), current_command->underflow); - printk("Cmd: "); - for(i=0;i<15;i++) - printk("%02X ", current_command->cmnd[i]); - printk(".\n"); - } - else st=0; - } - - if(st) - { - /* An error has occurred */ - - dprintk(KERN_WARNING "SCSI error %08X", m[4]); - - if (as == 0x0E) - /* SCSI Reset */ - current_command->result = DID_RESET << 16; - else if (as == 0x0F) - current_command->result = DID_PARITY << 16; - else - current_command->result = DID_ERROR << 16; - } - else - /* - * It worked maybe ? 
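
On the non-failure path above, the reply word m[4] packs three status bytes: device status in the low byte, adapter status next, request status in the top byte, and the driver folds them into a SCSI mid-layer result. A stand-alone sketch of that decoding; the DID_* constants are defined locally here but carry the usual <scsi/scsi.h> values, and the le32_to_cpu() byte-swapping is omitted.

#include <stdio.h>
#include <stdint.h>

/* Local copies of the mid-layer result codes used below. */
#define DID_OK     0x00
#define DID_PARITY 0x06
#define DID_ERROR  0x07
#define DID_RESET  0x08

/* Decode an I2O SCSI reply status word into a SCSI mid-layer result. */
static uint32_t decode_reply(uint32_t word)
{
	uint8_t ds = word & 0xff;		/* device status  */
	uint8_t as = (word >> 8) & 0xff;	/* adapter status */
	uint8_t st = (word >> 24) & 0xff;	/* request status */

	if (!st)
		return DID_OK << 16 | ds;	/* success: keep the device status */
	if (as == 0x0E)
		return DID_RESET << 16;		/* adapter reports a SCSI reset */
	if (as == 0x0F)
		return DID_PARITY << 16;
	return DID_ERROR << 16;
}

int main(void)
{
	printf("result 0x%08x\n", decode_reply(0x00000002)); /* ok, CHECK CONDITION */
	printf("result 0x%08x\n", decode_reply(0x01000000)); /* request failed */
	return 0;
}
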
- */ - current_command->result = DID_OK << 16 | ds; - - if (current_command->use_sg) { - pci_unmap_sg(c->pdev, - (struct scatterlist *)current_command->buffer, - current_command->use_sg, - current_command->sc_data_direction); - } else if (current_command->request_bufflen) { - pci_unmap_single(c->pdev, - (dma_addr_t)((long)current_command->SCp.ptr), - current_command->request_bufflen, - current_command->sc_data_direction); - } - - lock = current_command->device->host->host_lock; - spin_lock_irqsave(lock, flags); - current_command->scsi_done(current_command); - spin_unlock_irqrestore(lock, flags); - return; -} - -struct i2o_handler i2o_scsi_handler = { - .reply = i2o_scsi_reply, - .name = "I2O SCSI OSM", - .class = I2O_CLASS_SCSI_PERIPHERAL, -}; - -/** - * i2o_find_lun - report the lun of an i2o device - * @c: i2o controller owning the device - * @d: i2o disk device - * @target: filled in with target id - * @lun: filled in with target lun - * - * Query an I2O device to find out its SCSI lun and target numbering. We - * don't currently handle some of the fancy SCSI-3 stuff although our - * querying is sufficient to do so. - */ - -static int i2o_find_lun(struct i2o_controller *c, struct i2o_device *d, int *target, int *lun) -{ - u8 reply[8]; - - if(i2o_query_scalar(c, d->lct_data.tid, 0, 3, reply, 4)<0) - return -1; - - *target=reply[0]; - - if(i2o_query_scalar(c, d->lct_data.tid, 0, 4, reply, 8)<0) - return -1; - - *lun=reply[1]; - - dprintk(KERN_INFO "SCSI (%d,%d)\n", *target, *lun); - return 0; -} - -/** - * i2o_scsi_init - initialize an i2o device for scsi - * @c: i2o controller owning the device - * @d: scsi controller - * @shpnt: scsi device we wish it to become - * - * Enumerate the scsi peripheral/fibre channel peripheral class - * devices that are children of the controller. From that we build - * a translation map for the command queue code. Since I2O works on - * its own tid's we effectively have to think backwards to get what - * the midlayer wants - */ - -static void i2o_scsi_init(struct i2o_controller *c, struct i2o_device *d, struct Scsi_Host *shpnt) -{ - struct i2o_device *unit; - struct i2o_scsi_host *h =(struct i2o_scsi_host *)shpnt->hostdata; - int lun; - int target; - - h->controller=c; - h->bus_task=d->lct_data.tid; - - for(target=0;target<16;target++) - for(lun=0;lun<8;lun++) - h->task[target][lun] = -1; - - for(unit=c->devices;unit!=NULL;unit=unit->next) - { - dprintk(KERN_INFO "Class %03X, parent %d, want %d.\n", - unit->lct_data.class_id, unit->lct_data.parent_tid, d->lct_data.tid); - - /* Only look at scsi and fc devices */ - if ( (unit->lct_data.class_id != I2O_CLASS_SCSI_PERIPHERAL) - && (unit->lct_data.class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL) - ) - continue; - - /* On our bus ? 
*/ - dprintk(KERN_INFO "Found a disk (%d).\n", unit->lct_data.tid); - if ((unit->lct_data.parent_tid == d->lct_data.tid) - || (unit->lct_data.parent_tid == d->lct_data.parent_tid) - ) - { - u16 limit; - dprintk(KERN_INFO "Its ours.\n"); - if(i2o_find_lun(c, unit, &target, &lun)==-1) - { - printk(KERN_ERR "i2o_scsi: Unable to get lun for tid %d.\n", unit->lct_data.tid); - continue; - } - dprintk(KERN_INFO "Found disk %d %d.\n", target, lun); - h->task[target][lun]=unit->lct_data.tid; - h->tagclock[target][lun]=jiffies; - - /* Get the max fragments/request */ - i2o_query_scalar(c, d->lct_data.tid, 0xF103, 3, &limit, 2); - - /* sanity */ - if ( limit == 0 ) - { - printk(KERN_WARNING "i2o_scsi: Ignoring unreasonable SG limit of 0 from IOP!\n"); - limit = 1; - } - - shpnt->sg_tablesize = limit; - - dprintk(KERN_INFO "i2o_scsi: set scatter-gather to %d.\n", - shpnt->sg_tablesize); - } - } -} - -/** - * i2o_scsi_detect - probe for I2O scsi devices - * @tpnt: scsi layer template - * - * I2O is a little odd here. The I2O core already knows what the - * devices are. It also knows them by disk and tape as well as - * by controller. We register each I2O scsi class object as a - * scsi controller and then let the enumeration fake up the rest - */ - -static int i2o_scsi_detect(struct scsi_host_template * tpnt) -{ - struct Scsi_Host *shpnt = NULL; - int i; - int count; - - printk(KERN_INFO "i2o_scsi.c: %s\n", VERSION_STRING); - - if(i2o_install_handler(&i2o_scsi_handler)<0) - { - printk(KERN_ERR "i2o_scsi: Unable to install OSM handler.\n"); - return 0; - } - scsi_context = i2o_scsi_handler.context; - - if((sg_chain_pool = kmalloc(SG_CHAIN_POOL_SZ, GFP_KERNEL)) == NULL) - { - printk(KERN_INFO "i2o_scsi: Unable to alloc %d byte SG chain buffer pool.\n", SG_CHAIN_POOL_SZ); - printk(KERN_INFO "i2o_scsi: SG chaining DISABLED!\n"); - sg_max_frags = 11; - } - else - { - printk(KERN_INFO " chain_pool: %d bytes @ %p\n", SG_CHAIN_POOL_SZ, sg_chain_pool); - printk(KERN_INFO " (%d byte buffers X %d can_queue X %d i2o controllers)\n", - SG_CHAIN_BUF_SZ, I2O_SCSI_CAN_QUEUE, i2o_num_controllers); - sg_max_frags = SG_MAX_FRAGS; // 64 - } - - init_timer(&retry_timer); - retry_timer.data = 0UL; - retry_timer.function = i2o_retry_run; - -// printk("SCSI OSM at %d.\n", scsi_context); - - for (count = 0, i = 0; i < MAX_I2O_CONTROLLERS; i++) - { - struct i2o_controller *c=i2o_find_controller(i); - struct i2o_device *d; - /* - * This controller doesn't exist. - */ - - if(c==NULL) - continue; - - /* - * Fixme - we need some altered device locking. This - * is racing with device addition in theory. Easy to fix. 
- */ - - for(d=c->devices;d!=NULL;d=d->next) - { - /* - * bus_adapter, SCSI (obsolete), or FibreChannel busses only - */ - if( (d->lct_data.class_id!=I2O_CLASS_BUS_ADAPTER_PORT) // bus_adapter -// && (d->lct_data.class_id!=I2O_CLASS_FIBRE_CHANNEL_PORT) // FC_PORT - ) - continue; - - shpnt = scsi_register(tpnt, sizeof(struct i2o_scsi_host)); - if(shpnt==NULL) - continue; - shpnt->unique_id = (u32)d; - shpnt->io_port = 0; - shpnt->n_io_port = 0; - shpnt->irq = 0; - shpnt->this_id = /* Good question */15; - i2o_scsi_init(c, d, shpnt); - count++; - } - } - i2o_scsi_hosts = count; - - if(count==0) - { - if(sg_chain_pool!=NULL) - { - kfree(sg_chain_pool); - sg_chain_pool = NULL; - } - flush_pending(); - del_timer(&retry_timer); - i2o_remove_handler(&i2o_scsi_handler); - } - - return count; -} - -static int i2o_scsi_release(struct Scsi_Host *host) -{ - if(--i2o_scsi_hosts==0) - { - if(sg_chain_pool!=NULL) - { - kfree(sg_chain_pool); - sg_chain_pool = NULL; - } - flush_pending(); - del_timer(&retry_timer); - i2o_remove_handler(&i2o_scsi_handler); - } - - scsi_unregister(host); - - return 0; -} - - -static const char *i2o_scsi_info(struct Scsi_Host *SChost) -{ - struct i2o_scsi_host *hostdata; - hostdata = (struct i2o_scsi_host *)SChost->hostdata; - return(&hostdata->controller->name[0]); -} - -/** - * i2o_scsi_queuecommand - queue a SCSI command - * @SCpnt: scsi command pointer - * @done: callback for completion - * - * Issue a scsi comamnd asynchronously. Return 0 on success or 1 if - * we hit an error (normally message queue congestion). The only - * minor complication here is that I2O deals with the device addressing - * so we have to map the bus/dev/lun back to an I2O handle as well - * as faking absent devices ourself. - * - * Locks: takes the controller lock on error path only - */ - -static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, - void (*done) (struct scsi_cmnd *)) -{ - int i; - int tid; - struct i2o_controller *c; - struct scsi_cmnd *current_command; - struct Scsi_Host *host; - struct i2o_scsi_host *hostdata; - u32 *msg, *mptr; - u32 m; - u32 *lenptr; - int direction; - int scsidir; - u32 len; - u32 reqlen; - u32 tag; - unsigned long flags; - - static int max_qd = 1; - - /* - * Do the incoming paperwork - */ - - host = SCpnt->device->host; - hostdata = (struct i2o_scsi_host *)host->hostdata; - - c = hostdata->controller; - prefetch(c); - prefetchw(&queue_depth); - - SCpnt->scsi_done = done; - - if(SCpnt->device->id > 15) - { - printk(KERN_ERR "i2o_scsi: Wild target %d.\n", SCpnt->device->id); - return -1; - } - - tid = hostdata->task[SCpnt->device->id][SCpnt->device->lun]; - - dprintk(KERN_INFO "qcmd: Tid = %d\n", tid); - - current_command = SCpnt; /* set current command */ - current_command->scsi_done = done; /* set ptr to done function */ - - /* We don't have such a device. Pretend we did the command - and that selection timed out */ - - if(tid == -1) - { - SCpnt->result = DID_NO_CONNECT << 16; - done(SCpnt); - return 0; - } - - dprintk(KERN_INFO "Real scsi messages.\n"); - - /* - * Obtain an I2O message. 
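
The lookup queuecommand performed a few lines up is the heart of the old addressing scheme: the per-host task[target][lun] table translates a SCSI address into an I2O TID, and units the LCT never reported stay at -1 and are completed straight away with DID_NO_CONNECT so the mid-layer sees a selection timeout. A condensed stand-alone sketch; init_map() and lookup_tid() are illustrative names and DID_NO_CONNECT is defined locally with its usual value.

#include <stdio.h>

#define DID_NO_CONNECT 0x01	/* as in <scsi/scsi.h> */

static short task[16][8];	/* (target,lun) -> I2O TID, -1 if absent */

static void init_map(void)
{
	int t, l;

	for (t = 0; t < 16; t++)
		for (l = 0; l < 8; l++)
			task[t][l] = -1;
}

/* Return the TID for a unit, or complete it as "no such device". */
static int lookup_tid(int target, int lun, unsigned int *result)
{
	if (target > 15 || lun > 7 || task[target][lun] == -1) {
		*result = DID_NO_CONNECT << 16;	/* spoofed selection timeout */
		return -1;
	}
	return task[target][lun];
}

int main(void)
{
	unsigned int result = 0;
	int tid;

	init_map();
	task[0][0] = 0x209;		/* pretend the LCT reported this unit */

	tid = lookup_tid(0, 0, &result);
	printf("tid %d\n", tid);
	tid = lookup_tid(3, 0, &result);
	printf("tid %d result 0x%08x\n", tid, result);
	return 0;
}
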
If there are none free then - * throw it back to the scsi layer - */ - - m = le32_to_cpu(I2O_POST_READ32(c)); - if(m==0xFFFFFFFF) - return 1; - - msg = (u32 *)(c->msg_virt + m); - - /* - * Put together a scsi execscb message - */ - - len = SCpnt->request_bufflen; - direction = 0x00000000; // SGL IN (osm<--iop) - - if (SCpnt->sc_data_direction == DMA_NONE) { - scsidir = 0x00000000; // DATA NO XFER - } else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) { - direction = 0x04000000; // SGL OUT (osm-->iop) - scsidir = 0x80000000; // DATA OUT (iop-->dev) - } else if(SCpnt->sc_data_direction == DMA_FROM_DEVICE) { - scsidir = 0x40000000; // DATA IN (iop<--dev) - } else { - /* Unknown - kill the command */ - SCpnt->result = DID_NO_CONNECT << 16; - - /* We must lock the request queue while completing */ - spin_lock_irqsave(host->host_lock, flags); - done(SCpnt); - spin_unlock_irqrestore(host->host_lock, flags); - return 0; - } - - - i2o_raw_writel(I2O_CMD_SCSI_EXEC<<24|HOST_TID<<12|tid, &msg[1]); - i2o_raw_writel(scsi_context, &msg[2]); /* So the I2O layer passes to us */ - i2o_raw_writel(i2o_context_list_add(SCpnt, c), &msg[3]); /* We want the SCSI control block back */ - - /* LSI_920_PCI_QUIRK - * - * Intermittant observations of msg frame word data corruption - * observed on msg[4] after: - * WRITE, READ-MODIFY-WRITE - * operations. 19990606 -sralston - * - * (Hence we build this word via tag. Its good practice anyway - * we don't want fetches over PCI needlessly) - */ - - tag=0; - - /* - * Attach tags to the devices - */ - if(SCpnt->device->tagged_supported) - { - /* - * Some drives are too stupid to handle fairness issues - * with tagged queueing. We throw in the odd ordered - * tag to stop them starving themselves. - */ - if((jiffies - hostdata->tagclock[SCpnt->device->id][SCpnt->device->lun]) > (5*HZ)) - { - tag=0x01800000; /* ORDERED! */ - hostdata->tagclock[SCpnt->device->id][SCpnt->device->lun]=jiffies; - } - else - { - /* Hmmm... I always see value of 0 here, - * of which {HEAD_OF, ORDERED, SIMPLE} are NOT! -sralston - */ - if(SCpnt->tag == HEAD_OF_QUEUE_TAG) - tag=0x01000000; - else if(SCpnt->tag == ORDERED_QUEUE_TAG) - tag=0x01800000; - } - } - - /* Direction, disconnect ok, tag, CDBLen */ - i2o_raw_writel(scsidir|0x20000000|SCpnt->cmd_len|tag, &msg[4]); - - mptr=msg+5; - - /* - * Write SCSI command into the message - always 16 byte block - */ - - memcpy_toio(mptr, SCpnt->cmnd, 16); - mptr+=4; - lenptr=mptr++; /* Remember me - fill in when we know */ - - reqlen = 12; // SINGLE SGE - - /* - * Now fill in the SGList and command - * - * FIXME: we need to set the sglist limits according to the - * message size of the I2O controller. We might only have room - * for 6 or so worst case - */ - - if(SCpnt->use_sg) - { - struct scatterlist *sg = (struct scatterlist *)SCpnt->request_buffer; - int sg_count; - int chain = 0; - - len = 0; - - sg_count = pci_map_sg(c->pdev, sg, SCpnt->use_sg, - SCpnt->sc_data_direction); - - /* FIXME: handle fail */ - if(!sg_count) - BUG(); - - if((sg_max_frags > 11) && (SCpnt->use_sg > 11)) - { - chain = 1; - /* - * Need to chain! - */ - i2o_raw_writel(direction|0xB0000000|(SCpnt->use_sg*2*4), mptr++); - i2o_raw_writel(virt_to_bus(sg_chain_pool + sg_chain_tag), mptr); - mptr = (u32*)(sg_chain_pool + sg_chain_tag); - if (SCpnt->use_sg > max_sg_len) - { - max_sg_len = SCpnt->use_sg; - printk("i2o_scsi: Chain SG! 
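
Each scatter-gather element written above is a pair of 32-bit words, flags plus length followed by the bus address: 0x10000000 marks a simple element, the final one carries 0xD0000000 to close both the buffer and the list, and 0x04000000 is OR-ed in when data flows from the host to the IOP. A stand-alone sketch that builds such a list from fabricated segments; build_sgl() and struct seg are illustrative and not part of the driver.

#include <stdio.h>
#include <stdint.h>

#define SGL_SIMPLE  0x10000000u	/* simple element */
#define SGL_LAST    0xD0000000u	/* last element: end of buffer + end of list */
#define SGL_DIR_OUT 0x04000000u	/* data flows host -> IOP */

struct seg { uint32_t addr; uint32_t len; };

/* Write count segments as I2O SG elements into out[]; returns words used. */
static int build_sgl(uint32_t *out, const struct seg *sg, int count, int dir_out)
{
	uint32_t dir = dir_out ? SGL_DIR_OUT : 0;
	int i, w = 0;

	for (i = 0; i < count; i++) {
		uint32_t flags = (i == count - 1) ? SGL_LAST : SGL_SIMPLE;

		out[w++] = dir | flags | sg[i].len;
		out[w++] = sg[i].addr;
	}
	return w;
}

int main(void)
{
	struct seg sg[2] = { { 0x1000, 512 }, { 0x3000, 1024 } };
	uint32_t words[4];
	int i, n = build_sgl(words, sg, 2, 1);

	for (i = 0; i < n; i++)
		printf("word %d: 0x%08x\n", i, words[i]);
	return 0;
}
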
SCpnt=%p, SG_FragCnt=%d, SG_idx=%d\n", - SCpnt, SCpnt->use_sg, sg_chain_tag); - } - if ( ++sg_chain_tag == SG_MAX_BUFS ) - sg_chain_tag = 0; - for(i = 0 ; i < SCpnt->use_sg; i++) - { - *mptr++=cpu_to_le32(direction|0x10000000|sg_dma_len(sg)); - len+=sg_dma_len(sg); - *mptr++=cpu_to_le32(sg_dma_address(sg)); - sg++; - } - mptr[-2]=cpu_to_le32(direction|0xD0000000|sg_dma_len(sg-1)); - } - else - { - for(i = 0 ; i < SCpnt->use_sg; i++) - { - i2o_raw_writel(direction|0x10000000|sg_dma_len(sg), mptr++); - len+=sg->length; - i2o_raw_writel(sg_dma_address(sg), mptr++); - sg++; - } - - /* Make this an end of list. Again evade the 920 bug and - unwanted PCI read traffic */ - - i2o_raw_writel(direction|0xD0000000|sg_dma_len(sg-1), &mptr[-2]); - } - - if(!chain) - reqlen = mptr - msg; - - i2o_raw_writel(len, lenptr); - - if(len != SCpnt->underflow) - printk("Cmd len %08X Cmd underflow %08X\n", - len, SCpnt->underflow); - } - else - { - dprintk(KERN_INFO "non sg for %p, %d\n", SCpnt->request_buffer, - SCpnt->request_bufflen); - i2o_raw_writel(len = SCpnt->request_bufflen, lenptr); - if(len == 0) - { - reqlen = 9; - } - else - { - dma_addr_t dma_addr; - dma_addr = pci_map_single(c->pdev, - SCpnt->request_buffer, - SCpnt->request_bufflen, - SCpnt->sc_data_direction); - if(dma_addr == 0) - BUG(); /* How to handle ?? */ - SCpnt->SCp.ptr = (char *)(unsigned long) dma_addr; - i2o_raw_writel(0xD0000000|direction|SCpnt->request_bufflen, mptr++); - i2o_raw_writel(dma_addr, mptr++); - } - } - - /* - * Stick the headers on - */ - - i2o_raw_writel(reqlen<<16 | SGL_OFFSET_10, msg); - - /* Queue the message */ - i2o_post_message(c,m); - - atomic_inc(&queue_depth); - - if(atomic_read(&queue_depth)> max_qd) - { - max_qd=atomic_read(&queue_depth); - printk("Queue depth now %d.\n", max_qd); - } - - mb(); - dprintk(KERN_INFO "Issued %ld\n", current_command->serial_number); - - return 0; -} - -/** - * i2o_scsi_abort - abort a running command - * @SCpnt: command to abort - * - * Ask the I2O controller to abort a command. This is an asynchrnous - * process and our callback handler will see the command complete - * with an aborted message if it succeeds. - * - * Locks: no locks are held or needed - */ - -static int i2o_scsi_abort(struct scsi_cmnd * SCpnt) -{ - struct i2o_controller *c; - struct Scsi_Host *host; - struct i2o_scsi_host *hostdata; - u32 msg[5]; - int tid; - int status = FAILED; - - printk(KERN_WARNING "i2o_scsi: Aborting command block.\n"); - - host = SCpnt->device->host; - hostdata = (struct i2o_scsi_host *)host->hostdata; - tid = hostdata->task[SCpnt->device->id][SCpnt->device->lun]; - if(tid==-1) - { - printk(KERN_ERR "i2o_scsi: Impossible command to abort!\n"); - return status; - } - c = hostdata->controller; - - spin_unlock_irq(host->host_lock); - - msg[0] = FIVE_WORD_MSG_SIZE; - msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|tid; - msg[2] = scsi_context; - msg[3] = 0; - msg[4] = i2o_context_list_remove(SCpnt, c); - if(i2o_post_wait(c, msg, sizeof(msg), 240)) - status = SUCCESS; - - spin_lock_irq(host->host_lock); - return status; -} - -/** - * i2o_scsi_bus_reset - Issue a SCSI reset - * @SCpnt: the command that caused the reset - * - * Perform a SCSI bus reset operation. In I2O this is just a message - * we pass. I2O can do clever multi-initiator and shared reset stuff - * but we don't support this. - * - * Locks: called with no lock held, requires no locks. 
- */ - -static int i2o_scsi_bus_reset(struct scsi_cmnd * SCpnt) -{ - int tid; - struct i2o_controller *c; - struct Scsi_Host *host; - struct i2o_scsi_host *hostdata; - u32 m; - void *msg; - unsigned long timeout; - - - /* - * Find the TID for the bus - */ - - - host = SCpnt->device->host; - - spin_unlock_irq(host->host_lock); - - printk(KERN_WARNING "i2o_scsi: Attempting to reset the bus.\n"); - - hostdata = (struct i2o_scsi_host *)host->hostdata; - tid = hostdata->bus_task; - c = hostdata->controller; - - /* - * Now send a SCSI reset request. Any remaining commands - * will be aborted by the IOP. We need to catch the reply - * possibly ? - */ - - timeout = jiffies+2*HZ; - do - { - m = le32_to_cpu(I2O_POST_READ32(c)); - if(m != 0xFFFFFFFF) - break; - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(1); - mb(); - } - while(time_before(jiffies, timeout)); - - - msg = c->msg_virt + m; - i2o_raw_writel(FOUR_WORD_MSG_SIZE|SGL_OFFSET_0, msg); - i2o_raw_writel(I2O_CMD_SCSI_BUSRESET<<24|HOST_TID<<12|tid, msg+4); - i2o_raw_writel(scsi_context|0x80000000, msg+8); - /* We use the top bit to split controller and unit transactions */ - /* Now store unit,tid so we can tie the completion back to a specific device */ - __raw_writel(c->unit << 16 | tid, msg+12); - wmb(); - - /* We want the command to complete after we return */ - spin_lock_irq(host->host_lock); - i2o_post_message(c,m); - - /* Should we wait for the reset to complete ? */ - return SUCCESS; -} - -/** - * i2o_scsi_bios_param - Invent disk geometry - * @sdev: scsi device - * @dev: block layer device - * @capacity: size in sectors - * @ip: geometry array - * - * This is anyones guess quite frankly. We use the same rules everyone - * else appears to and hope. It seems to work. - */ - -static int i2o_scsi_bios_param(struct scsi_device * sdev, - struct block_device *dev, sector_t capacity, int *ip) -{ - int size; - - size = capacity; - ip[0] = 64; /* heads */ - ip[1] = 32; /* sectors */ - if ((ip[2] = size >> 11) > 1024) { /* cylinders, test for big disk */ - ip[0] = 255; /* heads */ - ip[1] = 63; /* sectors */ - ip[2] = size / (255 * 63); /* cylinders */ - } - return 0; -} - -MODULE_AUTHOR("Red Hat Software"); -MODULE_LICENSE("GPL"); - - -static struct scsi_host_template driver_template = { - .proc_name = "i2o_scsi", - .name = "I2O SCSI Layer", - .detect = i2o_scsi_detect, - .release = i2o_scsi_release, - .info = i2o_scsi_info, - .queuecommand = i2o_scsi_queuecommand, - .eh_abort_handler = i2o_scsi_abort, - .eh_bus_reset_handler = i2o_scsi_bus_reset, - .bios_param = i2o_scsi_bios_param, - .can_queue = I2O_SCSI_CAN_QUEUE, - .this_id = 15, - .sg_tablesize = 8, - .cmd_per_lun = 6, - .use_clustering = ENABLE_CLUSTERING, -}; - -#include "../../scsi/scsi_module.c" diff -puN /dev/null drivers/message/i2o/iop.c --- /dev/null Thu Apr 11 07:25:15 2002 +++ 25-akpm/drivers/message/i2o/iop.c Tue Jul 27 14:06:32 2004 @@ -0,0 +1,1261 @@ +/* + * Functions to handle I2O controllers and I2O message handling + * + * Copyright (C) 1999-2002 Red Hat Software + * + * Written by Alan Cox, Building Number Three Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + * A lot of the I2O message side code from this is taken from the + * Red Creek RCPCI45 adapter driver by Red Creek Communications + * + * Fixes/additions: + * Philipp Rumpf + * Juha Sievänen + * Auvo Häkkinen + * Deepak Saxena + * Boji T Kannanthanam + * Alan Cox : + * Ported to Linux 2.5. + * Markus Lidel : + * Minor fixes for 2.6. + */ + +#include +#include + + +#undef DEBUG + +#ifdef DEBUG +#define DBG(x...) printk(x) +#else +#define DBG(x...) +#endif + + +/* global I2O controller list */ +LIST_HEAD(i2o_controllers); + +/* + * global I2O System Table. Contains information about all the IOPs in the + * system. Used to inform IOPs about each others existence. + */ +static struct i2o_dma i2o_systab; + + +/* Module internal functions from other sources */ +extern struct i2o_driver i2o_exec_driver; +extern int i2o_exec_lct_get(struct i2o_controller *); +extern void i2o_device_remove(struct i2o_device *); + +extern int __init i2o_driver_init(void); +extern void __exit i2o_driver_exit(void); +extern int __init i2o_exec_init(void); +extern void __exit i2o_exec_exit(void); +extern int __init i2o_pci_init(void); +extern void __exit i2o_pci_exit(void); +extern int i2o_device_init(void); +extern void i2o_device_exit(void); + + +/* Module internal functions */ +struct i2o_controller *i2o_iop_alloc(void); +void i2o_iop_free(struct i2o_controller *); +int i2o_iop_add(struct i2o_controller *); +void i2o_iop_remove(struct i2o_controller *); + +/* Internal used functions */ +static inline void i2o_iop_quiesce_all(void); +static inline void i2o_iop_enable_all(void); +static int i2o_iop_quiesce(struct i2o_controller *); +static int i2o_iop_enable(struct i2o_controller *); +static int i2o_iop_activate(struct i2o_controller *); +static int i2o_iop_online(struct i2o_controller *); +static int i2o_iop_clear(struct i2o_controller *); +static int i2o_iop_reset(struct i2o_controller *); +static int i2o_iop_init_outbound_queue(struct i2o_controller *); +static int i2o_systab_build(void); +static int i2o_iop_systab_set(struct i2o_controller *); +static int i2o_parse_hrt(struct i2o_controller *); + +/* Module init and exit functions */ +static int __init i2o_iop_init(void); +static void __exit i2o_iop_exit(void); + + +/** + * i2o_msg_nop - Returns a message which is not used + * @c: I2O controller from which the message was created + * @m: message which should be returned + * + * If you fetch a message via i2o_msg_get, and can't use it, you must + * return the message with this function. Otherwise the message frame + * is lost. + */ +void i2o_msg_nop(struct i2o_controller *c, u32 m) +{ + struct i2o_message *msg = c->in_queue.virt + m; + + writel(THREE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg->head[0]); + writel(I2O_CMD_UTIL_NOP<<24|HOST_TID<<12|ADAPTER_TID, &msg->head[1]); + writel(0, &msg->head[2]); + writel(0, &msg->head[3]); + i2o_msg_post(c, m); +}; + +/** + * i2o_msg_get_wait - obtain an I2O message from the IOP + * @c: I2O controller + * @msg: pointer to a I2O message pointer + * @wait: how long to wait until timeout + * + * This function waits up to wait seconds for a message slot to be + * available. + * + * On a success the message is returned and the pointer to the message is + * set in msg. The returned message is the physical page frame offset + * address from the read port (see the i2o spec). If no message is + * available returns I2O_QUEUE_EMPTY and msg is leaved untouched. 
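
As the i2o_msg_nop() comment above states, a frame fetched with i2o_msg_get()/i2o_msg_get_wait() that ends up unused must be handed back, otherwise the message frame is lost. A hedged sketch of that pattern using only the helpers introduced by this patch; it is not meant to build outside the tree and ready_to_send() is a made-up stand-in for whatever driver-side condition applies.

/* Sketch only: return an unwanted message frame with i2o_msg_nop(). */
static int post_or_back_off(struct i2o_controller *c)
{
	struct i2o_message *msg;
	u32 m;

	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
	if (m == I2O_QUEUE_EMPTY)
		return -ETIMEDOUT;

	if (!ready_to_send()) {		/* made-up driver-side condition */
		i2o_msg_nop(c, m);	/* give the frame back, do not leak it */
		return -EBUSY;
	}

	/* otherwise fill in msg->head[] / msg->body[] here and post it */
	i2o_msg_post(c, m);
	return 0;
}
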
+ */ +u32 i2o_msg_get_wait(struct i2o_controller *c,struct i2o_message **msg,int wait) +{ + unsigned long timeout=jiffies + wait * HZ; + u32 m; + + while((m=i2o_msg_get(c, msg))==I2O_QUEUE_EMPTY) { + if(time_after(jiffies, timeout)) { + DBG("%s: Timeout waiting for message frame.\n",c->name); + return I2O_QUEUE_EMPTY; + } + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(1); + } + + return m; +}; + +#if BITS_PER_LONG == 64 +/** + * i2o_cntxt_list_add - Append a pointer to context list and return a id + * @ptr: pointer to add to the context list + * @c: controller to which the context list belong + * + * Because the context field in I2O is only 32-bit large, on 64-bit the + * pointer is to large to fit in the context field. The i2o_cntxt_list + * functions therefore map pointers to context fields. + * + * Returns context id > 0 on success or 0 on failure. + */ +u32 i2o_cntxt_list_add(struct i2o_controller *c, void *ptr) +{ + struct i2o_context_list_element *entry; + unsigned long flags; + + if(!ptr) + printk(KERN_ERR "NULL pointer found!\n"); + + entry = kmalloc(sizeof(*entry), GFP_ATOMIC); + if(!entry) { + printk(KERN_ERR "i2o: Could not allocate memory for context " + "list element\n"); + return 0; + } + + entry->ptr = ptr; + entry->timestamp = jiffies; + INIT_LIST_HEAD(&entry->list); + + spin_lock_irqsave(&c->context_list_lock, flags); + + if(unlikely(atomic_inc_and_test(&c->context_list_counter))) + atomic_inc(&c->context_list_counter); + + entry->context = atomic_read(&c->context_list_counter); + + list_add(&entry->list, &c->context_list); + + spin_unlock_irqrestore(&c->context_list_lock, flags); + + DBG("Add context to list %p -> %d\n", ptr, context); + + return entry->context; +}; + +/** + * i2o_cntxt_list_remove - Remove a pointer from the context list + * @ptr: pointer which should be removed from the context list + * @c: controller to which the context list belong + * + * Removes a previously added pointer from the context list and returns + * the matching context id. + * + * Returns context id on succes or 0 on failure. 
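
The context-list helpers (i2o_cntxt_list_add() above, _remove and _get below) exist because a 64-bit kernel pointer no longer fits in the 32-bit transaction context field, so small ids are handed out and mapped back to pointers through a list. A stand-alone illustration of the same idea, without the spinlock and with plain malloc; ctx_add()/ctx_get() are illustrative names.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct ctx_entry {
	struct ctx_entry *next;
	uint32_t context;
	void *ptr;
};

static struct ctx_entry *ctx_list;
static uint32_t ctx_counter;

/* Map a pointer to a 32-bit context id (0 is reserved to mean "failure"). */
static uint32_t ctx_add(void *ptr)
{
	struct ctx_entry *e = malloc(sizeof(*e));

	if (!e)
		return 0;
	if (++ctx_counter == 0)		/* skip 0 on counter wrap-around */
		++ctx_counter;
	e->context = ctx_counter;
	e->ptr = ptr;
	e->next = ctx_list;
	ctx_list = e;
	return e->context;
}

/* Look up a context id, unlink the entry and hand the pointer back. */
static void *ctx_get(uint32_t context)
{
	struct ctx_entry **p, *e;

	for (p = &ctx_list; (e = *p); p = &e->next)
		if (e->context == context) {
			void *ptr = e->ptr;

			*p = e->next;
			free(e);
			return ptr;
		}
	return NULL;
}

int main(void)
{
	int cmd = 42;
	uint32_t id = ctx_add(&cmd);

	printf("context %u -> %p\n", id, ctx_get(id));
	return 0;
}
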
+ */ +u32 i2o_cntxt_list_remove(struct i2o_controller *c, void *ptr) +{ + struct i2o_context_list_element *entry; + u32 context = 0; + unsigned long flags; + + spin_lock_irqsave(&c->context_list_lock, flags); + list_for_each_entry(entry, &c->context_list, list) + if(entry->ptr == ptr) { + list_del(&entry->list); + context = entry->context; + kfree(entry); + break; + } + spin_unlock_irqrestore(&c->context_list_lock, flags); + + if(!context) + printk(KERN_WARNING "i2o: Could not remove nonexistent ptr " + "%p\n", ptr); + + DBG("remove ptr from context list %d -> %p\n", context, ptr); + + return context; +}; + +/** + * i2o_cntxt_list_get - Get a pointer from the context list and remove it + * @context: context id to which the pointer belong + * @c: controller to which the context list belong + * returns pointer to the matching context id + */ +void *i2o_cntxt_list_get(struct i2o_controller *c, u32 context) +{ + struct i2o_context_list_element *entry; + unsigned long flags; + void *ptr = NULL; + + spin_lock_irqsave(&c->context_list_lock, flags); + list_for_each_entry(entry, &c->context_list, list) + if(entry->context == context) { + list_del(&entry->list); + ptr = entry->ptr; + kfree(entry); + break; + } + spin_unlock_irqrestore(&c->context_list_lock, flags); + + if(!ptr) + printk(KERN_WARNING "i2o: context id %d not found\n", context); + + DBG("get ptr from context list %d -> %p\n", context, ptr); + + return ptr; +}; +#endif + +/** + * i2o_iop_find - Find an I2O controller by id + * @unit: unit number of the I2O controller to search for + * + * Lookup the I2O controller on the controller list. + * + * Returns pointer to the I2O controller on success or NULL if not found. + */ +struct i2o_controller *i2o_find_iop(int unit) { + struct i2o_controller *c; + + list_for_each_entry(c, &i2o_controllers, list) { + if(c->unit == unit) + return c; + } + + return NULL; +}; + +/** + * i2o_iop_find_device - Find a I2O device on an I2O controller + * @c: I2O controller where the I2O device hangs on + * @tid: TID of the I2O device to search for + * + * Searches the devices of the I2O controller for a device with TID tid and + * returns it. + * + * Returns a pointer to the I2O device if found, otherwise NULL. + */ +struct i2o_device *i2o_iop_find_device(struct i2o_controller *c, u16 tid) { + struct i2o_device *dev; + + list_for_each_entry(dev, &c->devices, list) + if(dev->lct_data.tid == tid) + return dev; + + return 0; +}; + +/** + * i2o_iop_quiesce_all - Quiesce all I2O controllers on the system + * + * Quiesce all I2O controllers which are connected to the system. + */ +static inline void i2o_iop_quiesce_all(void) +{ + struct i2o_controller *c, *tmp; + + list_for_each_entry_safe(c, tmp, &i2o_controllers, list) { + if(!c->no_quiesce) + i2o_iop_quiesce(c); + } +}; + +/** + * i2o_iop_enable_all - Enables all controllers on the system + * + * Enables all I2O controllers which are connected to the system. + */ +static inline void i2o_iop_enable_all(void) +{ + struct i2o_controller *c, *tmp; + + list_for_each_entry_safe(c, tmp, &i2o_controllers, list) + i2o_iop_enable(c); +}; + +/** + * i2o_quiesce_controller - quiesce controller + * @c: controller + * + * Quiesce an IOP. Causes IOP to make external operation quiescent + * (i2o 'READY' state). Internal operation of the IOP continues normally. + * + * Returns 0 on success or negative error code on failure. 
+ */ +static int i2o_iop_quiesce(struct i2o_controller *c) +{ + struct i2o_message *msg; + u32 m; + i2o_status_block *sb = c->status_block.virt; + int rc; + + i2o_status_get(c); + + /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */ + if((sb->iop_state != ADAPTER_STATE_READY) && + (sb->iop_state != ADAPTER_STATE_OPERATIONAL)) + return 0; + + m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m == I2O_QUEUE_EMPTY) + return -ETIMEDOUT; + + writel(FOUR_WORD_MSG_SIZE|SGL_OFFSET_0, &msg->head[0]); + writel(I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID, &msg->head[1]); + + /* Long timeout needed for quiesce if lots of devices */ + if ((rc=i2o_msg_post_wait(c, m, 240))) + printk(KERN_INFO "%s: Unable to quiesce (status=%#x).\n", + c->name, -rc); + else + DBG("%s: Quiesced.\n", c->name); + + i2o_status_get(c); // Entered READY state + + return rc; +}; + +/** + * i2o_iop_enable - move controller from ready to OPERATIONAL + * @c: I2O controller + * + * Enable IOP. This allows the IOP to resume external operations and + * reverses the effect of a quiesce. Returns zero or an error code if + * an error occurs. + */ +static int i2o_iop_enable(struct i2o_controller *c) +{ + struct i2o_message *msg; + u32 m; + i2o_status_block *sb = c->status_block.virt; + int rc; + + i2o_status_get(c); + + /* Enable only allowed on READY state */ + if(sb->iop_state != ADAPTER_STATE_READY) + return -EINVAL; + + m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m == I2O_QUEUE_EMPTY) + return -ETIMEDOUT; + + writel(FOUR_WORD_MSG_SIZE|SGL_OFFSET_0, &msg->head[0]); + writel(I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID, &msg->head[1]); + + /* How long of a timeout do we need? */ + if((rc = i2o_msg_post_wait(c, m, 240))) + printk(KERN_ERR "%s: Could not enable (status=%#x).\n", + c->name, -rc); + else + DBG("%s: Enabled.\n", c->name); + + i2o_status_get(c); // entered OPERATIONAL state + + return rc; +}; + +/** + * i2o_iop_activate - Bring controller up to HOLD + * @c: controller + * + * This function brings an I2O controller into HOLD state. The adapter + * is reset if necessary and then the queues and resource table are read. + * + * Returns 0 on success or negative error code on failure. + */ +static int i2o_iop_activate(struct i2o_controller *c) +{ + i2o_status_block *sb = c->status_block.virt; + int rc; + /* In INIT state, Wait Inbound Q to initialize (in i2o_status_get) */ + /* In READY state, Get status */ + + rc = i2o_status_get(c); + if(rc) { + printk(KERN_INFO "Unable to obtain status of %s, " + "attempting a reset.\n", c->name); + if(i2o_iop_reset(c)) + return rc; + } + + if(sb->i2o_version > I2OVER15) { + printk(KERN_ERR "%s: Not running vrs. 1.5. of the I2O " + "Specification.\n", c->name); + return -ENODEV; + } + + switch(sb->iop_state) { + case ADAPTER_STATE_FAULTED: + printk(KERN_CRIT "%s: hardware fault\n", c->name); + return -ENODEV; + + case ADAPTER_STATE_READY: + case ADAPTER_STATE_OPERATIONAL: + case ADAPTER_STATE_HOLD: + case ADAPTER_STATE_FAILED: + DBG("already running, trying to reset...\n"); + if(i2o_iop_reset(c)) + return -ENODEV; + } + + rc = i2o_iop_init_outbound_queue(c); + if(rc) + return rc; + + /* In HOLD state */ + + rc = i2o_hrt_get(c); + if(rc) + return rc; + + return 0; +}; + +/** + * i2o_iop_online - Bring a controller online into OPERATIONAL state. + * @c: I2O controller + * + * Send the system table and enable the I2O controller. + * + * Returns 0 on success or negativer error code on failure. 
+ */ +static int i2o_iop_online(struct i2o_controller *c) +{ + int rc; + + rc =i2o_iop_systab_set(c); + if(rc) + return rc; + + /* In READY state */ + DBG("%s: Attempting to enable...\n", c->name); + rc = i2o_iop_enable(c); + if(rc) + return rc; + + return 0; +}; + +/** + * i2o_clear_controller - Bring I2O controller into HOLD state + * @c: controller + * + * Clear an IOP to HOLD state, ie. terminate external operations, clear all + * input queues and prepare for a system restart. IOP's internal operation + * continues normally and the outbound queue is alive. The IOP is not + * expected to rebuild its LCT. + * + * Returns 0 on success or negative error code on failure. + */ +static int i2o_iop_clear(struct i2o_controller *c) +{ + struct i2o_message *msg; + u32 m; + int rc; + + m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m == I2O_QUEUE_EMPTY) + return -ETIMEDOUT; + + /* Quiesce all IOPs first */ + i2o_iop_quiesce_all(); + + writel(FOUR_WORD_MSG_SIZE|SGL_OFFSET_0, &msg->head[0]); + writel(I2O_CMD_ADAPTER_CLEAR<<24|HOST_TID<<12|ADAPTER_TID, + &msg->head[1]); + + if ((rc=i2o_msg_post_wait(c, m, 30))) + printk(KERN_INFO "%s: Unable to clear (status=%#x).\n", + c->name, -rc); + else + DBG("%s: Cleared.\n",c->name); + + /* Enable all IOPs */ + i2o_iop_enable_all(); + + i2o_status_get(c); + + return rc; +} + +/** + * i2o_iop_reset - reset an I2O controller + * @c: controller to reset + * + * Reset the IOP into INIT state and wait until IOP gets into RESET state. + * Terminate all external operations, clear IOP's inbound and outbound + * queues, terminate all DDMs, and reload the IOP's operating environment + * and all local DDMs. The IOP rebuilds its LCT. + */ +static int i2o_iop_reset(struct i2o_controller *c) +{ + u8 *status = c->status.virt; + struct i2o_message *msg; + u32 m; + unsigned long timeout; + i2o_status_block *sb = c->status_block.virt; + int rc = 0; + + DBG("Resetting controller\n"); + + m=i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m == I2O_QUEUE_EMPTY) + return -ETIMEDOUT; + + memset(status, 0, 4); + + /* Quiesce all IOPs first */ + i2o_iop_quiesce_all(); + + writel(EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0, &msg->head[0]); + writel(I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID, + &msg->head[1]); + writel(i2o_exec_driver.context, &msg->icntxt); + writel(0, &msg->tcntxt); //FIXME: use reasonable transaction context + writel(0, &msg->body[0]); + writel(0, &msg->body[1]); + writel(i2o_ptr_low((void *)c->status.phys), &msg->body[2]); + writel(i2o_ptr_high((void *)c->status.phys), &msg->body[3]); + + i2o_msg_post(c,m); + + /* Wait for a reply */ + timeout=jiffies + I2O_TIMEOUT_RESET * HZ; + while(!*status) { + if(time_after(jiffies, timeout)) { + printk(KERN_ERR "IOP reset timeout.\n"); + rc = -ETIMEDOUT; + goto exit; + } + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(1); + + rmb(); + } + + if (*status==I2O_CMD_IN_PROGRESS) { + /* + * Once the reset is sent, the IOP goes into the INIT state + * which is indeterminate. We need to wait until the IOP + * has rebooted before we can let the system talk to + * it. We read the inbound Free_List until a message is + * available. If we can't read one in the given ammount of + * time, we assume the IOP could not reboot properly. 
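
The reset path above spins on a status byte that the IOP writes back through DMA, giving up after I2O_TIMEOUT_RESET seconds. A stand-alone sketch of that poll-with-deadline pattern; the real code sleeps a tick per iteration via schedule_timeout(1) instead of busy-waiting, and wait_for_status() is an illustrative name.

#include <stdio.h>
#include <time.h>

/* Stand-in for the byte the IOP writes back into c->status.virt. */
static volatile unsigned char reset_status;

/* Poll a status byte until it changes or the deadline passes. */
static int wait_for_status(volatile unsigned char *status, int seconds)
{
	time_t deadline = time(NULL) + seconds;

	while (!*status) {
		if (time(NULL) > deadline)
			return -1;	/* -ETIMEDOUT in the driver */
	}
	return 0;
}

int main(void)
{
	reset_status = 1;		/* pretend the IOP has already answered */
	printf("%s\n", wait_for_status(&reset_status, 2) ? "timeout" : "done");
	return 0;
}
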
+ */ + DBG("%s: Reset in progress, waiting for reboot...\n", c->name); + + m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET); + while(m == I2O_QUEUE_EMPTY) { + if(time_after(jiffies, timeout)) { + printk(KERN_ERR "IOP reset timeout.\n"); + rc = -ETIMEDOUT; + goto exit; + } + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(1); + + m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET); + } + i2o_msg_nop(c, m); + } + + /* from here all quiesce commands are safe */ + c->no_quiesce = 0; + + /* If IopReset was rejected or didn't perform reset, try IopClear */ + i2o_status_get(c); + if(*status==I2O_CMD_REJECTED || sb->iop_state!=ADAPTER_STATE_RESET) { + printk(KERN_WARNING "%s: Reset rejected, trying to clear\n", + c->name); + i2o_iop_clear(c); + } else + DBG("%s: Reset completed.\n", c->name); + +exit: + /* Enable all IOPs */ + i2o_iop_enable_all(); + + return rc; +}; + +/** + * i2o_systab_build - Build system table + * + * The system table contains information about all the IOPs in the system + * (duh) and is used by the Executives on the IOPs to establish peer2peer + * connections. We're not supporting peer2peer at the moment, but this + * will be needed down the road for things like lan2lan forwarding. + * + * Returns 0 on success or negative error code on failure. + */ +static int i2o_systab_build(void) +{ + struct i2o_controller *c, *tmp; + int num_controllers = 0; + u32 change_ind = 0; + int count = 0; + struct i2o_sys_tbl *systab = i2o_systab.virt; + + list_for_each_entry_safe(c, tmp, &i2o_controllers, list) + num_controllers ++; + + if(systab) { + change_ind = systab->change_ind; + kfree(i2o_systab.virt); + } + + /* Header + IOPs */ + i2o_systab.len = sizeof(struct i2o_sys_tbl) + num_controllers * + sizeof(struct i2o_sys_tbl_entry); + + systab = i2o_systab.virt = kmalloc(i2o_systab.len, GFP_KERNEL); + if(!systab) { + printk(KERN_ERR "i2o: unable to allocate memory for System " + "Table\n"); + return -ENOMEM; + } + memset(systab, 0, i2o_systab.len); + + systab->version = I2OVERSION; + systab->change_ind = change_ind + 1; + + list_for_each_entry_safe(c, tmp, &i2o_controllers, list) { + i2o_status_block *sb; + + if(count >= num_controllers) { + printk(KERN_ERR "i2o: controller added while building " + "system table\n"); + break; + } + + sb = c->status_block.virt; + + /* + * Get updated IOP state so we have the latest information + * + * We should delete the controller at this point if it + * doesn't respond since if it's not on the system table + * it is techninically not part of the I2O subsystem... 
+ */ + if(unlikely(i2o_status_get(c))) { + printk(KERN_ERR "%s: Deleting b/c could not get status" + " while attempting to build system table\n", + c->name); + i2o_iop_remove(c); + continue; // try the next one + } + + systab->iops[count].org_id = sb->org_id; + systab->iops[count].iop_id = c->unit + 2; + systab->iops[count].seg_num = 0; + systab->iops[count].i2o_version = sb->i2o_version; + systab->iops[count].iop_state = sb->iop_state; + systab->iops[count].msg_type = sb->msg_type; + systab->iops[count].frame_size = sb->inbound_frame_size; + systab->iops[count].last_changed = change_ind; + systab->iops[count].iop_capabilities = sb->iop_capabilities; + systab->iops[count].inbound_low = i2o_ptr_low(c->post_port); + systab->iops[count].inbound_high = i2o_ptr_high(c->post_port); + + count ++; + } + + systab->num_entries = count; + + return 0; +}; + +/** + * i2o_iop_systab_set - Set the I2O System Table of the specified IOP + * @c: I2O controller to which the system table should be send + * + * Before the systab could be set i2o_systab_build() must be called. + * + * Returns 0 on success or negative error code on failure. + */ +static int i2o_iop_systab_set(struct i2o_controller *c) +{ + struct i2o_message *msg; + u32 m; + i2o_status_block *sb = c->status_block.virt; + struct device *dev = &c->pdev->dev; + struct resource *root; + int rc; + + if(sb->current_mem_size < sb->desired_mem_size) + { + struct resource *res = &c->mem_resource; + res->name = c->pdev->bus->name; + res->flags = IORESOURCE_MEM; + res->start = 0; + res->end = 0; + printk("%s: requires private memory resources.\n", c->name); + root = pci_find_parent_resource(c->pdev, res); + if(root==NULL) + printk("Can't find parent resource!\n"); + if(root && allocate_resource(root, res, + sb->desired_mem_size, + sb->desired_mem_size, + sb->desired_mem_size, + 1<<20, /* Unspecified, so use 1Mb and play safe */ + NULL, + NULL)>=0) + { + c->mem_alloc = 1; + sb->current_mem_size = 1 + res->end - res->start; + sb->current_mem_base = res->start; + printk(KERN_INFO "%s: allocated %ld bytes of PCI memory at 0x%08lX.\n", + c->name, 1+res->end-res->start, res->start); + } + } + + if(sb->current_io_size < sb->desired_io_size) + { + struct resource *res = &c->io_resource; + res->name = c->pdev->bus->name; + res->flags = IORESOURCE_IO; + res->start = 0; + res->end = 0; + printk("%s: requires private memory resources.\n", c->name); + root = pci_find_parent_resource(c->pdev, res); + if(root==NULL) + printk("Can't find parent resource!\n"); + if(root && allocate_resource(root, res, + sb->desired_io_size, + sb->desired_io_size, + sb->desired_io_size, + 1<<20, /* Unspecified, so use 1Mb and play safe */ + NULL, + NULL)>=0) + { + c->io_alloc = 1; + sb->current_io_size = 1 + res->end - res->start; + sb->current_mem_base = res->start; + printk(KERN_INFO "%s: allocated %ld bytes of PCI I/O at 0x%08lX.\n", + c->name, 1+res->end-res->start, res->start); + } + } + + m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m == I2O_QUEUE_EMPTY) + return -ETIMEDOUT; + + i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len, + PCI_DMA_TODEVICE); + if(!i2o_systab.phys) { + i2o_msg_nop(c, m); + return -ENOMEM; + } + + writel(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6, &msg->head[0]); + writel(I2O_CMD_SYS_TAB_SET<<24|HOST_TID<<12|ADAPTER_TID, &msg->head[1]); + + /* + * Provide three SGL-elements: + * System table (SysTab), Private memory space declaration and + * Private i/o space declaration + * + * FIXME: is this still true? + * Nasty one here. 
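
i2o_systab_build() above sizes the table as a header plus one entry per IOP and fills each entry from that controller's status block; the IOP id written is unit + 2 because ids 0 and 1 are reserved for the null IOP and the local host. A stand-alone sketch of the size calculation and filling, with deliberately simplified structures and placeholder field values; struct tbl_hdr/tbl_entry are not the real i2o_sys_tbl layout.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Deliberately simplified stand-ins for i2o_sys_tbl / i2o_sys_tbl_entry. */
struct tbl_entry {
	uint16_t org_id;
	uint16_t iop_id;
	uint32_t iop_state;
	uint32_t frame_size;
};

struct tbl_hdr {
	uint8_t num_entries;
	uint8_t version;
	uint32_t change_ind;
	struct tbl_entry iops[];
};

/* One entry per IOP; the real code copies these fields from each
 * controller's status block, placeholders are used here. */
static struct tbl_hdr *build_systab(int num_iops)
{
	size_t len = sizeof(struct tbl_hdr) + num_iops * sizeof(struct tbl_entry);
	struct tbl_hdr *tab = calloc(1, len);
	int unit;

	if (!tab)
		return NULL;

	for (unit = 0; unit < num_iops; unit++) {
		tab->iops[unit].iop_id = unit + 2;  /* 0 and 1: null IOP, local host */
		tab->iops[unit].frame_size = 128;   /* placeholder inbound frame size */
	}
	tab->num_entries = num_iops;
	return tab;
}

int main(void)
{
	struct tbl_hdr *tab = build_systab(2);

	if (tab)
		printf("%d entries, first IOP id %d\n",
		       tab->num_entries, tab->iops[0].iop_id);
	free(tab);
	return 0;
}
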
We can't use dma_alloc_coherent to send the + * same table to everyone. We have to go remap it for them all + */ + + writel(c->unit + 2, &msg->body[0]); + writel(0, &msg->body[1]); + writel(0x54000000 | i2o_systab.phys, &msg->body[2]); + writel(i2o_systab.phys, &msg->body[3]); + writel(0x54000000 | sb->current_mem_size, &msg->body[4]); + writel(sb->current_mem_base, &msg->body[5]); + writel(0xd4000000 | sb->current_io_size, &msg->body[6]); + writel(sb->current_io_base, &msg->body[6]); + + rc=i2o_msg_post_wait(c, m, 120); + + dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len,PCI_DMA_TODEVICE); + + if(rc<0) + printk(KERN_ERR "%s: Unable to set SysTab (status=%#x).\n", + c->name, -rc); + else + DBG("%s: SysTab set.\n", c->name); + + i2o_status_get(c); // Entered READY state + + return rc; +} + +/** + * i2o_parse_hrt - Parse the hardware resource table. + * @c: I2O controller + * + * We don't do anything with it except dumping it (in debug mode). + * + * Returns 0. + */ +static int i2o_parse_hrt(struct i2o_controller *c) +{ + i2o_dump_hrt(c); + return 0; +}; + +/** + * i2o_status_get - Get the status block from the I2O controller + * @c: I2O controller + * + * Issue a status query on the controller. This updates the attached + * status block. The status block could then be accessed through + * c->status_block. + * + * Returns 0 on sucess or negative error code on failure. + */ +int i2o_status_get(struct i2o_controller *c) +{ + struct i2o_message *msg; + u32 m; + u8 *status_block; + unsigned long timeout; + + status_block = (u8*)c->status_block.virt; + memset(status_block, 0, sizeof(i2o_status_block)); + + m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m == I2O_QUEUE_EMPTY) + return -ETIMEDOUT; + + writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg->head[0]); + writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg->head[1]); + writel(i2o_exec_driver.context, &msg->icntxt); + writel(0, &msg->tcntxt); // FIXME: use resonable transaction context + writel(0, &msg->body[0]); + writel(0, &msg->body[1]); + writel(i2o_ptr_low((void *)c->status_block.phys), &msg->body[2]); + writel(i2o_ptr_high((void *)c->status_block.phys), &msg->body[3]); + writel(sizeof(i2o_status_block), &msg->body[4]); /* always 88 bytes */ + + i2o_msg_post(c,m); + + /* Wait for a reply */ + timeout=jiffies + I2O_TIMEOUT_STATUS_GET * HZ; + while(status_block[87]!=0xFF) { + if(time_after(jiffies, timeout)) { + printk(KERN_ERR "%s: Get status timeout.\n", c->name); + return -ETIMEDOUT; + } + + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(1); + + rmb(); + } + +#if DEBUG + i2o_debug_state(c); +#endif + + return 0; +} + +/* + * i2o_hrt_get - Get the Hardware Resource Table from the I2O controller + * @c: I2O controller from which the HRT should be fetched + * + * The HRT contains information about possible hidden devices but is + * mostly useless to us. + * + * Returns 0 on success or negativer error code on failure. 
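
The usual calling pattern for i2o_status_get() above is to refresh the attached status block and then branch on sb->iop_state. A hedged sketch of such a caller, using only symbols this patch introduces; report_iop_state() is an illustrative name and not part of the patch.

/* Sketch only: refresh and report an IOP's state. */
static void report_iop_state(struct i2o_controller *c)
{
	i2o_status_block *sb = c->status_block.virt;

	if (i2o_status_get(c)) {
		printk(KERN_ERR "%s: could not read status block\n", c->name);
		return;
	}

	if (sb->iop_state == ADAPTER_STATE_OPERATIONAL)
		printk(KERN_INFO "%s: operational\n", c->name);
	else
		printk(KERN_INFO "%s: state %d, not operational yet\n",
		       c->name, sb->iop_state);
}
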
+ */ +int i2o_hrt_get(struct i2o_controller *c) +{ + int rc; + int i; + i2o_hrt *hrt = c->hrt.virt; + u32 size = sizeof(i2o_hrt); + struct device *dev = &c->pdev->dev; + + for(i = 0; i < I2O_HRT_GET_TRIES; i ++) { + struct i2o_message *msg; + u32 m; + + m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m == I2O_QUEUE_EMPTY) + return -ETIMEDOUT; + + writel(SIX_WORD_MSG_SIZE| SGL_OFFSET_4, &msg->head[0]); + writel(I2O_CMD_HRT_GET<<24|HOST_TID<<12|ADAPTER_TID, + &msg->head[1]); + writel(0xd0000000 | c->hrt.len, &msg->body[0]); + writel(c->hrt.phys, &msg->body[1]); + + rc = i2o_msg_post_wait_mem(c, m, 20, &c->hrt); + + if(rc<0) { + printk(KERN_ERR "%s: Unable to get HRT (status=%#x)\n", + c->name, -rc); + return rc; + } + + size = hrt->num_entries * hrt->entry_len << 2; + if(size > c->hrt.len) { + if(i2o_dma_realloc(dev, &c->hrt, size, GFP_KERNEL)) + return -ENOMEM; + else + hrt = c->hrt.virt; + } else + return i2o_parse_hrt(c); + } + + printk(KERN_ERR "%s: Unable to get HRT after %d tries, giving up\n", + c->name, I2O_HRT_GET_TRIES); + + return -EBUSY; +} + +/** + * i2o_iop_init_outbound_queue - setup the outbound message queue + * @c: I2O controller + * + * Clear and (re)initialize IOP's outbound queue and post the message + * frames to the IOP. + * + * Returns 0 on success or a negative errno code on failure. + */ +int i2o_iop_init_outbound_queue(struct i2o_controller *c) +{ + u8 *status = c->status.virt; + u32 m; + struct i2o_message *msg; + ulong timeout; + int i; + + DBG("%s: Initializing Outbound Queue...\n", c->name); + + memset(status, 0, 4); + + m=i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m == I2O_QUEUE_EMPTY) + return -ETIMEDOUT; + + writel(EIGHT_WORD_MSG_SIZE| TRL_OFFSET_6, &msg->head[0]); + writel(I2O_CMD_OUTBOUND_INIT<<24|HOST_TID<<12|ADAPTER_TID, + &msg->head[1]); + writel(i2o_exec_driver.context, &msg->icntxt); + writel(0x0106, &msg->tcntxt); /* FIXME: why 0x0106, maybe in + Spec? */ + writel(PAGE_SIZE, &msg->body[0]); + writel(MSG_FRAME_SIZE<<16|0x80, &msg->body[1]); /* Outbound msg frame + size in words and Initcode */ + writel(0xd0000004, &msg->body[2]); + writel(i2o_ptr_low((void *)c->status.phys), &msg->body[3]); + writel(i2o_ptr_high((void *)c->status.phys), &msg->body[4]); + + i2o_msg_post(c, m); + + timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ; + while(*status <= I2O_CMD_IN_PROGRESS) { + if(time_after(jiffies, timeout)) { + printk(KERN_WARNING "%s: Timeout Initializing\n", + c->name); + return -ETIMEDOUT; + } + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(1); + + rmb(); + } + + m = c->out_queue.phys; + + /* Post frames */ + for(i=0; i < NMBR_MSG_FRAMES; i++) { + i2o_flush_reply(c, m); + m += MSG_FRAME_SIZE * 4; + } + + return 0; +} + +/** + * i2o_iop_alloc - Allocate and initialize a i2o_controller struct + * + * Allocate the necessary memory for a i2o_controller struct and + * initialize the lists. + * + * Returns a pointer to the I2O controller or a negative error code on + * failure. 
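
i2o_hrt_get() above asks for the table with whatever buffer it currently owns, and when the IOP reports a larger size it reallocates with i2o_dma_realloc() and retries, up to I2O_HRT_GET_TRIES times. The same grow-and-retry pattern in stand-alone form, with query_table() faking the message exchange; all names here are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define GET_TRIES 3

/* Fake "IOP": copies as much as fits and reports the real size (96 bytes). */
static size_t query_table(void *buf, size_t len)
{
	const size_t real = 96;

	memset(buf, 0xAA, len < real ? len : real);
	return real;
}

/* Fetch a table whose size is only known from the reply: grow and retry. */
static void *fetch_table(size_t *lenp)
{
	size_t len = 32;			/* optimistic first guess */
	void *buf = malloc(len);
	int i;

	for (i = 0; buf && i < GET_TRIES; i++) {
		size_t need = query_table(buf, len);

		if (need <= len) {
			*lenp = need;
			return buf;		/* it fit: done */
		}
		free(buf);			/* too small: grow and retry */
		len = need;
		buf = malloc(len);
	}
	free(buf);
	return NULL;
}

int main(void)
{
	size_t len;
	void *tab = fetch_table(&len);

	if (tab)
		printf("got %zu bytes\n", len);
	else
		printf("gave up\n");
	free(tab);
	return 0;
}
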
+ */ +struct i2o_controller *i2o_iop_alloc(void) +{ + static int unit = 0; /* 0 and 1 are NULL IOP and Local Host */ + struct i2o_controller *c; + + c = kmalloc(sizeof(*c), GFP_KERNEL); + if(!c) + { + printk(KERN_ERR "i2o: Insufficient memory to allocate the " + "controller.\n"); + return ERR_PTR(-ENOMEM); + } + memset(c, 0, sizeof(*c)); + + INIT_LIST_HEAD(&c->devices); + c->lock = SPIN_LOCK_UNLOCKED; + init_MUTEX(&c->lct_lock); + c->unit = unit ++; + sprintf(c->name, "iop%d", c->unit); + +#if BITS_PER_LONG == 64 + c->context_list_lock = SPIN_LOCK_UNLOCKED; + atomic_set(&c->context_list_counter, 0); + INIT_LIST_HEAD(&c->context_list); +#endif + + return c; +}; + +/** + * i2o_iop_free - Free the i2o_controller struct + * @c: I2O controller to free + */ +void i2o_iop_free(struct i2o_controller *c) +{ + kfree(c); +}; + +/** + * i2o_iop_add - Initialize the I2O controller and add him to the I2O core + * @c: controller + * + * Initialize the I2O controller and if no error occurs add him to the I2O + * core. + * + * Returns 0 on success or negative error code on failure. + */ +int i2o_iop_add(struct i2o_controller *c) +{ + int rc; + + printk(KERN_INFO "%s: Activating I2O controller...\n", c->name); + printk(KERN_INFO "%s: This may take a few minutes if there are many " + "devices\n", c->name); + + if((rc = i2o_iop_activate(c))) { + printk(KERN_ERR "%s: controller could not activated\n", + c->name); + i2o_iop_reset(c); + return rc; + } + + DBG("building sys table %s...\n", c->name); + + if((rc = i2o_systab_build())) { + i2o_iop_reset(c); + return rc; + } + + DBG("online controller %s...\n", c->name); + + if((rc = i2o_iop_online(c))) { + i2o_iop_reset(c); + return rc; + } + + DBG("getting LCT %s...\n", c->name); + + if((rc = i2o_exec_lct_get(c))) { + i2o_iop_reset(c); + return rc; + } + + list_add(&c->list, &i2o_controllers); + + printk(KERN_INFO "%s: Controller added\n", c->name); + + return 0; +}; + +/** + * i2o_iop_remove - Remove the I2O controller from the I2O core + * @c: I2O controller + * + * Remove the I2O controller from the I2O core. If devices are attached to + * the controller remove these also and finally reset the controller. + */ +void i2o_iop_remove(struct i2o_controller *c) +{ + struct i2o_device *dev, *tmp; + + DBG("Deleting controller %s\n", c->name); + + list_del(&c->list); + + list_for_each_entry_safe(dev, tmp, &c->devices, list) + i2o_device_remove(dev); + + /* Ask the IOP to switch to RESET state */ + i2o_iop_reset(c); +} + +/** + * i2o_event_register - Turn on/off event notification for a I2O device + * @dev: I2O device which should receive the event registration request + * @drv: driver which want to get notified + * @tcntxt: transaction context to use with this notifier + * @evt_mask: mask of events + * + * Create and posts an event registration message to the task. No reply + * is waited for, or expected. If you do not want further notifications, + * call the i2o_event_register again with a evt_mask of 0. + * + * Returns 0 on success or -ETIMEDOUT if no message could be fetched for + * sending the request. 
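
Following on from the kernel-doc just above: callers subscribe with a non-zero event mask and unsubscribe by calling i2o_event_register() again with a mask of 0. A hedged sketch of both calls; the mask and transaction context values are made up for illustration and the example_* wrappers are not part of the patch.

/* Sketch only: turn event notification for a device on and off again. */
static int example_watch_device(struct i2o_device *dev, struct i2o_driver *drv)
{
	/* ask for all events, tagging replies with transaction context 0x1 */
	return i2o_event_register(dev, drv, 0x1, 0xffffffff);
}

static int example_unwatch_device(struct i2o_device *dev, struct i2o_driver *drv)
{
	/* a mask of 0 switches notifications off again */
	return i2o_event_register(dev, drv, 0x1, 0);
}
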
+ */ +int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv, int tcntxt, u32 evt_mask) +{ + struct i2o_controller *c = dev->iop; + struct i2o_message *msg; + u32 m; + + m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m == I2O_QUEUE_EMPTY) + return -ETIMEDOUT; + + writel(FIVE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg->head[0]); + writel(I2O_CMD_UTIL_EVT_REGISTER<<24|HOST_TID<<12|dev->lct_data.tid, + &msg->head[1]); + writel(drv->context, &msg->icntxt); + writel(tcntxt, &msg->tcntxt); + writel(evt_mask, &msg->body[0]); + + i2o_msg_post(c, m); + + return 0; +}; + +/** + * i2o_iop_init - I2O main initialization function + * + * Initialize the I2O drivers (OSM) functions, register the Executive OSM, + * initialize the I2O PCI part and finally initialize I2O device stuff. + * + * Returns 0 on success or negative error code on failure. + */ +static int __init i2o_iop_init(void) +{ + int rc = 0; + + printk(KERN_INFO "I2O Core - (C) Copyright 1999 Red Hat Software\n"); + + rc = i2o_device_init(); + if(rc) + goto exit; + + rc = i2o_driver_init(); + if(rc) + goto device_exit; + + rc = i2o_exec_init(); + if(rc) + goto driver_exit; + + rc = i2o_pci_init(); + if(rc<0) + goto exec_exit; + + return 0; + +exec_exit: + i2o_exec_exit(); + +driver_exit: + i2o_driver_exit(); + +device_exit: + i2o_device_exit(); + +exit: + return rc; +} + +/** + * i2o_iop_exit - I2O main exit function + * + * Removes I2O controllers from PCI subsystem and shut down OSMs. + */ +static void __exit i2o_iop_exit(void) +{ + i2o_pci_exit(); + i2o_exec_exit(); + i2o_driver_exit(); + i2o_device_exit(); +}; + + +module_init(i2o_iop_init); +module_exit(i2o_iop_exit); + + +MODULE_AUTHOR("Red Hat Software"); +MODULE_DESCRIPTION("I2O Core"); +MODULE_LICENSE("GPL"); + + +#if BITS_PER_LONG == 64 +EXPORT_SYMBOL(i2o_cntxt_list_add); +EXPORT_SYMBOL(i2o_cntxt_list_get); +EXPORT_SYMBOL(i2o_cntxt_list_remove); +#endif +EXPORT_SYMBOL(i2o_msg_get_wait); +EXPORT_SYMBOL(i2o_msg_nop); +EXPORT_SYMBOL(i2o_find_iop); +EXPORT_SYMBOL(i2o_iop_find_device); +EXPORT_SYMBOL(i2o_event_register); +EXPORT_SYMBOL(i2o_status_get); +EXPORT_SYMBOL(i2o_hrt_get); +EXPORT_SYMBOL(i2o_controllers); diff -puN drivers/message/i2o/Makefile~i2o-build_99 drivers/message/i2o/Makefile --- 25/drivers/message/i2o/Makefile~i2o-build_99 Tue Jul 27 14:06:32 2004 +++ 25-akpm/drivers/message/i2o/Makefile Tue Jul 27 14:06:32 2004 @@ -5,6 +5,11 @@ # In the future, some of these should be built conditionally. # +i2o_core-y += iop.o driver.o device.o debug.o pci.o exec-osm.o +i2o_block-y += block-osm.o +i2o_proc-y += proc-osm.o +i2o_config-y += config-osm.o +i2o_scsi-y += scsi-osm.o obj-$(CONFIG_I2O) += i2o_core.o obj-$(CONFIG_I2O_CONFIG)+= i2o_config.o obj-$(CONFIG_I2O_BLOCK) += i2o_block.o diff -puN /dev/null drivers/message/i2o/pci.c --- /dev/null Thu Apr 11 07:25:15 2002 +++ 25-akpm/drivers/message/i2o/pci.c Tue Jul 27 14:06:32 2004 @@ -0,0 +1,541 @@ +/* + * PCI handling of I2O controller + * + * Copyright (C) 1999-2002 Red Hat Software + * + * Written by Alan Cox, Building Number Three Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + * A lot of the I2O message side code from this is taken from the Red + * Creek RCPCI45 adapter driver by Red Creek Communications + * + * Fixes/additions: + * Philipp Rumpf + * Juha Sievänen + * Auvo Häkkinen + * Deepak Saxena + * Boji T Kannanthanam + * Alan Cox : + * Ported to Linux 2.5. + * Markus Lidel : + * Minor fixes for 2.6. + * Markus Lidel : + * Support for sysfs included. + */ + +#include +#include + +#ifdef CONFIG_MTRR +#include +#endif // CONFIG_MTRR + +#include + + +#undef DEBUG + +#ifdef DEBUG +#define DBG(x...) printk(x) +#else +#define DBG(x...) +#endif + + +/* Module internal functions from other sources */ +extern struct i2o_controller *i2o_iop_alloc(void); +extern void i2o_iop_free(struct i2o_controller *); + +extern int i2o_iop_add(struct i2o_controller *); +extern void i2o_iop_remove(struct i2o_controller *); + +extern int i2o_driver_dispatch(struct i2o_controller *,u32,struct i2o_message*); + + +/* Internal used functions */ +static int __devinit i2o_pci_probe(struct pci_dev *, + const struct pci_device_id *); +static void __devexit i2o_pci_remove(struct pci_dev *); + +static int __devinit i2o_pci_alloc(struct i2o_controller *); +static void __devexit i2o_pci_free(struct i2o_controller *); + +static int i2o_pci_irq_enable(struct i2o_controller *); +static void i2o_pci_irq_disable(struct i2o_controller *); +static irqreturn_t i2o_pci_interrupt(int, void *, struct pt_regs *); + +/* init / exit functions */ +int __init i2o_pci_init(void); +void __exit i2o_pci_exit(void); + + +/* PCI device id table for all I2O controllers */ +static struct pci_device_id __devinitdata i2o_pci_ids[] = { + { PCI_DEVICE_CLASS(PCI_CLASS_INTELLIGENT_I2O<<8, 0xffff00) }, + { PCI_DEVICE(PCI_VENDOR_ID_DPT, 0xa511) }, + { 0 } +}; + +/* PCI driver for I2O controller */ +static struct pci_driver i2o_pci_driver = { + .name = "I2O controller", + .id_table = i2o_pci_ids, + .probe = i2o_pci_probe, + .remove = __devexit_p(i2o_pci_remove), +}; + + +/** + * i2o_dma_realloc - Realloc DMA memory + * @dev: struct device pointer to the PCI device of the I2O controller + * @addr: pointer to a i2o_dma struct DMA buffer + * @len: new length of memory + * @gfp_mask: GFP mask + * + * If there was something allocated in the addr, free it first. If len > 0 + * than try to allocate it and write the addresses back to the addr + * structure. If len == 0 set the virtual address to NULL. + * + * Returns the 0 on success or negative error code on failure. + */ +int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, size_t len, + unsigned int gfp_mask) +{ + i2o_dma_free(dev, addr); + + if(len) + return i2o_dma_alloc(dev, addr, len, gfp_mask); + + return 0; +}; + +/** + * i2o_pci_probe - Probe the PCI device for an I2O controller + * @dev: PCI device to test + * @id: id which matched with the PCI device id table + * + * Probe the PCI device for any device which is a memory of the + * Intelligent, I2O class or an Adaptec Zero Channel Controller. We + * attempt to set up each such device and register it with the core. + * + * Returns 0 on success or negative error code on failure. 
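
i2o_dma_realloc() above is deliberately simple: the old buffer is always released first, and a new one is allocated only when the requested length is non-zero. A user-space sketch of the same free-then-allocate pattern (struct dma_buf and its helpers are invented, with plain calloc() standing in for the coherent DMA allocator):

#include <errno.h>
#include <stdlib.h>

struct dma_buf {
        void	*virt;
        size_t	len;
};

static void buf_free(struct dma_buf *b)
{
        free(b->virt);
        b->virt = NULL;
        b->len = 0;
}

static int buf_alloc(struct dma_buf *b, size_t len)
{
        b->virt = calloc(1, len);
        if (!b->virt)
                return -ENOMEM;
        b->len = len;
        return 0;
}

static int buf_realloc(struct dma_buf *b, size_t len)
{
        buf_free(b);			/* always drop the old buffer first */

        if (len)
                return buf_alloc(b, len);

        return 0;			/* len == 0 just leaves it empty */
}

int main(void)
{
        struct dma_buf b = { NULL, 0 };

        if (buf_realloc(&b, 4096))	/* grow */
                return 1;
        return buf_realloc(&b, 0);	/* shrink to nothing */
}
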
+ */ +static int __devinit i2o_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct i2o_controller *c; + int rc; + + printk(KERN_INFO "i2o: Checking for PCI I2O controllers...\n"); + + if((pdev->class&0xff)>1) { + printk(KERN_WARNING "i2o: I2O controller found but does not " + "support I2O 1.5 (skipping).\n"); + return -ENODEV; + } + + if((rc=pci_enable_device(pdev))) { + printk(KERN_WARNING "i2o: I2O controller found but could not be" + " enabled.\n"); + return rc; + } + + printk(KERN_INFO "i2o: I2O controller found on bus %d at %d.\n", + pdev->bus->number, pdev->devfn); + + if(pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { + printk(KERN_WARNING "i2o: I2O controller on bus %d at %d: No " + "suitable DMA available!\n", pdev->bus->number, + pdev->devfn); + rc = -ENODEV; + goto disable; + } + + pci_set_master(pdev); + + c = i2o_iop_alloc(); + if(IS_ERR(c)) { + printk(KERN_ERR "i2o: memory for I2O controller could not be " + "allocated\n"); + rc = PTR_ERR(c); + goto disable; + } + + c->pdev = pdev; + c->device = pdev->dev; + + /* Cards that fall apart if you hit them with large I/O loads... */ + if(pdev->vendor == PCI_VENDOR_ID_NCR && pdev->device == 0x0630) { + c->short_req = 1; + printk(KERN_INFO "i2o: Symbios FC920 workarounds activated.\n"); + } + + if(pdev->subsystem_vendor == PCI_VENDOR_ID_PROMISE) { + c->promise = 1; + printk(KERN_INFO "i2o: Promise workarounds activated.\n"); + } + + /* Cards that go bananas if you quiesce them before you reset them. */ + if(pdev->vendor == PCI_VENDOR_ID_DPT) { + c->no_quiesce = 1; + if(pdev->device == 0xa511) + c->raptor = 1; + } + + if((rc=i2o_pci_alloc(c))) { + printk(KERN_ERR "i2o: DMA / IO allocation for I2O controller " + " failed\n"); + goto free_controller; + } + + if(i2o_pci_irq_enable(c)) { + printk(KERN_ERR "i2o: unable to enable interrupts for I2O " + "controller\n"); + goto free_pci; + } + + if((rc = i2o_iop_add(c))) + goto uninstall; + + return 0; + +uninstall: + i2o_pci_irq_disable(c); + +free_pci: + i2o_pci_free(c); + +free_controller: + i2o_iop_free(c); + +disable: + pci_disable_device(pdev); + + return rc; +} + +/** + * i2o_pci_remove - Removes a I2O controller from the system + * pdev: I2O controller which should be removed + * + * Reset the I2O controller, disable interrupts and remove all allocated + * resources. + */ +static void __devexit i2o_pci_remove(struct pci_dev *pdev) +{ + struct i2o_controller *c; + c = pci_get_drvdata(pdev); + + i2o_iop_remove(c); + i2o_pci_irq_disable(c); + i2o_pci_free(c); + + printk(KERN_INFO "%s: Controller removed.\n", c->name); + + i2o_iop_free(c); + pci_disable_device(pdev); +}; + +/** + * i2o_pci_alloc - Allocate DMA memory, map IO memory for I2O controller + * @c: I2O controller + * + * Allocate DMA memory for a PCI (or in theory AGP) I2O controller. All + * IO mappings are also done here. If MTRR is enabled, also do add memory + * regions here. + * + * Returns 0 on success or negative error code on failure. 
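
The probe path above tears down in reverse order of setup through a chain of goto labels, so each failure point only undoes what has already succeeded. A compact user-space sketch of that unwind pattern, with malloc()ed placeholders standing in for the DMA, IRQ and core-registration steps:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int setup_device(void)
{
        void *dma, *irq, *core;
        int rc = -ENOMEM;

        dma = malloc(64);		/* i2o_pci_alloc() analogue */
        if (!dma)
                goto out;

        irq = malloc(64);		/* i2o_pci_irq_enable() analogue */
        if (!irq)
                goto free_dma;

        core = malloc(64);		/* i2o_iop_add() analogue */
        if (!core)
                goto free_irq;

        printf("device set up\n");
        free(core);
        free(irq);
        free(dma);
        return 0;

free_irq:
        free(irq);			/* falls through to the earlier steps */
free_dma:
        free(dma);
out:
        return rc;
}

int main(void)
{
        return setup_device() ? EXIT_FAILURE : EXIT_SUCCESS;
}

Keeping the labels in strict reverse order of acquisition means each label is responsible for exactly one resource, which is what makes the real probe path easy to audit.
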
+ */ +static int __devinit i2o_pci_alloc(struct i2o_controller *c) +{ + struct pci_dev *pdev = c->pdev; + struct device *dev = &pdev->dev; + int i; + + for(i=0; i<6; i++) { + /* Skip I/O spaces */ + if(!(pci_resource_flags(pdev, i) & IORESOURCE_IO)) { + if(!c->base.phys) { + c->base.phys = pci_resource_start(pdev, i); + c->base.len = pci_resource_len(pdev, i); + if(!c->raptor) + break; + } else { + c->in_queue.phys = pci_resource_start(pdev, i); + c->in_queue.len = pci_resource_len(pdev, i); + break; + } + } + } + + if(i==6) { + printk(KERN_ERR "i2o: I2O controller has no memory regions" + " defined.\n"); + i2o_pci_free(c); + return -EINVAL; + } + + /* Map the I2O controller */ + if(c->raptor) { + printk(KERN_INFO "i2o: PCI I2O controller\n"); + printk(KERN_INFO " BAR0 at 0x%08lX size=%ld\n", + (unsigned long)c->base.phys, (unsigned long)c->base.len); + printk(KERN_INFO " BAR1 at 0x%08lX size=%ld\n", + (unsigned long)c->in_queue.phys, + (unsigned long)c->in_queue.len); + } else + printk(KERN_INFO "i2o: PCI I2O controller at %08lX size=%ld\n", + (unsigned long)c->base.phys, (unsigned long)c->base.len); + + c->base.virt = ioremap(c->base.phys, c->base.len); + if(!c->base.virt) { + printk(KERN_ERR "i2o: Unable to map controller.\n"); + return -ENOMEM; + } + + if(c->raptor) { + c->in_queue.virt = ioremap(c->in_queue.phys, c->in_queue.len); + if(!c->in_queue.virt) { + printk(KERN_ERR "i2o: Unable to map controller.\n"); + i2o_pci_free(c); + return -ENOMEM; + } + } else + c->in_queue = c->base; + + c->irq_mask = c->base.virt+0x34; + c->post_port = c->base.virt+0x40; + c->reply_port = c->base.virt+0x44; + +#ifdef CONFIG_MTRR + /* Enable Write Combining MTRR for IOP's memory region */ + c->mtrr_reg0 = mtrr_add(c->in_queue.phys, c->in_queue.len, + MTRR_TYPE_WRCOMB, 1); + c->mtrr_reg1 = -1; + + if(c->mtrr_reg0<0) + printk(KERN_WARNING "i2o: could not enable write combining " + "MTRR\n"); + else + printk(KERN_INFO "i2o: using write combining MTRR\n"); + + /* + * If it is an INTEL i960 I/O processor then set the first 64K to + * Uncacheable since the region contains the messaging unit which + * shouldn't be cached. + */ + if((pdev->vendor == PCI_VENDOR_ID_INTEL || + pdev->vendor == PCI_VENDOR_ID_DPT) && !c->raptor) { + printk(KERN_INFO "i2o: MTRR workaround for Intel i960 processor" + "\n"); + c->mtrr_reg1 = mtrr_add(c->base.phys, 0x10000, + MTRR_TYPE_UNCACHABLE, 1); + + if(c->mtrr_reg1<0) { + printk(KERN_WARNING "i2o_pci: Error in setting " + "MTRR_TYPE_UNCACHABLE\n"); + mtrr_del(c->mtrr_reg0,c->in_queue.phys,c->in_queue.len); + c->mtrr_reg0 = -1; + } + } +#endif + + if(i2o_dma_alloc(dev, &c->status, 4, GFP_KERNEL)) { + i2o_pci_free(c); + return -ENOMEM; + } + + if(i2o_dma_alloc(dev, &c->hrt, sizeof(i2o_hrt), GFP_KERNEL)) { + i2o_pci_free(c); + return -ENOMEM; + } + + if(i2o_dma_alloc(dev, &c->dlct, 8192, GFP_KERNEL)) { + i2o_pci_free(c); + return -ENOMEM; + } + + if(i2o_dma_alloc(dev, &c->status_block, sizeof(i2o_status_block), + GFP_KERNEL)) { + i2o_pci_free(c); + return -ENOMEM; + } + + if(i2o_dma_alloc(dev, &c->out_queue, MSG_POOL_SIZE, GFP_KERNEL)) { + i2o_pci_free(c); + return -ENOMEM; + } + + pci_set_drvdata(pdev, c); + + return 0; +} + +/** + * i2o_pci_free - Frees the DMA memory for the I2O controller + * @c: I2O controller to free + * + * Remove all allocated DMA memory and unmap memory IO regions. If MTRR + * is enabled, also remove it again. 
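
i2o_pci_alloc() above walks the six PCI base address registers, skips I/O-port BARs, and takes the first memory BAR as the register window; on boards flagged as raptor it keeps looking for a second memory BAR to use as the inbound queue. A user-space sketch of that selection logic over a fabricated BAR table (the flag values and addresses are made up):

#include <stdio.h>

#define RES_IO	0x1
#define RES_MEM	0x2

struct bar { unsigned long start, len, flags; };

int main(void)
{
        struct bar bar[6] = {
                { 0x0000e000, 0x100,    RES_IO  },
                { 0xfe000000, 0x10000,  RES_MEM },
                { 0xfd000000, 0x100000, RES_MEM },
        };
        struct bar base = { 0 }, in_queue = { 0 };
        int raptor = 1, i;

        for (i = 0; i < 6; i++) {
                if (bar[i].flags & RES_IO)
                        continue;		/* skip I/O port BARs */
                if (!bar[i].len)
                        continue;		/* unimplemented BAR */
                if (!base.len) {
                        base = bar[i];		/* first memory BAR: registers */
                        if (!raptor)
                                break;
                } else {
                        in_queue = bar[i];	/* second memory BAR: post queue */
                        break;
                }
        }

        printf("base     %#lx (%lu bytes)\n", base.start, base.len);
        printf("in_queue %#lx (%lu bytes)\n", in_queue.start, in_queue.len);
        return 0;
}
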
+ */ +static void __devexit i2o_pci_free(struct i2o_controller *c) +{ + struct device *dev; + + dev = &c->pdev->dev; + + i2o_dma_free(dev, &c->out_queue); + i2o_dma_free(dev, &c->status_block); + if(c->lct) + kfree(c->lct); + i2o_dma_free(dev, &c->dlct); + i2o_dma_free(dev, &c->hrt); + i2o_dma_free(dev, &c->status); + +#ifdef CONFIG_MTRR + if(c->mtrr_reg0 >= 0) + mtrr_del(c->mtrr_reg0, 0, 0); + if(c->mtrr_reg1 >= 0) + mtrr_del(c->mtrr_reg1, 0, 0); +#endif + + if(c->raptor && c->in_queue.virt) + iounmap(c->in_queue.virt); + + if(c->base.virt) + iounmap(c->base.virt); +} + +/** + * i2o_pci_irq_enable - Allocate interrupt for I2O controller + * + * Allocate an interrupt for the I2O controller, and activate interrupts + * on the I2O controller. + * + * Returns 0 on success or negative error code on failure. + */ +static int i2o_pci_irq_enable(struct i2o_controller *c) +{ + struct pci_dev *pdev = c->pdev; + int rc; + + I2O_IRQ_WRITE32(c, 0xffffffff); + + if(pdev->irq) { + rc=request_irq(pdev->irq,i2o_pci_interrupt,SA_SHIRQ,c->name,c); + if(rc<0) { + printk(KERN_ERR "%s: unable to allocate interrupt %d." + "\n", c->name, pdev->irq); + return rc; + } + } + + I2O_IRQ_WRITE32(c, 0x00000000); + + printk(KERN_INFO "%s: Installed at IRQ %d\n", c->name, pdev->irq); + + return 0; +} + +/** + * i2o_pci_irq_disable - Free interrupt for I2O controller + * @c: I2O controller + * + * Disable interrupts in I2O controller and then free interrupt. + */ +static void i2o_pci_irq_disable(struct i2o_controller *c) +{ + I2O_IRQ_WRITE32(c, 0xffffffff); + + if(c->pdev->irq > 0) + free_irq(c->pdev->irq, c); +} + +/** + * i2o_pci_interrupt - Interrupt handler for I2O controller + * @irq: interrupt line + * @dev_id: pointer to the I2O controller + * @r: pointer to registers + * + * Handle an interrupt from a PCI based I2O controller. This turns out + * to be rather simple. We keep the controller pointer in the cookie. + */ +static irqreturn_t i2o_pci_interrupt(int irq, void *dev_id, struct pt_regs *r) +{ + struct i2o_controller *c = dev_id; + struct device *dev = &c->pdev->dev; + struct i2o_message *m; + u32 mv; + u32 *msg; + + /* + * Old 960 steppings had a bug in the I2O unit that caused + * the queue to appear empty when it wasn't. + */ + mv=I2O_REPLY_READ32(c); + if(mv == I2O_QUEUE_EMPTY) { + mv=I2O_REPLY_READ32(c); + if(unlikely(mv == I2O_QUEUE_EMPTY)) { + printk(KERN_INFO "i2o: interrupt and message queue " + "empty!\n"); + return IRQ_NONE; + } else + DBG("960 bug detected\n"); + } + + while(mv != I2O_QUEUE_EMPTY) { + /* + * Map the message from the page frame map to kernel virtual. + * Because bus_to_virt is deprecated, we have calculate the + * location by ourself! + */ + m=(struct i2o_message *)(mv - (unsigned long)c->out_queue.phys + + (unsigned long)c->out_queue.virt); + + msg=(u32*)m; + + /* + * Ensure this message is seen coherently but cachably by + * the processor + */ + dma_sync_single_for_cpu(dev, c->out_queue.phys, MSG_FRAME_SIZE, + PCI_DMA_FROMDEVICE); + + /* dispatch it */ + if(i2o_driver_dispatch(c, mv, m)) + /* flush it if result != 0 */ + i2o_flush_reply(c, mv); + + /* + * That 960 bug again... + */ + mv=I2O_REPLY_READ32(c); + if(mv==I2O_QUEUE_EMPTY) + mv=I2O_REPLY_READ32(c); + } + return IRQ_HANDLED; +} + +/** + * i2o_pci_init - registers I2O PCI driver in PCI subsystem + * + * Returns > 0 on success or negative error code on failure. 
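
i2o_pci_interrupt() above turns the message frame address read from the reply port into a CPU-usable pointer by offsetting into the out_queue mapping instead of calling the deprecated bus_to_virt(). A small sketch of that address arithmetic with made-up addresses and frame size:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FRAME_SIZE	128	/* illustrative frame size */
#define NR_FRAMES	32

int main(void)
{
        uint8_t *virt = malloc(NR_FRAMES * FRAME_SIZE);	/* CPU mapping */
        uintptr_t phys = 0xfd200000;			/* pretend bus address */
        uintptr_t mfa;
        uint8_t *frame;

        if (!virt)
                return 1;

        /* an MFA handed back by the IOP points into the outbound queue */
        mfa = phys + 5 * FRAME_SIZE;

        /* same translation as in the handler: offset from the start of the
         * queue, applied to the CPU-visible mapping */
        frame = virt + (mfa - phys);

        printf("frame %zu at %p\n",
               (size_t)((mfa - phys) / FRAME_SIZE), (void *)frame);
        free(virt);
        return 0;
}
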
+ */ +int __init i2o_pci_init(void) +{ + return pci_register_driver(&i2o_pci_driver); +}; + +/** + * i2o_pci_exit - unregisters I2O PCI driver from PCI subsystem + */ +void __exit i2o_pci_exit(void) +{ + pci_unregister_driver(&i2o_pci_driver); +}; + +EXPORT_SYMBOL(i2o_dma_realloc); diff -puN /dev/null drivers/message/i2o/proc-osm.c --- /dev/null Thu Apr 11 07:25:15 2002 +++ 25-akpm/drivers/message/i2o/proc-osm.c Tue Jul 27 14:06:32 2004 @@ -0,0 +1,2123 @@ +/* + * procfs handler for Linux I2O subsystem + * + * (c) Copyright 1999 Deepak Saxena + * + * Originally written by Deepak Saxena(deepak@plexity.net) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This is an initial test release. The code is based on the design of the + * ide procfs system (drivers/block/ide-proc.c). Some code taken from + * i2o-core module by Alan Cox. + * + * DISCLAIMER: This code is still under development/test and may cause + * your system to behave unpredictably. Use at your own discretion. + * + * + * Fixes/additions: + * Juha Sievänen (Juha.Sievanen@cs.Helsinki.FI), + * Auvo Häkkinen (Auvo.Hakkinen@cs.Helsinki.FI) + * University of Helsinki, Department of Computer Science + * LAN entries + * Markus Lidel + * Changes for new I2O API + */ + +#undef DEBUG + +#ifdef DEBUG +#define DBG(x...) printk(x) +#else +#define DBG(x...) +#endif + +#define I2O_MAX_MODULES 4 +// FIXME! +#define FMT_U64_HEX "0x%08x%08x" +#define U64_VAL(pu64) *((u32*)(pu64)+1), *((u32*)(pu64)) + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + + +/* Structure used to define /proc entries */ +typedef struct _i2o_proc_entry_t +{ + char *name; /* entry name */ + mode_t mode; /* mode */ + struct file_operations *fops; /* open function */ +} i2o_proc_entry; + +/* helper functions */ +static int print_serial_number(struct seq_file *, u8 *, int); +static const char *i2o_get_class_name(int); + +static int i2o_proc_create_entries(struct proc_dir_entry *, i2o_proc_entry *, + void *); +static void i2o_proc_subdir_remove(struct proc_dir_entry *); + +static int i2o_proc_iop_add(struct proc_dir_entry *, struct i2o_controller *); +static void i2o_proc_iop_remove(struct proc_dir_entry *,struct i2o_controller*); + +static void i2o_proc_device_add(struct proc_dir_entry *, struct i2o_device *); + +static int __init i2o_proc_fs_create(void); +static int __exit i2o_proc_fs_destroy(void); + +static int __init i2o_proc_init(void); +static void __exit i2o_proc_exit(void); + +/* show functions */ +static int i2o_seq_show_lct(struct seq_file *, void *); +static int i2o_seq_show_hrt(struct seq_file *, void *); +static int i2o_seq_show_status(struct seq_file *, void *); + +static int i2o_seq_show_hw(struct seq_file *, void *); +static int i2o_seq_show_ddm_table(struct seq_file *, void *); +static int i2o_seq_show_driver_store(struct seq_file *, void *); +static int i2o_seq_show_drivers_stored(struct seq_file *, void *); + +static int i2o_seq_show_groups(struct seq_file *, void *); +static int i2o_seq_show_phys_device(struct seq_file *, void *); +static int i2o_seq_show_claimed(struct seq_file *, void *); +static int i2o_seq_show_users(struct seq_file *, void *); +static int i2o_seq_show_priv_msgs(struct seq_file *, void *); +static int 
i2o_seq_show_authorized_users(struct seq_file *, void *); + +static int i2o_seq_show_dev_name(struct seq_file *, void *); +static int i2o_seq_show_dev_identity(struct seq_file *, void *); +static int i2o_seq_show_ddm_identity(struct seq_file *, void *); +static int i2o_seq_show_uinfo(struct seq_file *, void *); +static int i2o_seq_show_sgl_limits(struct seq_file *, void *); + +static int i2o_seq_show_sensors(struct seq_file *, void *); + + +/* global I2O /proc/i2o entry */ +static struct proc_dir_entry *i2o_proc_dir_root; + +/* proc OSM driver struct */ +static struct i2o_driver i2o_proc_driver = { + .name = "proc-osm", +}; + + +static int print_serial_number(struct seq_file *seq, u8 *serialno, int max_len) +{ + int i; + + /* 19990419 -sralston + * The I2O v1.5 (and v2.0 so far) "official specification" + * got serial numbers WRONG! + * Apparently, and despite what Section 3.4.4 says and + * Figure 3-35 shows (pg 3-39 in the pdf doc), + * the convention / consensus seems to be: + * + First byte is SNFormat + * + Second byte is SNLen (but only if SNFormat==7 (?)) + * + (v2.0) SCSI+BS may use IEEE Registered (64 or 128 bit) format + */ + switch(serialno[0]) + { + case I2O_SNFORMAT_BINARY: /* Binary */ + seq_printf(seq, "0x"); + for(i = 0; i < serialno[1]; i++) + { + seq_printf(seq, "%02X", serialno[2+i]); + } + break; + + case I2O_SNFORMAT_ASCII: /* ASCII */ + if ( serialno[1] < ' ' ) /* printable or SNLen? */ + { + /* sanity */ + max_len = (max_len < serialno[1]) ? max_len : serialno[1]; + serialno[1+max_len] = '\0'; + + /* just print it */ + seq_printf(seq, "%s", &serialno[2]); + } + else + { + /* print chars for specified length */ + for(i = 0; i < serialno[1]; i++) + { + seq_printf(seq, "%c", serialno[2+i]); + } + } + break; + + case I2O_SNFORMAT_UNICODE: /* UNICODE */ + seq_printf(seq, "UNICODE Format. Can't Display\n"); + break; + + case I2O_SNFORMAT_LAN48_MAC: /* LAN-48 MAC Address */ + seq_printf(seq, + "LAN-48 MAC address @ %02X:%02X:%02X:%02X:%02X:%02X", + serialno[2], serialno[3], + serialno[4], serialno[5], + serialno[6], serialno[7]); + break; + + case I2O_SNFORMAT_WAN: /* WAN MAC Address */ + /* FIXME: Figure out what a WAN access address looks like?? */ + seq_printf(seq, "WAN Access Address"); + break; + +/* plus new in v2.0 */ + case I2O_SNFORMAT_LAN64_MAC: /* LAN-64 MAC Address */ + /* FIXME: Figure out what a LAN-64 address really looks like?? */ + seq_printf(seq, + "LAN-64 MAC address @ [?:%02X:%02X:?] %02X:%02X:%02X:%02X:%02X:%02X", + serialno[8], serialno[9], + serialno[2], serialno[3], + serialno[4], serialno[5], + serialno[6], serialno[7]); + break; + + + case I2O_SNFORMAT_DDM: /* I2O DDM */ + seq_printf(seq, + "DDM: Tid=%03Xh, Rsvd=%04Xh, OrgId=%04Xh", + *(u16*)&serialno[2], + *(u16*)&serialno[4], + *(u16*)&serialno[6]); + break; + + case I2O_SNFORMAT_IEEE_REG64: /* IEEE Registered (64-bit) */ + case I2O_SNFORMAT_IEEE_REG128: /* IEEE Registered (128-bit) */ + /* FIXME: Figure if this is even close?? 
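
The cases above follow the layout that print_serial_number() documents at its top: byte 0 carries the SNFormat code and, for the binary and ASCII formats, byte 1 is treated as the payload length. A tiny sketch that hex-dumps a binary-format serial number with that layout (the format code and sample bytes are invented):

#include <stdio.h>

#define SNFORMAT_BINARY	1	/* illustrative format code */

int main(void)
{
        /* fabricated blob: format byte, length byte, then payload */
        unsigned char sn[] = { SNFORMAT_BINARY, 4, 0xde, 0xad, 0xbe, 0xef };
        int i;

        if (sn[0] == SNFORMAT_BINARY) {
                printf("0x");
                for (i = 0; i < sn[1]; i++)	/* sn[1] = payload length */
                        printf("%02X", sn[2 + i]);
                printf("\n");
        }
        return 0;
}
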
*/ + seq_printf(seq, + "IEEE NodeName(hi,lo)=(%08Xh:%08Xh), PortName(hi,lo)=(%08Xh:%08Xh)\n", + *(u32*)&serialno[2], + *(u32*)&serialno[6], + *(u32*)&serialno[10], + *(u32*)&serialno[14]); + break; + + + case I2O_SNFORMAT_UNKNOWN: /* Unknown 0 */ + case I2O_SNFORMAT_UNKNOWN2: /* Unknown 0xff */ + default: + seq_printf(seq, "Unknown data format (0x%02x)", + serialno[0]); + break; + } + + return 0; +} + +/** + * i2o_get_class_name - do i2o class name lookup + * @class: class number + * + * Return a descriptive string for an i2o class + */ +static const char *i2o_get_class_name(int class) +{ + int idx = 16; + static char *i2o_class_name[] = { + "Executive", + "Device Driver Module", + "Block Device", + "Tape Device", + "LAN Interface", + "WAN Interface", + "Fibre Channel Port", + "Fibre Channel Device", + "SCSI Device", + "ATE Port", + "ATE Device", + "Floppy Controller", + "Floppy Device", + "Secondary Bus Port", + "Peer Transport Agent", + "Peer Transport", + "Unknown" + }; + + switch(class&0xfff) { + case I2O_CLASS_EXECUTIVE: + idx = 0; break; + case I2O_CLASS_DDM: + idx = 1; break; + case I2O_CLASS_RANDOM_BLOCK_STORAGE: + idx = 2; break; + case I2O_CLASS_SEQUENTIAL_STORAGE: + idx = 3; break; + case I2O_CLASS_LAN: + idx = 4; break; + case I2O_CLASS_WAN: + idx = 5; break; + case I2O_CLASS_FIBRE_CHANNEL_PORT: + idx = 6; break; + case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL: + idx = 7; break; + case I2O_CLASS_SCSI_PERIPHERAL: + idx = 8; break; + case I2O_CLASS_ATE_PORT: + idx = 9; break; + case I2O_CLASS_ATE_PERIPHERAL: + idx = 10; break; + case I2O_CLASS_FLOPPY_CONTROLLER: + idx = 11; break; + case I2O_CLASS_FLOPPY_DEVICE: + idx = 12; break; + case I2O_CLASS_BUS_ADAPTER_PORT: + idx = 13; break; + case I2O_CLASS_PEER_TRANSPORT_AGENT: + idx = 14; break; + case I2O_CLASS_PEER_TRANSPORT: + idx = 15; break; + } + + return i2o_class_name[idx]; +} + +static int i2o_seq_open_hrt(struct inode *inode, struct file *file) +{ + return single_open(file, i2o_seq_show_hrt, PDE(inode)->data); +}; + +static int i2o_seq_open_lct(struct inode *inode, struct file *file) +{ + return single_open(file, i2o_seq_show_lct, PDE(inode)->data); +}; + +static int i2o_seq_open_status(struct inode *inode, struct file *file) +{ + return single_open(file, i2o_seq_show_status, PDE(inode)->data); +}; + +static int i2o_seq_open_hw(struct inode *inode, struct file *file) +{ + return single_open(file, i2o_seq_show_hw, PDE(inode)->data); +}; + +static int i2o_seq_open_ddm_table(struct inode *inode, struct file *file) +{ + return single_open(file, i2o_seq_show_ddm_table, PDE(inode)->data); +}; + +static int i2o_seq_open_driver_store(struct inode *inode, struct file *file) +{ + return single_open(file, i2o_seq_show_driver_store, PDE(inode)->data); +}; + +static int i2o_seq_open_drivers_stored(struct inode *inode, struct file *file) +{ + return single_open(file, i2o_seq_show_drivers_stored, PDE(inode)->data); +}; + +static int i2o_seq_open_groups(struct inode *inode, struct file *file) +{ + return single_open(file, i2o_seq_show_groups, PDE(inode)->data); +}; + +static int i2o_seq_open_phys_device(struct inode *inode, struct file *file) +{ + return single_open(file, i2o_seq_show_phys_device, PDE(inode)->data); +}; + +static int i2o_seq_open_claimed(struct inode *inode, struct file *file) +{ + return single_open(file, i2o_seq_show_claimed, PDE(inode)->data); +}; + +static int i2o_seq_open_users(struct inode *inode, struct file *file) +{ + return single_open(file, i2o_seq_show_users, PDE(inode)->data); +}; + +static int 
i2o_seq_open_priv_msgs(struct inode *inode, struct file *file) +{ + return single_open(file, i2o_seq_show_priv_msgs, PDE(inode)->data); +}; + +static int i2o_seq_open_authorized_users(struct inode *inode, struct file *file) +{ + return single_open(file, i2o_seq_show_authorized_users, PDE(inode)->data); +}; + +static int i2o_seq_open_dev_identity(struct inode *inode, struct file *file) +{ + return single_open(file, i2o_seq_show_dev_identity, PDE(inode)->data); +}; + +static int i2o_seq_open_ddm_identity(struct inode *inode, struct file *file) +{ + return single_open(file, i2o_seq_show_ddm_identity, PDE(inode)->data); +}; + +static int i2o_seq_open_uinfo(struct inode *inode, struct file *file) +{ + return single_open(file, i2o_seq_show_uinfo, PDE(inode)->data); +}; + +static int i2o_seq_open_sgl_limits(struct inode *inode, struct file *file) +{ + return single_open(file, i2o_seq_show_sgl_limits, PDE(inode)->data); +}; + +static int i2o_seq_open_sensors(struct inode *inode, struct file *file) +{ + return single_open(file, i2o_seq_show_sensors, PDE(inode)->data); +}; + +static int i2o_seq_open_dev_name(struct inode *inode, struct file *file) +{ + return single_open(file, i2o_seq_show_dev_name, PDE(inode)->data); +}; + + +static struct file_operations i2o_seq_fops_lct = { + .open = i2o_seq_open_lct, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct file_operations i2o_seq_fops_hrt = { + .open = i2o_seq_open_hrt, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct file_operations i2o_seq_fops_status = { + .open = i2o_seq_open_status, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct file_operations i2o_seq_fops_hw = { + .open = i2o_seq_open_hw, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct file_operations i2o_seq_fops_ddm_table = { + .open = i2o_seq_open_ddm_table, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct file_operations i2o_seq_fops_driver_store = { + .open = i2o_seq_open_driver_store, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct file_operations i2o_seq_fops_drivers_stored = { + .open = i2o_seq_open_drivers_stored, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct file_operations i2o_seq_fops_groups = { + .open = i2o_seq_open_groups, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct file_operations i2o_seq_fops_phys_device = { + .open = i2o_seq_open_phys_device, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct file_operations i2o_seq_fops_claimed = { + .open = i2o_seq_open_claimed, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct file_operations i2o_seq_fops_users = { + .open = i2o_seq_open_users, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct file_operations i2o_seq_fops_priv_msgs = { + .open = i2o_seq_open_priv_msgs, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct file_operations i2o_seq_fops_authorized_users = { + .open = i2o_seq_open_authorized_users, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct file_operations i2o_seq_fops_dev_name = { + .open = i2o_seq_open_dev_name, + .read = seq_read, + .llseek = seq_lseek, + 
.release = single_release, +}; + +static struct file_operations i2o_seq_fops_dev_identity = { + .open = i2o_seq_open_dev_identity, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct file_operations i2o_seq_fops_ddm_identity = { + .open = i2o_seq_open_ddm_identity, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct file_operations i2o_seq_fops_uinfo = { + .open = i2o_seq_open_uinfo, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct file_operations i2o_seq_fops_sgl_limits = { + .open = i2o_seq_open_sgl_limits, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct file_operations i2o_seq_fops_sensors = { + .open = i2o_seq_open_sensors, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + + +/* + * IOP specific entries...write field just in case someone + * ever wants one. + */ +static i2o_proc_entry i2o_proc_generic_iop_entries[] = +{ + {"hrt", S_IFREG|S_IRUGO, &i2o_seq_fops_hrt}, + {"lct", S_IFREG|S_IRUGO, &i2o_seq_fops_lct}, + {"status", S_IFREG|S_IRUGO, &i2o_seq_fops_status}, + {"hw", S_IFREG|S_IRUGO, &i2o_seq_fops_hw}, + {"ddm_table", S_IFREG|S_IRUGO, &i2o_seq_fops_ddm_table}, + {"driver_store", S_IFREG|S_IRUGO, &i2o_seq_fops_driver_store}, + {"drivers_stored", S_IFREG|S_IRUGO, &i2o_seq_fops_drivers_stored}, + {NULL, 0, NULL} +}; + +/* + * Device specific entries + */ +static i2o_proc_entry generic_dev_entries[] = +{ + {"groups", S_IFREG|S_IRUGO, &i2o_seq_fops_groups}, + {"phys_dev", S_IFREG|S_IRUGO, &i2o_seq_fops_phys_device}, + {"claimed", S_IFREG|S_IRUGO, &i2o_seq_fops_claimed}, + {"users", S_IFREG|S_IRUGO, &i2o_seq_fops_users}, + {"priv_msgs", S_IFREG|S_IRUGO, &i2o_seq_fops_priv_msgs}, + {"authorized_users", S_IFREG|S_IRUGO, &i2o_seq_fops_authorized_users}, + {"dev_identity", S_IFREG|S_IRUGO, &i2o_seq_fops_dev_identity}, + {"ddm_identity", S_IFREG|S_IRUGO, &i2o_seq_fops_ddm_identity}, + {"user_info", S_IFREG|S_IRUGO, &i2o_seq_fops_uinfo}, + {"sgl_limits", S_IFREG|S_IRUGO, &i2o_seq_fops_sgl_limits}, + {"sensors", S_IFREG|S_IRUGO, &i2o_seq_fops_sensors}, + {NULL, 0, NULL} +}; + +/* + * Storage unit specific entries (SCSI Periph, BS) with device names + */ +static i2o_proc_entry rbs_dev_entries[] = +{ + {"dev_name", S_IFREG|S_IRUGO, &i2o_seq_fops_dev_name}, + {NULL, 0, NULL} +}; + +#define SCSI_TABLE_SIZE 13 +static char *scsi_devices[] = +{ + "Direct-Access Read/Write", + "Sequential-Access Storage", + "Printer", + "Processor", + "WORM Device", + "CD-ROM Device", + "Scanner Device", + "Optical Memory Device", + "Medium Changer Device", + "Communications Device", + "Graphics Art Pre-Press Device", + "Graphics Art Pre-Press Device", + "Array Controller Device" +}; + + +static char *chtostr(u8 *chars, int n) +{ + char tmp[256]; + tmp[0] = 0; + return strncat(tmp, (char *)chars, n); +} + +static int i2o_report_query_status(struct seq_file *seq, int block_status, char *group) +{ + switch (block_status) + { + case -ETIMEDOUT: + return seq_printf(seq, "Timeout reading group %s.\n",group); + case -ENOMEM: + return seq_printf(seq, "No free memory to read the table.\n"); + case -I2O_PARAMS_STATUS_INVALID_GROUP_ID: + return seq_printf(seq, "Group %s not supported.\n", group); + default: + return seq_printf(seq, "Error reading group %s. 
BlockStatus 0x%02X\n", + group, -block_status); + } +} + +static char* bus_strings[] = +{ + "Local Bus", + "ISA", + "EISA", + "MCA", + "PCI", + "PCMCIA", + "NUBUS", + "CARDBUS" +}; + +int i2o_seq_show_hrt(struct seq_file *seq, void *v) +{ + struct i2o_controller *c = (struct i2o_controller *)seq->private; + i2o_hrt *hrt = (i2o_hrt *)c->hrt.virt; + u32 bus; + int i; + + if(hrt->hrt_version) + { + seq_printf(seq, "HRT table for controller is too new a version.\n"); + return 0; + } + + seq_printf(seq, "HRT has %d entries of %d bytes each.\n", + hrt->num_entries, hrt->entry_len << 2); + + for(i = 0; i < hrt->num_entries; i++) + { + seq_printf(seq, "Entry %d:\n", i); + seq_printf(seq, " Adapter ID: %0#10x\n", + hrt->hrt_entry[i].adapter_id); + seq_printf(seq, " Controlling tid: %0#6x\n", + hrt->hrt_entry[i].parent_tid); + + if(hrt->hrt_entry[i].bus_type != 0x80) + { + bus = hrt->hrt_entry[i].bus_type; + seq_printf(seq, " %s Information\n", bus_strings[bus]); + + switch(bus) + { + case I2O_BUS_LOCAL: + seq_printf(seq, " IOBase: %0#6x,", + hrt->hrt_entry[i].bus.local_bus.LbBaseIOPort); + seq_printf(seq, " MemoryBase: %0#10x\n", + hrt->hrt_entry[i].bus.local_bus.LbBaseMemoryAddress); + break; + + case I2O_BUS_ISA: + seq_printf(seq, " IOBase: %0#6x,", + hrt->hrt_entry[i].bus.isa_bus.IsaBaseIOPort); + seq_printf(seq, " MemoryBase: %0#10x,", + hrt->hrt_entry[i].bus.isa_bus.IsaBaseMemoryAddress); + seq_printf(seq, " CSN: %0#4x,", + hrt->hrt_entry[i].bus.isa_bus.CSN); + break; + + case I2O_BUS_EISA: + seq_printf(seq, " IOBase: %0#6x,", + hrt->hrt_entry[i].bus.eisa_bus.EisaBaseIOPort); + seq_printf(seq, " MemoryBase: %0#10x,", + hrt->hrt_entry[i].bus.eisa_bus.EisaBaseMemoryAddress); + seq_printf(seq, " Slot: %0#4x,", + hrt->hrt_entry[i].bus.eisa_bus.EisaSlotNumber); + break; + + case I2O_BUS_MCA: + seq_printf(seq, " IOBase: %0#6x,", + hrt->hrt_entry[i].bus.mca_bus.McaBaseIOPort); + seq_printf(seq, " MemoryBase: %0#10x,", + hrt->hrt_entry[i].bus.mca_bus.McaBaseMemoryAddress); + seq_printf(seq, " Slot: %0#4x,", + hrt->hrt_entry[i].bus.mca_bus.McaSlotNumber); + break; + + case I2O_BUS_PCI: + seq_printf(seq, " Bus: %0#4x", + hrt->hrt_entry[i].bus.pci_bus.PciBusNumber); + seq_printf(seq, " Dev: %0#4x", + hrt->hrt_entry[i].bus.pci_bus.PciDeviceNumber); + seq_printf(seq, " Func: %0#4x", + hrt->hrt_entry[i].bus.pci_bus.PciFunctionNumber); + seq_printf(seq, " Vendor: %0#6x", + hrt->hrt_entry[i].bus.pci_bus.PciVendorID); + seq_printf(seq, " Device: %0#6x\n", + hrt->hrt_entry[i].bus.pci_bus.PciDeviceID); + break; + + default: + seq_printf(seq, " Unsupported Bus Type\n"); + } + } + else + seq_printf(seq, " Unknown Bus Type\n"); + } + + return 0; +} + +int i2o_seq_show_lct(struct seq_file *seq, void *v) +{ + struct i2o_controller *c = (struct i2o_controller*)seq->private; + i2o_lct *lct = (i2o_lct *)c->lct; + int entries; + int i; + +#define BUS_TABLE_SIZE 3 + static char *bus_ports[] = + { + "Generic Bus", + "SCSI Bus", + "Fibre Channel Bus" + }; + + entries = (lct->table_size - 3)/9; + + seq_printf(seq, "LCT contains %d %s\n", entries, + entries == 1 ? 
"entry" : "entries"); + if(lct->boot_tid) + seq_printf(seq, "Boot Device @ ID %d\n", lct->boot_tid); + + seq_printf(seq, "Current Change Indicator: %#10x\n", lct->change_ind); + + for(i = 0; i < entries; i++) + { + seq_printf(seq, "Entry %d\n", i); + seq_printf(seq, " Class, SubClass : %s", i2o_get_class_name(lct->lct_entry[i].class_id)); + + /* + * Classes which we'll print subclass info for + */ + switch(lct->lct_entry[i].class_id & 0xFFF) + { + case I2O_CLASS_RANDOM_BLOCK_STORAGE: + switch(lct->lct_entry[i].sub_class) + { + case 0x00: + seq_printf(seq, ", Direct-Access Read/Write"); + break; + + case 0x04: + seq_printf(seq, ", WORM Drive"); + break; + + case 0x05: + seq_printf(seq, ", CD-ROM Drive"); + break; + + case 0x07: + seq_printf(seq, ", Optical Memory Device"); + break; + + default: + seq_printf(seq, ", Unknown (0x%02x)", + lct->lct_entry[i].sub_class); + break; + } + break; + + case I2O_CLASS_LAN: + switch(lct->lct_entry[i].sub_class & 0xFF) + { + case 0x30: + seq_printf(seq, ", Ethernet"); + break; + + case 0x40: + seq_printf(seq, ", 100base VG"); + break; + + case 0x50: + seq_printf(seq, ", IEEE 802.5/Token-Ring"); + break; + + case 0x60: + seq_printf(seq, ", ANSI X3T9.5 FDDI"); + break; + + case 0x70: + seq_printf(seq, ", Fibre Channel"); + break; + + default: + seq_printf(seq, ", Unknown Sub-Class (0x%02x)", + lct->lct_entry[i].sub_class & 0xFF); + break; + } + break; + + case I2O_CLASS_SCSI_PERIPHERAL: + if(lct->lct_entry[i].sub_class < SCSI_TABLE_SIZE) + seq_printf(seq, ", %s", + scsi_devices[lct->lct_entry[i].sub_class]); + else + seq_printf(seq, ", Unknown Device Type"); + break; + + case I2O_CLASS_BUS_ADAPTER_PORT: + if(lct->lct_entry[i].sub_class < BUS_TABLE_SIZE) + seq_printf(seq, ", %s", + bus_ports[lct->lct_entry[i].sub_class]); + else + seq_printf(seq, ", Unknown Bus Type"); + break; + } + seq_printf(seq, "\n"); + + seq_printf(seq, " Local TID : 0x%03x\n", lct->lct_entry[i].tid); + seq_printf(seq, " User TID : 0x%03x\n", lct->lct_entry[i].user_tid); + seq_printf(seq, " Parent TID : 0x%03x\n", + lct->lct_entry[i].parent_tid); + seq_printf(seq, " Identity Tag : 0x%x%x%x%x%x%x%x%x\n", + lct->lct_entry[i].identity_tag[0], + lct->lct_entry[i].identity_tag[1], + lct->lct_entry[i].identity_tag[2], + lct->lct_entry[i].identity_tag[3], + lct->lct_entry[i].identity_tag[4], + lct->lct_entry[i].identity_tag[5], + lct->lct_entry[i].identity_tag[6], + lct->lct_entry[i].identity_tag[7]); + seq_printf(seq, " Change Indicator : %0#10x\n", + lct->lct_entry[i].change_ind); + seq_printf(seq, " Event Capab Mask : %0#10x\n", + lct->lct_entry[i].device_flags); + } + + return 0; +} + +int i2o_seq_show_status(struct seq_file *seq, void *v) +{ + struct i2o_controller *c = (struct i2o_controller*)seq->private; + char prodstr[25]; + int version; + i2o_status_block *sb = c->status_block.virt; + + i2o_status_get(c); // reread the status block + + seq_printf(seq, "Organization ID : %0#6x\n", sb->org_id); + + version = sb->i2o_version; + +/* FIXME for Spec 2.0 + if (version == 0x02) { + seq_printf(seq, "Lowest I2O version supported: "); + switch(workspace[2]) { + case 0x00: + seq_printf(seq, "1.0\n"); + break; + case 0x01: + seq_printf(seq, "1.5\n"); + break; + case 0x02: + seq_printf(seq, "2.0\n"); + break; + } + + seq_printf(seq, "Highest I2O version supported: "); + switch(workspace[3]) { + case 0x00: + seq_printf(seq, "1.0\n"); + break; + case 0x01: + seq_printf(seq, "1.5\n"); + break; + case 0x02: + seq_printf(seq, "2.0\n"); + break; + } + } +*/ + seq_printf(seq, "IOP ID : %0#5x\n", 
sb->iop_id); + seq_printf(seq, "Host Unit ID : %0#6x\n", sb->host_unit_id); + seq_printf(seq, "Segment Number : %0#5x\n", sb->segment_number); + + seq_printf(seq, "I2O version : "); + switch (version) { + case 0x00: + seq_printf(seq, "1.0\n"); + break; + case 0x01: + seq_printf(seq, "1.5\n"); + break; + case 0x02: + seq_printf(seq, "2.0\n"); + break; + default: + seq_printf(seq, "Unknown version\n"); + } + + seq_printf(seq, "IOP State : "); + switch (sb->iop_state) { + case 0x01: + seq_printf(seq, "INIT\n"); + break; + + case 0x02: + seq_printf(seq, "RESET\n"); + break; + + case 0x04: + seq_printf(seq, "HOLD\n"); + break; + + case 0x05: + seq_printf(seq, "READY\n"); + break; + + case 0x08: + seq_printf(seq, "OPERATIONAL\n"); + break; + + case 0x10: + seq_printf(seq, "FAILED\n"); + break; + + case 0x11: + seq_printf(seq, "FAULTED\n"); + break; + + default: + seq_printf(seq, "Unknown\n"); + break; + } + + seq_printf(seq, "Messenger Type : "); + switch (sb->msg_type) { + case 0x00: + seq_printf(seq, "Memory mapped\n"); + break; + case 0x01: + seq_printf(seq, "Memory mapped only\n"); + break; + case 0x02: + seq_printf(seq,"Remote only\n"); + break; + case 0x03: + seq_printf(seq, "Memory mapped and remote\n"); + break; + default: + seq_printf(seq, "Unknown\n"); + } + + seq_printf(seq, "Inbound Frame Size : %d bytes\n", + sb->inbound_frame_size<<2); + seq_printf(seq, "Max Inbound Frames : %d\n",sb->max_inbound_frames); + seq_printf(seq, "Current Inbound Frames : %d\n",sb->cur_inbound_frames); + seq_printf(seq, "Max Outbound Frames : %d\n", + sb->max_outbound_frames); + + /* Spec doesn't say if NULL terminated or not... */ + memcpy(prodstr, sb->product_id, 24); + prodstr[24] = '\0'; + seq_printf(seq, "Product ID : %s\n", prodstr); + seq_printf(seq, "Expected LCT Size : %d bytes\n", + sb->expected_lct_size); + + seq_printf(seq, "IOP Capabilities\n"); + seq_printf(seq, " Context Field Size Support : "); + switch (sb->iop_capabilities & 0x0000003) { + case 0: + seq_printf(seq, "Supports only 32-bit context fields\n"); + break; + case 1: + seq_printf(seq, "Supports only 64-bit context fields\n"); + break; + case 2: + seq_printf(seq, "Supports 32-bit and 64-bit context fields, " + "but not concurrently\n"); + break; + case 3: + seq_printf(seq, "Supports 32-bit and 64-bit context fields " + "concurrently\n"); + break; + default: + seq_printf(seq, "0x%08x\n",sb->iop_capabilities); + } + seq_printf(seq, " Current Context Field Size : "); + switch (sb->iop_capabilities & 0x0000000C) { + case 0: + seq_printf(seq, "not configured\n"); + break; + case 4: + seq_printf(seq, "Supports only 32-bit context fields\n"); + break; + case 8: + seq_printf(seq, "Supports only 64-bit context fields\n"); + break; + case 12: + seq_printf(seq, "Supports both 32-bit or 64-bit context fields " + "concurrently\n"); + break; + default: + seq_printf(seq, "\n"); + } + seq_printf(seq, " Inbound Peer Support : %s\n", + (sb->iop_capabilities & 0x00000010) ? "Supported" : "Not supported"); + seq_printf(seq, " Outbound Peer Support : %s\n", + (sb->iop_capabilities & 0x00000020) ? "Supported" : "Not supported"); + seq_printf(seq, " Peer to Peer Support : %s\n", + (sb->iop_capabilities & 0x00000040) ? 
"Supported" : "Not supported"); + + seq_printf(seq, "Desired private memory size : %d kB\n", + sb->desired_mem_size>>10); + seq_printf(seq, "Allocated private memory size : %d kB\n", + sb->current_mem_size>>10); + seq_printf(seq, "Private memory base address : %0#10x\n", + sb->current_mem_base); + seq_printf(seq, "Desired private I/O size : %d kB\n", + sb->desired_io_size>>10); + seq_printf(seq, "Allocated private I/O size : %d kB\n", + sb->current_io_size>>10); + seq_printf(seq, "Private I/O base address : %0#10x\n", + sb->current_io_base); + + return 0; +} + +int i2o_seq_show_hw(struct seq_file *seq, void *v) +{ + struct i2o_controller *c = (struct i2o_controller*)seq->private; + static u32 work32[5]; + static u8 *work8 = (u8*)work32; + static u16 *work16 = (u16*)work32; + int token; + u32 hwcap; + + static char *cpu_table[] = + { + "Intel 80960 series", + "AMD2900 series", + "Motorola 68000 series", + "ARM series", + "MIPS series", + "Sparc series", + "PowerPC series", + "Intel x86 series" + }; + + token = i2o_parm_field_get(c->exec, 0x0000, -1, &work32, sizeof(work32)); + + if (token < 0) { + i2o_report_query_status(seq, token, "0x0000 IOP Hardware"); + return 0; + } + + seq_printf(seq, "I2O Vendor ID : %0#6x\n", work16[0]); + seq_printf(seq, "Product ID : %0#6x\n", work16[1]); + seq_printf(seq, "CPU : "); + if(work8[16] > 8) + seq_printf(seq, "Unknown\n"); + else + seq_printf(seq, "%s\n", cpu_table[work8[16]]); + /* Anyone using ProcessorVersion? */ + + seq_printf(seq, "RAM : %dkB\n", work32[1]>>10); + seq_printf(seq, "Non-Volatile Mem : %dkB\n", work32[2]>>10); + + hwcap = work32[3]; + seq_printf(seq, "Capabilities : 0x%08x\n", hwcap); + seq_printf(seq, " [%s] Self booting\n", + (hwcap&0x00000001) ? "+" : "-"); + seq_printf(seq, " [%s] Upgradable IRTOS\n", + (hwcap&0x00000002) ? "+" : "-"); + seq_printf(seq, " [%s] Supports downloading DDMs\n", + (hwcap&0x00000004) ? "+" : "-"); + seq_printf(seq, " [%s] Supports installing DDMs\n", + (hwcap&0x00000008) ? "+" : "-"); + seq_printf(seq, " [%s] Battery-backed RAM\n", + (hwcap&0x00000010) ? 
"+" : "-"); + + return 0; +} + + +/* Executive group 0003h - Executing DDM List (table) */ +int i2o_seq_show_ddm_table(struct seq_file *seq, void *v) +{ + struct i2o_controller *c = (struct i2o_controller*)seq->private; + int token; + int i; + + typedef struct _i2o_exec_execute_ddm_table { + u16 ddm_tid; + u8 module_type; + u8 reserved; + u16 i2o_vendor_id; + u16 module_id; + u8 module_name_version[28]; + u32 data_size; + u32 code_size; + } i2o_exec_execute_ddm_table; + + struct + { + u16 result_count; + u16 pad; + u16 block_size; + u8 block_status; + u8 error_info_size; + u16 row_count; + u16 more_flag; + i2o_exec_execute_ddm_table ddm_table[I2O_MAX_MODULES]; + } *result; + + i2o_exec_execute_ddm_table ddm_table; + + result = kmalloc(sizeof(*result), GFP_KERNEL); + if(!result) + return -ENOMEM; + + token = i2o_parm_table_get(c->exec, I2O_PARAMS_TABLE_GET, 0x0003, -1, + NULL, 0, result, sizeof(*result)); + + if (token < 0) { + i2o_report_query_status(seq, token, "0x0003 Executing DDM List"); + goto out; + } + + seq_printf(seq, "Tid Module_type Vendor Mod_id Module_name Vrs Data_size Code_size\n"); + ddm_table=result->ddm_table[0]; + + for(i=0; i < result->row_count; ddm_table=result->ddm_table[++i]) + { + seq_printf(seq, "0x%03x ", ddm_table.ddm_tid & 0xFFF); + + switch(ddm_table.module_type) + { + case 0x01: + seq_printf(seq, "Downloaded DDM "); + break; + case 0x22: + seq_printf(seq, "Embedded DDM "); + break; + default: + seq_printf(seq, " "); + } + + seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id); + seq_printf(seq, "%-#8x", ddm_table.module_id); + seq_printf(seq, "%-29s", chtostr(ddm_table.module_name_version, 28)); + seq_printf(seq, "%9d ", ddm_table.data_size); + seq_printf(seq, "%8d", ddm_table.code_size); + + seq_printf(seq, "\n"); + } +out: + kfree(result); + return 0; +} + + +/* Executive group 0004h - Driver Store (scalar) */ +int i2o_seq_show_driver_store(struct seq_file *seq, void *v) +{ + struct i2o_controller *c = (struct i2o_controller*)seq->private; + u32 work32[8]; + int token; + + token = i2o_parm_field_get(c->exec, 0x0004, -1, &work32, sizeof(work32)); + if (token < 0) { + i2o_report_query_status(seq, token, "0x0004 Driver Store"); + return 0; + } + + seq_printf(seq, "Module limit : %d\n" + "Module count : %d\n" + "Current space : %d kB\n" + "Free space : %d kB\n", + work32[0], work32[1], work32[2]>>10, work32[3]>>10); + + return 0; +} + + +/* Executive group 0005h - Driver Store Table (table) */ +int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v) +{ + typedef struct _i2o_driver_store { + u16 stored_ddm_index; + u8 module_type; + u8 reserved; + u16 i2o_vendor_id; + u16 module_id; + u8 module_name_version[28]; + u8 date[8]; + u32 module_size; + u32 mpb_size; + u32 module_flags; + } i2o_driver_store_table; + + struct i2o_controller *c = (struct i2o_controller*)seq->private; + int token; + int i; + + typedef struct + { + u16 result_count; + u16 pad; + u16 block_size; + u8 block_status; + u8 error_info_size; + u16 row_count; + u16 more_flag; + i2o_driver_store_table dst[I2O_MAX_MODULES]; + } i2o_driver_result_table; + + i2o_driver_result_table *result; + i2o_driver_store_table *dst; + + + result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL); + if(result == NULL) + return -ENOMEM; + + token = i2o_parm_table_get(c->exec, I2O_PARAMS_TABLE_GET, 0x0005, -1, + NULL, 0, result, sizeof(*result)); + + if (token < 0) { + i2o_report_query_status(seq, token,"0x0005 DRIVER STORE TABLE"); + kfree(result); + return 0; + } + + seq_printf(seq, "# Module_type Vendor 
Mod_id Module_name Vrs" + "Date Mod_size Par_size Flags\n"); + for(i=0, dst=&result->dst[0]; i < result->row_count; dst=&result->dst[++i]) + { + seq_printf(seq, "%-3d", dst->stored_ddm_index); + switch(dst->module_type) + { + case 0x01: + seq_printf(seq, "Downloaded DDM "); + break; + case 0x22: + seq_printf(seq, "Embedded DDM "); + break; + default: + seq_printf(seq, " "); + } + +#if 0 + if(c->i2oversion == 0x02) + seq_printf(seq, "%-d", dst->module_state); +#endif + + seq_printf(seq, "%-#7x", dst->i2o_vendor_id); + seq_printf(seq, "%-#8x", dst->module_id); + seq_printf(seq, "%-29s", chtostr(dst->module_name_version,28)); + seq_printf(seq, "%-9s", chtostr(dst->date,8)); + seq_printf(seq, "%8d ", dst->module_size); + seq_printf(seq, "%8d ", dst->mpb_size); + seq_printf(seq, "0x%04x", dst->module_flags); +#if 0 + if(c->i2oversion == 0x02) + seq_printf(seq, "%d", + dst->notification_level); +#endif + seq_printf(seq, "\n"); + } + + kfree(result); + return 0; +} + + +/* Generic group F000h - Params Descriptor (table) */ +int i2o_seq_show_groups(struct seq_file *seq, void *v) +{ + struct i2o_device *d = (struct i2o_device*)seq->private; + int token; + int i; + u8 properties; + + typedef struct _i2o_group_info + { + u16 group_number; + u16 field_count; + u16 row_count; + u8 properties; + u8 reserved; + } i2o_group_info; + + struct + { + u16 result_count; + u16 pad; + u16 block_size; + u8 block_status; + u8 error_info_size; + u16 row_count; + u16 more_flag; + i2o_group_info group[256]; + } *result; + + result = kmalloc(sizeof(*result), GFP_KERNEL); + if(!result) + return -ENOMEM; + + token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF000, -1, NULL, 0, + result, sizeof(*result)); + + if (token < 0) { + i2o_report_query_status(seq, token, "0xF000 Params Descriptor"); + goto out; + } + + seq_printf(seq, "# Group FieldCount RowCount Type Add Del Clear\n"); + + for (i=0; i < result->row_count; i++) + { + seq_printf(seq, "%-3d", i); + seq_printf(seq, "0x%04X ", result->group[i].group_number); + seq_printf(seq, "%10d ", result->group[i].field_count); + seq_printf(seq, "%8d ", result->group[i].row_count); + + properties = result->group[i].properties; + if (properties & 0x1) seq_printf(seq, "Table "); + else seq_printf(seq, "Scalar "); + if (properties & 0x2) seq_printf(seq, " + "); + else seq_printf(seq, " - "); + if (properties & 0x4) seq_printf(seq, " + "); + else seq_printf(seq, " - "); + if (properties & 0x8) seq_printf(seq, " + "); + else seq_printf(seq, " - "); + + seq_printf(seq, "\n"); + } + + if (result->more_flag) + seq_printf(seq, "There is more...\n"); +out: + kfree(result); + return 0; +} + + +/* Generic group F001h - Physical Device Table (table) */ +int i2o_seq_show_phys_device(struct seq_file *seq, void *v) +{ + struct i2o_device *d = (struct i2o_device*)seq->private; + int token; + int i; + + struct + { + u16 result_count; + u16 pad; + u16 block_size; + u8 block_status; + u8 error_info_size; + u16 row_count; + u16 more_flag; + u32 adapter_id[64]; + } result; + + token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF001, -1, NULL, 0, + &result, sizeof(result)); + + if (token < 0) { + i2o_report_query_status(seq, token, "0xF001 Physical Device Table"); + return 0; + } + + if (result.row_count) + seq_printf(seq, "# AdapterId\n"); + + for (i=0; i < result.row_count; i++) + { + seq_printf(seq, "%-2d", i); + seq_printf(seq, "%#7x\n", result.adapter_id[i]); + } + + if (result.more_flag) + seq_printf(seq, "There is more...\n"); + + return 0; +} + +/* Generic group F002h - Claimed Table 
(table) */ +int i2o_seq_show_claimed(struct seq_file *seq, void *v) +{ + struct i2o_device *d = (struct i2o_device*)seq->private; + int token; + int i; + + struct { + u16 result_count; + u16 pad; + u16 block_size; + u8 block_status; + u8 error_info_size; + u16 row_count; + u16 more_flag; + u16 claimed_tid[64]; + } result; + + token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF002, -1, NULL, 0, + &result, sizeof(result)); + + if (token < 0) { + i2o_report_query_status(seq, token, "0xF002 Claimed Table"); + return 0; + } + + if (result.row_count) + seq_printf(seq, "# ClaimedTid\n"); + + for (i=0; i < result.row_count; i++) + { + seq_printf(seq, "%-2d", i); + seq_printf(seq, "%#7x\n", result.claimed_tid[i]); + } + + if (result.more_flag) + seq_printf(seq, "There is more...\n"); + + return 0; +} + +/* Generic group F003h - User Table (table) */ +int i2o_seq_show_users(struct seq_file *seq, void *v) +{ + struct i2o_device *d = (struct i2o_device*)seq->private; + int token; + int i; + + typedef struct _i2o_user_table + { + u16 instance; + u16 user_tid; + u8 claim_type; + u8 reserved1; + u16 reserved2; + } i2o_user_table; + + struct + { + u16 result_count; + u16 pad; + u16 block_size; + u8 block_status; + u8 error_info_size; + u16 row_count; + u16 more_flag; + i2o_user_table user[64]; + } *result; + + result = kmalloc(sizeof(*result), GFP_KERNEL); + if(!result) + return -ENOMEM; + + token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF003, -1, NULL, 0, + result, sizeof(*result)); + + if (token < 0) { + i2o_report_query_status(seq, token,"0xF003 User Table"); + goto out; + } + + seq_printf(seq, "# Instance UserTid ClaimType\n"); + + for(i=0; i < result->row_count; i++) + { + seq_printf(seq, "%-3d", i); + seq_printf(seq, "%#8x ", result->user[i].instance); + seq_printf(seq, "%#7x ", result->user[i].user_tid); + seq_printf(seq, "%#9x\n", result->user[i].claim_type); + } + + if (result->more_flag) + seq_printf(seq, "There is more...\n"); +out: + kfree(result); + return 0; +} + +/* Generic group F005h - Private message extensions (table) (optional) */ +int i2o_seq_show_priv_msgs(struct seq_file *seq, void *v) +{ + struct i2o_device *d = (struct i2o_device*)seq->private; + int token; + int i; + + typedef struct _i2o_private + { + u16 ext_instance; + u16 organization_id; + u16 x_function_code; + } i2o_private; + + struct + { + u16 result_count; + u16 pad; + u16 block_size; + u8 block_status; + u8 error_info_size; + u16 row_count; + u16 more_flag; + i2o_private extension[64]; + } result; + + token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF000, -1, NULL, 0, + &result, sizeof(result)); + + if (token < 0) { + i2o_report_query_status(seq, token,"0xF005 Private Message Extensions (optional)"); + return 0; + } + + seq_printf(seq, "Instance# OrgId FunctionCode\n"); + + for(i=0; i < result.row_count; i++) + { + seq_printf(seq, "%0#9x ", result.extension[i].ext_instance); + seq_printf(seq, "%0#6x ", result.extension[i].organization_id); + seq_printf(seq, "%0#6x", result.extension[i].x_function_code); + + seq_printf(seq, "\n"); + } + + if(result.more_flag) + seq_printf(seq, "There is more...\n"); + + return 0; +} + + +/* Generic group F006h - Authorized User Table (table) */ +int i2o_seq_show_authorized_users(struct seq_file *seq, void *v) +{ + struct i2o_device *d = (struct i2o_device*)seq->private; + int token; + int i; + + struct + { + u16 result_count; + u16 pad; + u16 block_size; + u8 block_status; + u8 error_info_size; + u16 row_count; + u16 more_flag; + u32 alternate_tid[64]; + } result; + + 
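	/*
	 * Note: the reply buffer declared above uses the generic
	 * UtilParamsGet result layout: result_count, block_size,
	 * block_status and error_info_size, followed by row_count rows
	 * and a more_flag that is set when more rows exist than fit in
	 * the buffer. Every table read in this file reuses this header.
	 */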
token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF006, -1, NULL, 0, + &result, sizeof(result)); + + if (token < 0) { + i2o_report_query_status(seq, token,"0xF006 Autohorized User Table"); + return 0; + } + + if (result.row_count) + seq_printf(seq, "# AlternateTid\n"); + + for(i=0; i < result.row_count; i++) + { + seq_printf(seq, "%-2d", i); + seq_printf(seq, "%#7x ", result.alternate_tid[i]); + } + + if (result.more_flag) + seq_printf(seq, "There is more...\n"); + + return 0; +} + + +/* Generic group F100h - Device Identity (scalar) */ +int i2o_seq_show_dev_identity(struct seq_file *seq, void *v) +{ + struct i2o_device *d = (struct i2o_device*)seq->private; + static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number + // == (allow) 512d bytes (max) + static u16 *work16 = (u16*)work32; + int token; + + token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32)); + + if (token < 0) { + i2o_report_query_status(seq, token, "0xF100 Device Identity"); + return 0; + } + + seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0])); + seq_printf(seq, "Owner TID : %0#5x\n", work16[2]); + seq_printf(seq, "Parent TID : %0#5x\n", work16[3]); + seq_printf(seq, "Vendor info : %s\n", chtostr((u8 *)(work32+2), 16)); + seq_printf(seq, "Product info : %s\n", chtostr((u8 *)(work32+6), 16)); + seq_printf(seq, "Description : %s\n", chtostr((u8 *)(work32+10), 16)); + seq_printf(seq, "Product rev. : %s\n", chtostr((u8 *)(work32+14), 8)); + + seq_printf(seq, "Serial number : "); + print_serial_number(seq, + (u8*)(work32+16), + /* allow for SNLen plus + * possible trailing '\0' + */ + sizeof(work32)-(16*sizeof(u32))-2 + ); + seq_printf(seq, "\n"); + + return 0; +} + + +int i2o_seq_show_dev_name(struct seq_file *seq, void *v) +{ + struct i2o_device *d = (struct i2o_device*)seq->private; + + seq_printf(seq, "%s\n", d->device.bus_id); + + return 0; +} + + +/* Generic group F101h - DDM Identity (scalar) */ +int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v) +{ + struct i2o_device *d = (struct i2o_device*)seq->private; + int token; + + struct + { + u16 ddm_tid; + u8 module_name[24]; + u8 module_rev[8]; + u8 sn_format; + u8 serial_number[12]; + u8 pad[256]; // allow up to 256 byte (max) serial number + } result; + + token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result)); + + if (token < 0) { + i2o_report_query_status(seq, token, "0xF101 DDM Identity"); + return 0; + } + + seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid); + seq_printf(seq, "Module name : %s\n", chtostr(result.module_name, 24)); + seq_printf(seq, "Module revision : %s\n", chtostr(result.module_rev, 8)); + + seq_printf(seq, "Serial number : "); + print_serial_number(seq, result.serial_number, sizeof(result)-36); + /* allow for SNLen plus possible trailing '\0' */ + + seq_printf(seq, "\n"); + + return 0; +} + +/* Generic group F102h - User Information (scalar) */ +int i2o_seq_show_uinfo(struct seq_file *seq, void *v) +{ + struct i2o_device *d = (struct i2o_device*)seq->private; + int token; + + struct + { + u8 device_name[64]; + u8 service_name[64]; + u8 physical_location[64]; + u8 instance_number[4]; + } result; + + token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result)); + + if (token < 0) { + i2o_report_query_status(seq, token,"0xF102 User Information"); + return 0; + } + + seq_printf(seq, "Device name : %s\n", chtostr(result.device_name, 64)); + seq_printf(seq, "Service name : %s\n", chtostr(result.service_name, 64)); + seq_printf(seq, "Physical name : %s\n", 
chtostr(result.physical_location, 64)); + seq_printf(seq, "Instance number : %s\n", chtostr(result.instance_number, 4)); + + return 0; +} + +/* Generic group F103h - SGL Operating Limits (scalar) */ +int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v) +{ + struct i2o_device *d = (struct i2o_device*)seq->private; + static u32 work32[12]; + static u16 *work16 = (u16 *)work32; + static u8 *work8 = (u8 *)work32; + int token; + + token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32)); + + if (token < 0) { + i2o_report_query_status(seq, token,"0xF103 SGL Operating Limits"); + return 0; + } + + seq_printf(seq, "SGL chain size : %d\n", work32[0]); + seq_printf(seq, "Max SGL chain size : %d\n", work32[1]); + seq_printf(seq, "SGL chain size target : %d\n", work32[2]); + seq_printf(seq, "SGL frag count : %d\n", work16[6]); + seq_printf(seq, "Max SGL frag count : %d\n", work16[7]); + seq_printf(seq, "SGL frag count target : %d\n", work16[8]); + +/* FIXME + if (d->i2oversion == 0x02) + { +*/ + seq_printf(seq, "SGL data alignment : %d\n", work16[8]); + seq_printf(seq, "SGL addr limit : %d\n", work8[20]); + seq_printf(seq, "SGL addr sizes supported : "); + if (work8[21] & 0x01) + seq_printf(seq, "32 bit "); + if (work8[21] & 0x02) + seq_printf(seq, "64 bit "); + if (work8[21] & 0x04) + seq_printf(seq, "96 bit "); + if (work8[21] & 0x08) + seq_printf(seq, "128 bit "); + seq_printf(seq, "\n"); +/* + } +*/ + + return 0; +} + +/* Generic group F200h - Sensors (scalar) */ +int i2o_seq_show_sensors(struct seq_file *seq, void *v) +{ + struct i2o_device *d = (struct i2o_device*)seq->private; + int token; + + struct + { + u16 sensor_instance; + u8 component; + u16 component_instance; + u8 sensor_class; + u8 sensor_type; + u8 scaling_exponent; + u32 actual_reading; + u32 minimum_reading; + u32 low2lowcat_treshold; + u32 lowcat2low_treshold; + u32 lowwarn2low_treshold; + u32 low2lowwarn_treshold; + u32 norm2lowwarn_treshold; + u32 lowwarn2norm_treshold; + u32 nominal_reading; + u32 hiwarn2norm_treshold; + u32 norm2hiwarn_treshold; + u32 high2hiwarn_treshold; + u32 hiwarn2high_treshold; + u32 hicat2high_treshold; + u32 hi2hicat_treshold; + u32 maximum_reading; + u8 sensor_state; + u16 event_enable; + } result; + + token = i2o_parm_field_get(d, 0xF200, -1, &result, sizeof(result)); + + if (token < 0) { + i2o_report_query_status(seq, token,"0xF200 Sensors (optional)"); + return 0; + } + + seq_printf(seq, "Sensor instance : %d\n", result.sensor_instance); + + seq_printf(seq, "Component : %d = ", result.component); + switch (result.component) + { + case 0: seq_printf(seq, "Other"); + break; + case 1: seq_printf(seq, "Planar logic Board"); + break; + case 2: seq_printf(seq, "CPU"); + break; + case 3: seq_printf(seq, "Chassis"); + break; + case 4: seq_printf(seq, "Power Supply"); + break; + case 5: seq_printf(seq, "Storage"); + break; + case 6: seq_printf(seq, "External"); + break; + } + seq_printf(seq,"\n"); + + seq_printf(seq, "Component instance : %d\n", result.component_instance); + seq_printf(seq, "Sensor class : %s\n", + result.sensor_class ? 
"Analog" : "Digital"); + + seq_printf(seq, "Sensor type : %d = ",result.sensor_type); + switch (result.sensor_type) + { + case 0: seq_printf(seq, "Other\n"); + break; + case 1: seq_printf(seq, "Thermal\n"); + break; + case 2: seq_printf(seq, "DC voltage (DC volts)\n"); + break; + case 3: seq_printf(seq, "AC voltage (AC volts)\n"); + break; + case 4: seq_printf(seq, "DC current (DC amps)\n"); + break; + case 5: seq_printf(seq, "AC current (AC volts)\n"); + break; + case 6: seq_printf(seq, "Door open\n"); + break; + case 7: seq_printf(seq, "Fan operational\n"); + break; + } + + seq_printf(seq, "Scaling exponent : %d\n", result.scaling_exponent); + seq_printf(seq, "Actual reading : %d\n", result.actual_reading); + seq_printf(seq, "Minimum reading : %d\n", result.minimum_reading); + seq_printf(seq, "Low2LowCat treshold : %d\n", result.low2lowcat_treshold); + seq_printf(seq, "LowCat2Low treshold : %d\n", result.lowcat2low_treshold); + seq_printf(seq, "LowWarn2Low treshold : %d\n", result.lowwarn2low_treshold); + seq_printf(seq, "Low2LowWarn treshold : %d\n", result.low2lowwarn_treshold); + seq_printf(seq, "Norm2LowWarn treshold : %d\n", result.norm2lowwarn_treshold); + seq_printf(seq, "LowWarn2Norm treshold : %d\n", result.lowwarn2norm_treshold); + seq_printf(seq, "Nominal reading : %d\n", result.nominal_reading); + seq_printf(seq, "HiWarn2Norm treshold : %d\n", result.hiwarn2norm_treshold); + seq_printf(seq, "Norm2HiWarn treshold : %d\n", result.norm2hiwarn_treshold); + seq_printf(seq, "High2HiWarn treshold : %d\n", result.high2hiwarn_treshold); + seq_printf(seq, "HiWarn2High treshold : %d\n", result.hiwarn2high_treshold); + seq_printf(seq, "HiCat2High treshold : %d\n", result.hicat2high_treshold); + seq_printf(seq, "High2HiCat treshold : %d\n", result.hi2hicat_treshold); + seq_printf(seq, "Maximum reading : %d\n", result.maximum_reading); + + seq_printf(seq, "Sensor state : %d = ", result.sensor_state); + switch (result.sensor_state) + { + case 0: seq_printf(seq, "Normal\n"); + break; + case 1: seq_printf(seq, "Abnormal\n"); + break; + case 2: seq_printf(seq, "Unknown\n"); + break; + case 3: seq_printf(seq, "Low Catastrophic (LoCat)\n"); + break; + case 4: seq_printf(seq, "Low (Low)\n"); + break; + case 5: seq_printf(seq, "Low Warning (LoWarn)\n"); + break; + case 6: seq_printf(seq, "High Warning (HiWarn)\n"); + break; + case 7: seq_printf(seq, "High (High)\n"); + break; + case 8: seq_printf(seq, "High Catastrophic (HiCat)\n"); + break; + } + + seq_printf(seq, "Event_enable : 0x%02X\n", result.event_enable); + seq_printf(seq, " [%s] Operational state change. \n", + (result.event_enable & 0x01) ? "+" : "-" ); + seq_printf(seq, " [%s] Low catastrophic. \n", + (result.event_enable & 0x02) ? "+" : "-" ); + seq_printf(seq, " [%s] Low reading. \n", + (result.event_enable & 0x04) ? "+" : "-" ); + seq_printf(seq, " [%s] Low warning. \n", + (result.event_enable & 0x08) ? "+" : "-" ); + seq_printf(seq, " [%s] Change back to normal from out of range state. \n", + (result.event_enable & 0x10) ? "+" : "-" ); + seq_printf(seq, " [%s] High warning. \n", + (result.event_enable & 0x20) ? "+" : "-" ); + seq_printf(seq, " [%s] High reading. \n", + (result.event_enable & 0x40) ? "+" : "-" ); + seq_printf(seq, " [%s] High catastrophic. \n", + (result.event_enable & 0x80) ? 
"+" : "-" ); + + return 0; +} + +/** + * i2o_proc_create_entries - Creates proc dir entries + * @dir: proc dir entry under which the entries should be placed + * @i2o_pe: pointer to the entries which should be added + * @data: pointer to I2O controller or device + * + * Create proc dir entries for a I2O controller or I2O device. + * + * Returns 0 on success or negative error code on failure. + */ +static int i2o_proc_create_entries(struct proc_dir_entry *dir, + i2o_proc_entry *i2o_pe, void *data) +{ + struct proc_dir_entry *tmp; + + while(i2o_pe->name) { + tmp = create_proc_entry(i2o_pe->name, i2o_pe->mode, dir); + if(!tmp) + return -1; + + tmp->data = data; + tmp->proc_fops = i2o_pe->fops; + + i2o_pe++; + } + + return 0; +} + +/** + * i2o_proc_subdir_remove - Remove child entries from a proc entry + * @dir: proc dir entry from which the childs should be removed + * + * Iterate over each i2o proc entry under dir and remove it. If the child + * also has entries, remove them too. + */ +static void i2o_proc_subdir_remove(struct proc_dir_entry *dir) +{ + struct proc_dir_entry *pe, *tmp; + pe=dir->subdir; + while(pe) { + tmp = pe->next; + i2o_proc_subdir_remove(pe); + remove_proc_entry(pe->name, dir); + pe = tmp; + } +}; + +/** + * i2o_proc_iop_add - Add an I2O controller to the i2o proc tree + * @dir: parent proc dir entry + * @c: I2O controller which should be added + * + * Add the entries to the parent proc dir entry. Also each device is added + * to the controllers proc dir entry. + * + * Returns 0 on success or negative error code on failure. + */ +static int i2o_proc_iop_add(struct proc_dir_entry *dir,struct i2o_controller *c) +{ + struct proc_dir_entry *iopdir; + struct i2o_device *dev; + char buff[10]; + + snprintf(buff, 10, "iop%d", c->unit); + + DBG("Adding IOP /proc/i2o/%s\n", buff); + + iopdir = proc_mkdir(buff, dir); + if(!iopdir) + return -1; + + iopdir->data = c; + + i2o_proc_create_entries(iopdir, i2o_proc_generic_iop_entries, c); + + list_for_each_entry(dev, &c->devices, list) + i2o_proc_device_add(iopdir, dev); + + return 0; +} + +/** + * i2o_proc_iop_remove - Removes an I2O controller from the i2o proc tree + * @dir: parent proc dir entry + * @c: I2O controller which should be removed + * + * Iterate over each i2o proc entry and search controller c. If it is found + * remove it from the tree. + */ +static void i2o_proc_iop_remove(struct proc_dir_entry *dir, + struct i2o_controller *c) +{ + struct proc_dir_entry *pe, *tmp; + + pe=dir->subdir; + while(pe) { + tmp = pe->next; + if(pe->data == c) { + i2o_proc_subdir_remove(pe); + remove_proc_entry(pe->name, dir); + } + DBG("Removing IOP /proc/i2o/iop%d\n", c->unit); + pe = tmp; + } +} + +/** + * i2o_proc_device_add - Add an I2O device to the proc dir + * @dir: proc dir entry to which the device should be added + * @dev: I2O device which should be added + * + * Add an I2O device to the proc dir entry dir and create the entries for + * the device depending on the class of the I2O device. 
+ */ +static void i2o_proc_device_add(struct proc_dir_entry *dir, + struct i2o_device *dev) +{ + char buff[10]; + struct proc_dir_entry *devdir; + i2o_proc_entry *i2o_pe = NULL; + + sprintf(buff, "%03x", dev->lct_data.tid); + + DBG("Adding device /proc/i2o/iop%d/%s\n", dev->iop->unit, buff); + + devdir = proc_mkdir(buff, dir); + if(!devdir) { + printk(KERN_WARNING "i2o: Could not allocate procdir!\n"); + return; + } + + devdir->data = dev; + + i2o_proc_create_entries(devdir, generic_dev_entries, dev); + + /* Inform core that we want updates about this device's status */ + switch(dev->lct_data.class_id) { + case I2O_CLASS_SCSI_PERIPHERAL: + case I2O_CLASS_RANDOM_BLOCK_STORAGE: + i2o_pe = rbs_dev_entries; + break; + default: + break; + } + if(i2o_pe) + i2o_proc_create_entries(devdir, i2o_pe, dev); +} + +/** + * i2o_proc_fs_create - Create the i2o proc fs. + * + * Iterate over each I2O controller and create the entries for it. + * + * Returns 0 on success or negative error code on failure. + */ +static int __init i2o_proc_fs_create(void) +{ + struct i2o_controller *c; + + i2o_proc_dir_root = proc_mkdir("i2o", 0); + if(!i2o_proc_dir_root) + return -1; + + i2o_proc_dir_root->owner = THIS_MODULE; + + list_for_each_entry(c, &i2o_controllers, list) + i2o_proc_iop_add(i2o_proc_dir_root, c); + + return 0; +}; + +/** + * i2o_proc_fs_destroy - Cleanup the all i2o proc entries + * + * Iterate over each I2O controller and remove the entries for it. + * + * Returns 0 on success or negative error code on failure. + */ +static int __exit i2o_proc_fs_destroy(void) +{ + struct i2o_controller *c; + + list_for_each_entry(c, &i2o_controllers, list) + i2o_proc_iop_remove(i2o_proc_dir_root, c); + + remove_proc_entry("i2o", 0); + + return 0; +}; + +/** + * i2o_proc_init - Init function for procfs + * + * Registers Proc OSM and creates procfs entries. + * + * Returns 0 on success or negative error code on failure. + */ +static int __init i2o_proc_init(void) +{ + int rc; + + rc = i2o_driver_register(&i2o_proc_driver); + if(rc) + return rc; + + rc = i2o_proc_fs_create(); + if(rc) { + i2o_driver_unregister(&i2o_proc_driver); + return rc; + } + + return 0; +}; + +/** + * i2o_proc_exit - Exit function for procfs + * + * Unregisters Proc OSM and removes procfs entries. + */ +static void __exit i2o_proc_exit(void) +{ + i2o_driver_unregister(&i2o_proc_driver); + i2o_proc_fs_destroy(); +}; + +MODULE_AUTHOR("Deepak Saxena"); +MODULE_DESCRIPTION("I2O procfs Handler"); +MODULE_LICENSE("GPL"); + +module_init(i2o_proc_init); +module_exit(i2o_proc_exit); diff -L drivers/message/i2o/README -puN drivers/message/i2o/README~i2o-build_99 /dev/null --- 25/drivers/message/i2o/README +++ /dev/null Thu Apr 11 07:25:15 2002 @@ -1,98 +0,0 @@ - - Linux I2O Support (c) Copyright 1999 Red Hat Software - and others. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License - as published by the Free Software Foundation; either version - 2 of the License, or (at your option) any later version. - -AUTHORS (so far) - -Alan Cox, Building Number Three Ltd. - Core code, SCSI and Block OSMs - -Steve Ralston, LSI Logic Corp. - Debugging SCSI and Block OSM - -Deepak Saxena, Intel Corp. 
- Various core/block extensions - /proc interface, bug fixes - Ioctl interfaces for control - Debugging LAN OSM - -Philip Rumpf - Fixed assorted dumb SMP locking bugs - -Juha Sievanen, University of Helsinki Finland - LAN OSM code - /proc interface to LAN class - Bug fixes - Core code extensions - -Auvo Häkkinen, University of Helsinki Finland - LAN OSM code - /Proc interface to LAN class - Bug fixes - Core code extensions - -Taneli Vähäkangas, University of Helsinki Finland - Fixes to i2o_config - -CREDITS - - This work was made possible by - -Red Hat Software - Funding for the Building #3 part of the project - -Symbios Logic (Now LSI) - Host adapters, hints, known to work platforms when I hit - compatibility problems - -BoxHill Corporation - Loan of initial FibreChannel disk array used for development work. - -European Comission - Funding the work done by the University of Helsinki - -SysKonnect - Loan of FDDI and Gigabit Ethernet cards - -ASUSTeK - Loan of I2O motherboard - -STATUS: - -o The core setup works within limits. -o The scsi layer seems to almost work. - I'm still chasing down the hang bug. -o The block OSM is mostly functional -o LAN OSM works with FDDI and Ethernet cards. - -TO DO: - -General: -o Provide hidden address space if asked -o Long term message flow control -o PCI IOP's without interrupts are not supported yet -o Push FAIL handling into the core -o DDM control interfaces for module load etc -o Add I2O 2.0 support (Deffered to 2.5 kernel) - -Block: -o Multiple major numbers -o Read ahead and cache handling stuff. Talk to Ingo and people -o Power management -o Finish Media changers - -SCSI: -o Find the right way to associate drives/luns/busses - -Lan: -o Performance tuning -o Test Fibre Channel code - -Tape: -o Anyone seen anything implementing this ? - (D.S: Will attempt to do so if spare cycles permit) diff -L drivers/message/i2o/README.ioctl -puN drivers/message/i2o/README.ioctl~i2o-build_99 /dev/null --- 25/drivers/message/i2o/README.ioctl +++ /dev/null Thu Apr 11 07:25:15 2002 @@ -1,394 +0,0 @@ - -Linux I2O User Space Interface -rev 0.3 - 04/20/99 - -============================================================================= -Originally written by Deepak Saxena(deepak@plexity.net) -Currently maintained by Deepak Saxena(deepak@plexity.net) -============================================================================= - -I. Introduction - -The Linux I2O subsystem provides a set of ioctl() commands that can be -utilized by user space applications to communicate with IOPs and devices -on individual IOPs. This document defines the specific ioctl() commands -that are available to the user and provides examples of their uses. - -This document assumes the reader is familiar with or has access to the -I2O specification as no I2O message parameters are outlined. For information -on the specification, see http://www.i2osig.org - -This document and the I2O user space interface are currently maintained -by Deepak Saxena. Please send all comments, errata, and bug fixes to -deepak@csociety.purdue.edu - -II. IOP Access - -Access to the I2O subsystem is provided through the device file named -/dev/i2o/ctl. This file is a character file with major number 10 and minor -number 166. It can be created through the following command: - - mknod /dev/i2o/ctl c 10 166 - -III. Determining the IOP Count - - SYNOPSIS - - ioctl(fd, I2OGETIOPS, int *count); - - u8 count[MAX_I2O_CONTROLLERS]; - - DESCRIPTION - - This function returns the system's active IOP table. 
count should - point to a buffer containing MAX_I2O_CONTROLLERS entries. Upon - returning, each entry will contain a non-zero value if the given - IOP unit is active, and NULL if it is inactive or non-existent. - - RETURN VALUE. - - Returns 0 if no errors occur, and -1 otherwise. If an error occurs, - errno is set appropriately: - - EFAULT Invalid user space pointer was passed - -IV. Getting Hardware Resource Table - - SYNOPSIS - - ioctl(fd, I2OHRTGET, struct i2o_cmd_hrt *hrt); - - struct i2o_cmd_hrtlct - { - u32 iop; /* IOP unit number */ - void *resbuf; /* Buffer for result */ - u32 *reslen; /* Buffer length in bytes */ - }; - - DESCRIPTION - - This function returns the Hardware Resource Table of the IOP specified - by hrt->iop in the buffer pointed to by hrt->resbuf. The actual size of - the data is written into *(hrt->reslen). - - RETURNS - - This function returns 0 if no errors occur. If an error occurs, -1 - is returned and errno is set appropriately: - - EFAULT Invalid user space pointer was passed - ENXIO Invalid IOP number - ENOBUFS Buffer not large enough. If this occurs, the required - buffer length is written into *(hrt->reslen) - -V. Getting Logical Configuration Table - - SYNOPSIS - - ioctl(fd, I2OLCTGET, struct i2o_cmd_lct *lct); - - struct i2o_cmd_hrtlct - { - u32 iop; /* IOP unit number */ - void *resbuf; /* Buffer for result */ - u32 *reslen; /* Buffer length in bytes */ - }; - - DESCRIPTION - - This function returns the Logical Configuration Table of the IOP specified - by lct->iop in the buffer pointed to by lct->resbuf. The actual size of - the data is written into *(lct->reslen). - - RETURNS - - This function returns 0 if no errors occur. If an error occurs, -1 - is returned and errno is set appropriately: - - EFAULT Invalid user space pointer was passed - ENXIO Invalid IOP number - ENOBUFS Buffer not large enough. If this occurs, the required - buffer length is written into *(lct->reslen) - -VI. Settting Parameters - - SYNOPSIS - - ioctl(fd, I2OPARMSET, struct i2o_parm_setget *ops); - - struct i2o_cmd_psetget - { - u32 iop; /* IOP unit number */ - u32 tid; /* Target device TID */ - void *opbuf; /* Operation List buffer */ - u32 oplen; /* Operation List buffer length in bytes */ - void *resbuf; /* Result List buffer */ - u32 *reslen; /* Result List buffer length in bytes */ - }; - - DESCRIPTION - - This function posts a UtilParamsSet message to the device identified - by ops->iop and ops->tid. The operation list for the message is - sent through the ops->opbuf buffer, and the result list is written - into the buffer pointed to by ops->resbuf. The number of bytes - written is placed into *(ops->reslen). - - RETURNS - - The return value is the size in bytes of the data written into - ops->resbuf if no errors occur. If an error occurs, -1 is returned - and errno is set appropriatly: - - EFAULT Invalid user space pointer was passed - ENXIO Invalid IOP number - ENOBUFS Buffer not large enough. If this occurs, the required - buffer length is written into *(ops->reslen) - ETIMEDOUT Timeout waiting for reply message - ENOMEM Kernel memory allocation error - - A return value of 0 does not mean that the value was actually - changed properly on the IOP. The user should check the result - list to determine the specific status of the transaction. - -VII. 
Getting Parameters - - SYNOPSIS - - ioctl(fd, I2OPARMGET, struct i2o_parm_setget *ops); - - struct i2o_parm_setget - { - u32 iop; /* IOP unit number */ - u32 tid; /* Target device TID */ - void *opbuf; /* Operation List buffer */ - u32 oplen; /* Operation List buffer length in bytes */ - void *resbuf; /* Result List buffer */ - u32 *reslen; /* Result List buffer length in bytes */ - }; - - DESCRIPTION - - This function posts a UtilParamsGet message to the device identified - by ops->iop and ops->tid. The operation list for the message is - sent through the ops->opbuf buffer, and the result list is written - into the buffer pointed to by ops->resbuf. The actual size of data - written is placed into *(ops->reslen). - - RETURNS - - EFAULT Invalid user space pointer was passed - ENXIO Invalid IOP number - ENOBUFS Buffer not large enough. If this occurs, the required - buffer length is written into *(ops->reslen) - ETIMEDOUT Timeout waiting for reply message - ENOMEM Kernel memory allocation error - - A return value of 0 does not mean that the value was actually - properly retreived. The user should check the result list - to determine the specific status of the transaction. - -VIII. Downloading Software - - SYNOPSIS - - ioctl(fd, I2OSWDL, struct i2o_sw_xfer *sw); - - struct i2o_sw_xfer - { - u32 iop; /* IOP unit number */ - u8 flags; /* DownloadFlags field */ - u8 sw_type; /* Software type */ - u32 sw_id; /* Software ID */ - void *buf; /* Pointer to software buffer */ - u32 *swlen; /* Length of software buffer */ - u32 *maxfrag; /* Number of fragments */ - u32 *curfrag; /* Current fragment number */ - }; - - DESCRIPTION - - This function downloads a software fragment pointed by sw->buf - to the iop identified by sw->iop. The DownloadFlags, SwID, SwType - and SwSize fields of the ExecSwDownload message are filled in with - the values of sw->flags, sw->sw_id, sw->sw_type and *(sw->swlen). - - The fragments _must_ be sent in order and be 8K in size. The last - fragment _may_ be shorter, however. The kernel will compute its - size based on information in the sw->swlen field. - - Please note that SW transfers can take a long time. - - RETURNS - - This function returns 0 no errors occur. If an error occurs, -1 - is returned and errno is set appropriatly: - - EFAULT Invalid user space pointer was passed - ENXIO Invalid IOP number - ETIMEDOUT Timeout waiting for reply message - ENOMEM Kernel memory allocation error - -IX. Uploading Software - - SYNOPSIS - - ioctl(fd, I2OSWUL, struct i2o_sw_xfer *sw); - - struct i2o_sw_xfer - { - u32 iop; /* IOP unit number */ - u8 flags; /* UploadFlags */ - u8 sw_type; /* Software type */ - u32 sw_id; /* Software ID */ - void *buf; /* Pointer to software buffer */ - u32 *swlen; /* Length of software buffer */ - u32 *maxfrag; /* Number of fragments */ - u32 *curfrag; /* Current fragment number */ - }; - - DESCRIPTION - - This function uploads a software fragment from the IOP identified - by sw->iop, sw->sw_type, sw->sw_id and optionally sw->swlen fields. - The UploadFlags, SwID, SwType and SwSize fields of the ExecSwUpload - message are filled in with the values of sw->flags, sw->sw_id, - sw->sw_type and *(sw->swlen). - - The fragments _must_ be requested in order and be 8K in size. The - user is responsible for allocating memory pointed by sw->buf. The - last fragment _may_ be shorter. - - Please note that SW transfers can take a long time. - - RETURNS - - This function returns 0 if no errors occur. 
If an error occurs, -1 - is returned and errno is set appropriatly: - - EFAULT Invalid user space pointer was passed - ENXIO Invalid IOP number - ETIMEDOUT Timeout waiting for reply message - ENOMEM Kernel memory allocation error - -X. Removing Software - - SYNOPSIS - - ioctl(fd, I2OSWDEL, struct i2o_sw_xfer *sw); - - struct i2o_sw_xfer - { - u32 iop; /* IOP unit number */ - u8 flags; /* RemoveFlags */ - u8 sw_type; /* Software type */ - u32 sw_id; /* Software ID */ - void *buf; /* Unused */ - u32 *swlen; /* Length of the software data */ - u32 *maxfrag; /* Unused */ - u32 *curfrag; /* Unused */ - }; - - DESCRIPTION - - This function removes software from the IOP identified by sw->iop. - The RemoveFlags, SwID, SwType and SwSize fields of the ExecSwRemove message - are filled in with the values of sw->flags, sw->sw_id, sw->sw_type and - *(sw->swlen). Give zero in *(sw->len) if the value is unknown. IOP uses - *(sw->swlen) value to verify correct identication of the module to remove. - The actual size of the module is written into *(sw->swlen). - - RETURNS - - This function returns 0 if no errors occur. If an error occurs, -1 - is returned and errno is set appropriatly: - - EFAULT Invalid user space pointer was passed - ENXIO Invalid IOP number - ETIMEDOUT Timeout waiting for reply message - ENOMEM Kernel memory allocation error - -X. Validating Configuration - - SYNOPSIS - - ioctl(fd, I2OVALIDATE, int *iop); - u32 iop; - - DESCRIPTION - - This function posts an ExecConfigValidate message to the controller - identified by iop. This message indicates that the current - configuration is accepted. The iop changes the status of suspect drivers - to valid and may delete old drivers from its store. - - RETURNS - - This function returns 0 if no erro occur. If an error occurs, -1 is - returned and errno is set appropriatly: - - ETIMEDOUT Timeout waiting for reply message - ENXIO Invalid IOP number - -XI. Configuration Dialog - - SYNOPSIS - - ioctl(fd, I2OHTML, struct i2o_html *htquery); - struct i2o_html - { - u32 iop; /* IOP unit number */ - u32 tid; /* Target device ID */ - u32 page; /* HTML page */ - void *resbuf; /* Buffer for reply HTML page */ - u32 *reslen; /* Length in bytes of reply buffer */ - void *qbuf; /* Pointer to HTTP query string */ - u32 qlen; /* Length in bytes of query string buffer */ - }; - - DESCRIPTION - - This function posts an UtilConfigDialog message to the device identified - by htquery->iop and htquery->tid. The requested HTML page number is - provided by the htquery->page field, and the resultant data is stored - in the buffer pointed to by htquery->resbuf. If there is an HTTP query - string that is to be sent to the device, it should be sent in the buffer - pointed to by htquery->qbuf. If there is no query string, this field - should be set to NULL. The actual size of the reply received is written - into *(htquery->reslen). - - RETURNS - - This function returns 0 if no error occur. If an error occurs, -1 - is returned and errno is set appropriatly: - - EFAULT Invalid user space pointer was passed - ENXIO Invalid IOP number - ENOBUFS Buffer not large enough. If this occurs, the required - buffer length is written into *(ops->reslen) - ETIMEDOUT Timeout waiting for reply message - ENOMEM Kernel memory allocation error - -XII. Events - - In the process of determining this. Current idea is to have use - the select() interface to allow user apps to periodically poll - the /dev/i2o/ctl device for events. 
When select() notifies the user - that an event is available, the user would call read() to retrieve - a list of all the events that are pending for the specific device. - -============================================================================= -Revision History -============================================================================= - -Rev 0.1 - 04/01/99 -- Initial revision - -Rev 0.2 - 04/06/99 -- Changed return values to match UNIX ioctl() standard. Only return values - are 0 and -1. All errors are reported through errno. -- Added summary of proposed possible event interfaces - -Rev 0.3 - 04/20/99 -- Changed all ioctls() to use pointers to user data instead of actual data -- Updated error values to match the code diff -puN /dev/null drivers/message/i2o/scsi-osm.c --- /dev/null Thu Apr 11 07:25:15 2002 +++ 25-akpm/drivers/message/i2o/scsi-osm.c Tue Jul 27 14:06:32 2004 @@ -0,0 +1,1012 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * For the avoidance of doubt the "preferred form" of this code is one which + * is in an open non patent encumbered format. Where cryptographic key signing + * forms part of the process of creating an executable the information + * including keys needed to generate an equivalently functional executable + * are deemed to be part of the source code. + * + * Complications for I2O scsi + * + * o Each (bus,lun) is a logical device in I2O. We keep a map + * table. We spoof failed selection for unmapped units + * o Request sense buffers can come back for free. + * o Scatter gather is a bit dynamic. We have to investigate at + * setup time. + * o Some of our resources are dynamically shared. The i2o core + * needs a message reservation protocol to avoid swap v net + * deadlocking. We need to back off queue requests. + * + * In general the firmware wants to help. Where its help isn't performance + * useful we just ignore the aid. Its not worth the code in truth. + * + * Fixes/additions: + * Steve Ralston: + * Scatter gather now works + * Markus Lidel : + * Minor fixes for 2.6. + * + * To Do: + * 64bit cleanups + * Fix the resource management problems. + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "scsi/scsi.h" +#include "scsi/scsi_host.h" +#include "scsi/scsi_device.h" +#include "scsi/scsi_cmnd.h" + + + +#define VERSION_STRING "Version 0.1.2" + +#undef DEBUG +//#define DEBUG 1 + +#ifdef DEBUG +#define DBG(x...) printk(x) +#else +#define DBG(x...) +#endif + + +static int i2o_scsi_max_id = 16; +static int i2o_scsi_max_lun = 8; + +static LIST_HEAD(i2o_scsi_hosts); + +struct i2o_scsi_host { + struct list_head list; /* node in in i2o_scsi_hosts */ + struct Scsi_Host *scsi_host; /* pointer to the SCSI host */ + struct i2o_controller *iop; /* pointer to the I2O controller */ + struct i2o_device *channel[]; /* channel->i2o_dev mapping table */ +}; + +/* + * This is only needed, because we can only set the hostdata after the device is + * added to the scsi core. 
So we need this little workaround. + */ +static DECLARE_MUTEX(i2o_scsi_probe_lock); +static struct i2o_device *i2o_scsi_probe_dev = NULL; + +static int i2o_scsi_slave_alloc(struct scsi_device *sdp) { + sdp->hostdata = i2o_scsi_probe_dev; + return 0; +}; + +static int i2o_scsi_probe(struct device *); +static int i2o_scsi_queuecommand(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *)); +static struct i2o_scsi_host *i2o_scsi_get_host(struct i2o_controller *c); +static int i2o_scsi_remove(struct device *dev); +static int i2o_scsi_reply(struct i2o_controller *c, u32 m, + struct i2o_message *msg); + +#define I2O_SCSI_CAN_QUEUE 4 + +/* SCSI OSM class handling definition */ +static struct i2o_class_id i2o_scsi_class_id[] = { + { I2O_CLASS_SCSI_PERIPHERAL }, + { I2O_CLASS_END } +}; + +/* SCSI OSM driver struct */ +static struct i2o_driver i2o_scsi_driver = { + .name = "scsi-osm", + .reply = i2o_scsi_reply, + .classes = i2o_scsi_class_id, + .driver.probe = i2o_scsi_probe, + .driver.remove = i2o_scsi_remove +}; + +/** + * i2o_scsi_remove - Remove I2O device from SCSI core + * @dev: device which should be removed + * + * Removes the I2O device from the SCSI core again. + * + * Returns 0 on success. + */ +static int i2o_scsi_remove(struct device *dev) +{ + struct i2o_device *i2o_dev = to_i2o_device(dev); + struct i2o_controller *c = i2o_dev->iop; + struct i2o_scsi_host *i2o_shost; + struct scsi_device *scsi_dev; + + i2o_shost = i2o_scsi_get_host(c); + + shost_for_each_device(scsi_dev, i2o_shost->scsi_host) + if(scsi_dev->hostdata == i2o_dev) { + scsi_remove_device(scsi_dev); + scsi_device_put(scsi_dev); + break; + } + + return 0; +}; + +/** + * i2o_scsi_probe - verify if dev is a I2O SCSI device and install it + * @dev: device to verify if it is a I2O SCSI device + * + * Retrieve channel, id and lun for I2O device. If everthing goes well + * register the I2O device as SCSI device on the I2O SCSI controller. + * + * Returns 0 on success or negative error code on failure. 
+ */ +static int i2o_scsi_probe(struct device *dev) +{ + struct i2o_device *i2o_dev = to_i2o_device(dev); + struct i2o_controller *c = i2o_dev->iop; + struct i2o_scsi_host *i2o_shost; + struct Scsi_Host *scsi_host; + struct i2o_device *parent; + struct scsi_device *scsi_dev; + u32 id; + u64 lun; + int channel = -1; + int i; + + i2o_shost = i2o_scsi_get_host(c); + if(IS_ERR(i2o_shost)) + return PTR_ERR(i2o_shost); + + scsi_host = i2o_shost->scsi_host; + + if(i2o_parm_field_get(i2o_dev, 0, 3, &id, 4) < 0) + return -EFAULT; + + if(id >= scsi_host->max_id) { + printk(KERN_WARNING "scsi-osm: SCSI device id (%d) >= max_id " + "of I2O host (%d)", id, scsi_host->max_id); + return -EFAULT; + } + + if(i2o_parm_field_get(i2o_dev, 0, 4, &lun, 8) < 0) + return -EFAULT; + if(lun >= scsi_host->max_lun) { + printk(KERN_WARNING "scsi-osm: SCSI device id (%d) >= max_lun " + "of I2O host (%d)", (unsigned int)lun, + scsi_host->max_lun); + return -EFAULT; + } + + parent = i2o_iop_find_device(c, i2o_dev->lct_data.parent_tid); + if(!parent) { + printk(KERN_WARNING "scsi-osm: can not find parent of device " + "%03x\n", i2o_dev->lct_data.tid); + return -EFAULT; + } + + for(i = 0; i <= i2o_shost->scsi_host->max_channel; i ++) + if(i2o_shost->channel[i] == parent) + channel = i; + + if(channel == -1) { + printk(KERN_WARNING "scsi-osm: can not find channel of device " + "%03x\n", i2o_dev->lct_data.tid); + return -EFAULT; + } + + down_interruptible(&i2o_scsi_probe_lock); + i2o_scsi_probe_dev = i2o_dev; + scsi_dev = scsi_add_device(i2o_shost->scsi_host, channel, id, lun); + i2o_scsi_probe_dev = NULL; + up(&i2o_scsi_probe_lock); + + if(!scsi_dev) { + printk(KERN_WARNING "scsi-osm: can not add SCSI device " + "%03x\n", i2o_dev->lct_data.tid); + return -EFAULT; + } + + DBG("Added new SCSI device %03x (cannel: %d, id: %d, lun: %d)\n", + i2o_dev->lct_data.tid, channel, id, (unsigned int)lun); + + return 0; +}; + +static const char *i2o_scsi_info(struct Scsi_Host *SChost) +{ + struct i2o_scsi_host *hostdata; + hostdata = (struct i2o_scsi_host *)SChost->hostdata; + return hostdata->iop->name; +} + +#if 0 +/** + * i2o_retry_run - retry on timeout + * @f: unused + * + * Retry congested frames. This actually needs pushing down into + * i2o core. We should only bother the OSM with this when we can't + * queue and retry the frame. Or perhaps we should call the OSM + * and its default handler should be this in the core, and this + * call a 2nd "I give up" handler in the OSM ? + */ + +static void i2o_retry_run(unsigned long f) +{ + int i; + unsigned long flags; + + spin_lock_irqsave(&retry_lock, flags); + for(i=0;i 0 + * on success and if the reply should be flushed. Returns negative error + * code on failure and if the reply should be flushed. 
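For a minimal OSM this contract boils down to roughly the following shape (a sketch only; example_reply and the completion step are placeholders):

	static int example_reply(struct i2o_controller *c, u32 m,
				 struct i2o_message *msg)
	{
		/* recover the request stored with i2o_cntxt_list_add()
		 * when the message was posted */
		void *req = i2o_cntxt_list_get(c, readl(&msg->tcntxt));

		if (!req)
			return -EFAULT;	/* nothing to complete */

		/* ... complete the request ... */

		return 1;		/* > 0: tell the core the reply can be flushed */
	}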
+ */ +static int i2o_scsi_reply(struct i2o_controller *c, u32 m, + struct i2o_message *msg) +{ + struct scsi_cmnd *cmd; + struct device *dev; + u8 as,ds,st; + + cmd = i2o_cntxt_list_get(c, readl(&msg->tcntxt)); + + if(msg->head[0] & (1<<13)) { + struct i2o_message *pmsg; /* preserved message */ + u32 pm; + + pm = readl(&msg->body[3]); + + pmsg = c->in_queue.virt + pm; + + printk("IOP fail.\n"); + printk("From %d To %d Cmd %d.\n", + (msg->head[1]>>12)&0xFFF, + msg->head[1]&0xFFF, + msg->head[1]>>24); + printk("Failure Code %d.\n", msg->body[0]>>24); + if(msg->body[0]&(1<<16)) + printk("Format error.\n"); + if(msg->body[0]&(1<<17)) + printk("Path error.\n"); + if(msg->body[0]&(1<<18)) + printk("Path State.\n"); + if(msg->body[0]&(1<<18)) + printk("Congestion.\n"); + + printk("Failing message is %p.\n", pmsg); + + cmd = i2o_cntxt_list_get(c, readl(&pmsg->tcntxt)); + if(!cmd) + return 1; + + printk("Aborted %ld\n", cmd->serial_number); + cmd->result = DID_ERROR << 16; + cmd->scsi_done(cmd); + + /* Now flush the message by making it a NOP */ + i2o_msg_nop(c, pm); + + return 1; + } + + /* + * Low byte is device status, next is adapter status, + * (then one byte reserved), then request status. + */ + ds=(u8)readl(&msg->body[0]); + as=(u8)(readl(&msg->body[0])>>8); + st=(u8)(readl(&msg->body[0])>>24); + + + /* + * Is this a control request coming back - eg an abort ? + */ + + if(!cmd) { + if(st) + printk(KERN_WARNING "SCSI abort: %08X", readl(&msg->body[0])); + printk(KERN_INFO "SCSI abort completed.\n"); + return -EFAULT; + } + + DBG("Completed %ld\n", cmd->serial_number); + + if(st) { + u32 count, error; + /* An error has occurred */ + + switch(st) { + case 0x06: + count = readl(&msg->body[1]); + if(count < cmd->underflow) { + int i; + printk(KERN_ERR "SCSI: underflow 0x%08X 0x%08X" + "\n", count, cmd->underflow); + printk("Cmd: "); + for(i=0;i<15;i++) + printk("%02X ", cmd->cmnd[i]); + printk(".\n"); + cmd->result = (DID_ERROR << 16); + } + break; + + default: + error = readl(&msg->body[0]); + + printk(KERN_ERR "scsi-osm: SCSI error %08x\n", error); + + if((error & 0xff) == 0x02 /*CHECK_CONDITION*/) { + int i; + u32 len = sizeof(cmd->sense_buffer); + len = (len > 40) ? 
40 : len; + // Copy over the sense data + memcpy(cmd->sense_buffer, (void*)&msg->body[3], + len); + for(i = 0; i <= len; i++) + printk(KERN_INFO "%02x\n", cmd->sense_buffer[i]); + if(cmd->sense_buffer[0] == 0x70 && + cmd->sense_buffer[2] == DATA_PROTECT ){ + /* This is to handle an array failed */ + cmd->result = (DID_TIME_OUT << 16); + printk(KERN_WARNING"%s: SCSI Data " + "Protect-Device (%d,%d,%d) " + "hba_status=0x%x, dev_status=" + "0x%x, cmd=0x%x\n", c->name, + (u32)cmd->device->channel, + (u32)cmd->device->id, + (u32)cmd->device->lun, + (error >> 8) & 0xff, + error & 0xff, cmd->cmnd[0]); + } else + cmd->result = (DID_ERROR << 16); + + break; + } + + switch(as) { + case 0x0E: + /* SCSI Reset */ + cmd->result = DID_RESET << 16; + break; + + case 0x0F: + cmd->result = DID_PARITY << 16; + break; + + default: + cmd->result = DID_ERROR << 16; + break; + } + + break; + } + + cmd->scsi_done(cmd); + return 1; + } + + cmd->result = DID_OK << 16 | ds; + + cmd->scsi_done(cmd); + + dev = &c->pdev->dev; + if (cmd->use_sg) + dma_unmap_sg(dev, (struct scatterlist *)cmd->buffer, + cmd->use_sg, cmd->sc_data_direction); + else if (cmd->request_bufflen) + dma_unmap_single(dev, (dma_addr_t)((long)cmd->SCp.ptr), + cmd->request_bufflen, cmd->sc_data_direction); + + return 1; +} + +/** + * i2o_scsi_queuecommand - queue a SCSI command + * @SCpnt: scsi command pointer + * @done: callback for completion + * + * Issue a scsi command asynchronously. Return 0 on success or 1 if + * we hit an error (normally message queue congestion). The only + * minor complication here is that I2O deals with the device addressing + * so we have to map the bus/dev/lun back to an I2O handle as well + * as faking absent devices ourself. + * + * Locks: takes the controller lock on error path only + */ + +static int i2o_scsi_queuecommand(struct scsi_cmnd * SCpnt, + void (*done) (struct scsi_cmnd *)) +{ + struct i2o_controller *c; + struct Scsi_Host *host; + struct i2o_device *i2o_dev; + struct device *dev; + int tid; + struct i2o_message *msg; + u32 m; + u32 scsi_flags, sg_flags; + u32 *mptr, *lenptr; + u32 len, reqlen; + int i; + + /* + * Do the incoming paperwork + */ + + i2o_dev = SCpnt->device->hostdata; + host = SCpnt->device->host; + c = i2o_dev->iop; + dev = &c->pdev->dev; + + SCpnt->scsi_done = done; + + if(unlikely(!i2o_dev)) { + printk(KERN_WARNING "scsi-osm: no I2O device in request\n"); + SCpnt->result = DID_NO_CONNECT << 16; + done(SCpnt); + return 0; + } + + tid = i2o_dev->lct_data.tid; + + DBG("qcmd: Tid = %03x\n", tid); + DBG("Real scsi messages.\n"); + + /* + * Obtain an I2O message. 
If there are none free then + * throw it back to the scsi layer + */ + + m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); + if(m==I2O_QUEUE_EMPTY) + return SCSI_MLQUEUE_HOST_BUSY; + + /* + * Put together a scsi execscb message + */ + + len = SCpnt->request_bufflen; + + switch(SCpnt->sc_data_direction) { + case PCI_DMA_NONE: + scsi_flags = 0x00000000; // DATA NO XFER + sg_flags = 0x00000000; + break; + + case PCI_DMA_TODEVICE: + scsi_flags = 0x80000000; // DATA OUT (iop-->dev) + sg_flags = 0x14000000; + break; + + case PCI_DMA_FROMDEVICE: + scsi_flags = 0x40000000; // DATA IN (iop<--dev) + sg_flags = 0x10000000; + break; + + default: + /* Unknown - kill the command */ + SCpnt->result = DID_NO_CONNECT << 16; + done(SCpnt); + return 0; + } + + writel(I2O_CMD_SCSI_EXEC<<24|HOST_TID<<12|tid, &msg->head[1]); + writel(i2o_scsi_driver.context, &msg->icntxt); + + /* We want the SCSI control block back */ + writel(i2o_cntxt_list_add(c, SCpnt), &msg->tcntxt); + + /* LSI_920_PCI_QUIRK + * + * Intermittant observations of msg frame word data corruption + * observed on msg[4] after: + * WRITE, READ-MODIFY-WRITE + * operations. 19990606 -sralston + * + * (Hence we build this word via tag. Its good practice anyway + * we don't want fetches over PCI needlessly) + */ + + /* Attach tags to the devices */ + /* + if(SCpnt->device->tagged_supported) { + if(SCpnt->tag == HEAD_OF_QUEUE_TAG) + scsi_flags |= 0x01000000; + else if(SCpnt->tag == ORDERED_QUEUE_TAG) + scsi_flags |= 0x01800000; + } + */ + + /* Direction, disconnect ok, tag, CDBLen */ + writel(scsi_flags|0x20200000|SCpnt->cmd_len, &msg->body[0]); + + mptr=&msg->body[1]; + + /* Write SCSI command into the message - always 16 byte block */ + memcpy_toio(mptr, SCpnt->cmnd, 16); + mptr+=4; + lenptr=mptr++; /* Remember me - fill in when we know */ + + reqlen = 12; // SINGLE SGE + + /* Now fill in the SGList and command */ + if(SCpnt->use_sg) { + struct scatterlist *sg; + int sg_count; + + sg = SCpnt->request_buffer; + len = 0; + + sg_count = dma_map_sg(dev, sg, SCpnt->use_sg, + SCpnt->sc_data_direction); + + if(unlikely(sg_count <= 0)) + return -ENOMEM; + + for(i = SCpnt->use_sg; i > 0; i--) { + if(i == 1) + sg_flags |= 0xC0000000; + writel(sg_flags|sg_dma_len(sg), mptr++); + writel(sg_dma_address(sg), mptr++); + len+=sg_dma_len(sg); + sg++; + } + + reqlen = mptr - &msg->head[0]; + writel(len, lenptr); + } else { + len = SCpnt->request_bufflen; + + writel(len, lenptr); + + if(len > 0) { + dma_addr_t dma_addr; + + dma_addr = dma_map_single(dev, SCpnt->request_buffer, + SCpnt->request_bufflen, + SCpnt->sc_data_direction); + if(!dma_addr) + return -ENOMEM; + + SCpnt->SCp.ptr = (void *)(unsigned long)dma_addr; + sg_flags |= 0xC0000000; + writel(sg_flags|SCpnt->request_bufflen, mptr++); + writel(dma_addr, mptr++); + } else + reqlen = 9; + } + + /* Stick the headers on */ + writel(reqlen<<16 | SGL_OFFSET_10, &msg->head[0]); + + /* Queue the message */ + i2o_msg_post(c,m); + + DBG("Issued %ld\n", SCpnt->serial_number); + + return 0; +}; + +#if 0 +/** + * i2o_scsi_abort - abort a running command + * @SCpnt: command to abort + * + * Ask the I2O controller to abort a command. This is an asynchrnous + * process and our callback handler will see the command complete + * with an aborted message if it succeeds. 
+ * + * Locks: no locks are held or needed + */ + +int i2o_scsi_abort(Scsi_Cmnd * SCpnt) +{ + struct i2o_controller *c; + struct Scsi_Host *host; + struct i2o_scsi_host *hostdata; + u32 msg[5]; + int tid; + int status = FAILED; + + printk(KERN_WARNING "i2o_scsi: Aborting command block.\n"); + + host = SCpnt->device->host; + hostdata = (struct i2o_scsi_host *)host->hostdata; + tid = hostdata->task[SCpnt->device->id][SCpnt->device->lun]; + if(tid==-1) + { + printk(KERN_ERR "i2o_scsi: Impossible command to abort!\n"); + return status; + } + c = hostdata->controller; + + spin_unlock_irq(host->host_lock); + + msg[0] = FIVE_WORD_MSG_SIZE; + msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|tid; + msg[2] = scsi_context; + msg[3] = 0; + msg[4] = i2o_context_list_remove(SCpnt, c); + if(i2o_post_wait(c, msg, sizeof(msg), 240)) + status = SUCCESS; + + spin_lock_irq(host->host_lock); + return status; +} + +/** + * i2o_scsi_bus_reset - Issue a SCSI reset + * @SCpnt: the command that caused the reset + * + * Perform a SCSI bus reset operation. In I2O this is just a message + * we pass. I2O can do clever multi-initiator and shared reset stuff + * but we don't support this. + * + * Locks: called with no lock held, requires no locks. + */ + +static int i2o_scsi_bus_reset(Scsi_Cmnd * SCpnt) +{ + int tid; + struct i2o_controller *c; + struct Scsi_Host *host; + struct i2o_scsi_host *hostdata; + u32 m; + void *msg; + unsigned long timeout; + + + /* + * Find the TID for the bus + */ + + + host = SCpnt->device->host; + + spin_unlock_irq(host->host_lock); + + printk(KERN_WARNING "i2o_scsi: Attempting to reset the bus.\n"); + + hostdata = (struct i2o_scsi_host *)host->hostdata; + tid = hostdata->bus_task; + c = hostdata->controller; + + /* + * Now send a SCSI reset request. Any remaining commands + * will be aborted by the IOP. We need to catch the reply + * possibly ? + */ + + timeout = jiffies+2*HZ; + do + { + m = le32_to_cpu(I2O_POST_READ32(c)); + if(m != 0xFFFFFFFF) + break; + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(1); + mb(); + } + while(time_before(jiffies, timeout)); + + + msg = c->msg_virt + m; + i2o_raw_writel(FOUR_WORD_MSG_SIZE|SGL_OFFSET_0, msg); + i2o_raw_writel(I2O_CMD_SCSI_BUSRESET<<24|HOST_TID<<12|tid, msg+4); + i2o_raw_writel(scsi_context|0x80000000, msg+8); + /* We use the top bit to split controller and unit transactions */ + /* Now store unit,tid so we can tie the completion back to a specific device */ + __raw_writel(c->unit << 16 | tid, msg+12); + wmb(); + + /* We want the command to complete after we return */ + spin_lock_irq(host->host_lock); + i2o_post_message(c,m); + + /* Should we wait for the reset to complete ? */ + return SUCCESS; +} +#endif + +/** + * i2o_scsi_host_reset - host reset callback + * @SCpnt: command causing the reset + * + * An I2O controller can be many things at once. While we can + * reset a controller the potential mess from doing so is vast, and + * it's better to simply hold on and pray + */ + +static int i2o_scsi_host_reset(struct scsi_cmnd * SCpnt) +{ + return FAILED; +} +#if 0 + +/** + * i2o_scsi_device_reset - device reset callback + * @SCpnt: command causing the reset + * + * I2O does not (AFAIK) support doing a device reset + */ + +static int i2o_scsi_device_reset(Scsi_Cmnd * SCpnt) +{ + return FAILED; +} + +/** + * i2o_scsi_bios_param - Invent disk geometry + * @sdev: scsi device + * @dev: block layer device + * @capacity: size in sectors + * @ip: geometry array + * + * This is anyones guess quite frankly. 
We use the same rules everyone + * else appears to and hope. It seems to work. + */ + +static int i2o_scsi_bios_param(struct scsi_device * sdev, + struct block_device *dev, sector_t capacity, int *ip) +{ + int size; + + size = capacity; + ip[0] = 64; /* heads */ + ip[1] = 32; /* sectors */ + if ((ip[2] = size >> 11) > 1024) { /* cylinders, test for big disk */ + ip[0] = 255; /* heads */ + ip[1] = 63; /* sectors */ + ip[2] = size / (255 * 63); /* cylinders */ + } + return 0; +} + + +#endif + +static struct scsi_host_template i2o_scsi_host_template = { + .proc_name = "SCSI-OSM", + .name = "I2O SCSI Peripheral OSM", + .info = i2o_scsi_info, + .queuecommand = i2o_scsi_queuecommand, +/* + .eh_abort_handler = i2o_scsi_abort, + .eh_bus_reset_handler = i2o_scsi_bus_reset, + .eh_device_reset_handler= i2o_scsi_device_reset, +*/ + .eh_host_reset_handler = i2o_scsi_host_reset, +/* + .bios_param = i2o_scsi_bios_param, +*/ + .can_queue = I2O_SCSI_CAN_QUEUE, + .sg_tablesize = 8, + .cmd_per_lun = 6, + .use_clustering = ENABLE_CLUSTERING, + .slave_alloc = i2o_scsi_slave_alloc, +}; + + +/* +int +i2o_scsi_queuecommand(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *)) +{ + printk(KERN_INFO "queuecommand\n"); + return SCSI_MLQUEUE_HOST_BUSY; +}; +*/ + +static struct i2o_scsi_host * i2o_scsi_host_alloc(struct i2o_controller *c) +{ + struct i2o_scsi_host *i2o_shost; + struct i2o_device *i2o_dev; + struct Scsi_Host *scsi_host; + int max_channel = 0; + u8 type; + int i; + size_t size; + i2o_status_block *sb; + + list_for_each_entry(i2o_dev, &c->devices, list) + if(i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT) { + if(i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) || + (type == 1)) /* SCSI bus */ + max_channel ++; + } + + if(!max_channel) { + printk(KERN_WARNING "scsi-osm: no channels found on %s\n", + c->name); + return ERR_PTR(-EFAULT); + } + + size = max_channel * sizeof(struct i2o_device *) + + sizeof(struct i2o_scsi_host); + + scsi_host = scsi_host_alloc(&i2o_scsi_host_template, size); + if(!scsi_host) { + printk(KERN_WARNING "scsi-osm: Could not allocate SCSI host\n"); + return ERR_PTR(-ENOMEM); + } + + scsi_host->max_channel = max_channel - 1; + scsi_host->max_id = i2o_scsi_max_id; + scsi_host->max_lun = i2o_scsi_max_lun; + scsi_host->this_id = c->unit; + + sb = c->status_block.virt; + + scsi_host->sg_tablesize = (sb->inbound_frame_size - + sizeof(struct i2o_message)/4 - 6) / 2; + + i2o_shost = (struct i2o_scsi_host *) scsi_host->hostdata; + i2o_shost->scsi_host = scsi_host; + i2o_shost->iop = c; + + i = 0; + list_for_each_entry(i2o_dev, &c->devices, list) + if(i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT) { + if(i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) || + (type == 1)) /* only SCSI bus */ + i2o_shost->channel[i++] = i2o_dev; + + if(i >= max_channel) + break; + } + + return i2o_shost; +}; + +/** + * i2o_scsi_get_host - Get an I2O SCSI host + * @c: I2O controller to for which to get the SCSI host + * + * If the I2O controller already exists as SCSI host, the SCSI host + * is returned, otherwise the I2O controller is added to the SCSI + * core. + * + * Returns pointer to the I2O SCSI host on success or negative error code + * on failure. 
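As a worked example of the sg_tablesize computed in i2o_scsi_host_alloc() above: assuming, hypothetically, an inbound_frame_size of 64 32-bit words, the 4-word message header (sizeof(struct i2o_message) / 4) and the 6 fixed words of the SCSI_EXEC request leave

	(64 - 4 - 6) / 2 = 27

two-word scatter/gather elements per command, which is the value stored in scsi_host->sg_tablesize.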
+ */ +static struct i2o_scsi_host *i2o_scsi_get_host(struct i2o_controller *c) +{ + struct i2o_scsi_host *i2o_shost; + int rc; + + /* skip if already registered as I2O SCSI host */ + list_for_each_entry(i2o_shost, &i2o_scsi_hosts, list) + if(i2o_shost->iop == c) + return i2o_shost; + + i2o_shost = i2o_scsi_host_alloc(c); + if(IS_ERR(i2o_shost)) { + printk(KERN_ERR "scsi-osm: Could not initialize SCSI host\n"); + return i2o_shost; + } + + rc = scsi_add_host(i2o_shost->scsi_host, &c->device); + if(rc) { + printk(KERN_ERR "scsi-osm: Could not add SCSI host\n"); + scsi_host_put(i2o_shost->scsi_host); + return ERR_PTR(rc); + } + + list_add(&i2o_shost->list, &i2o_scsi_hosts); + DBG("new I2O SCSI host added\n"); + + return i2o_shost; + +}; + +/** + * i2o_scsi_init - SCSI OSM initialization function + * + * Register SCSI OSM into I2O core. + * + * Returns 0 on success or negative error code on failure. + */ +static int __init i2o_scsi_init(void) +{ + int rc; + + printk(KERN_INFO "I2O SCSI Peripheral OSM\n"); + + /* Register SCSI OSM into I2O core */ + rc = i2o_driver_register(&i2o_scsi_driver); + if(rc) { + printk(KERN_ERR "scsi-osm: Could not register SCSI driver\n"); + return rc; + } + + return 0; +}; + +/** + * i2o_scsi_exit - SCSI OSM exit function + * + * Unregisters SCSI OSM from I2O core. + */ +static void __exit i2o_scsi_exit(void) +{ + struct i2o_scsi_host *i2o_shost, *tmp; + + /* Remove I2O SCSI hosts */ + list_for_each_entry_safe(i2o_shost, tmp, &i2o_scsi_hosts, list) { + scsi_remove_host(i2o_shost->scsi_host); + scsi_host_put(i2o_shost->scsi_host); + } + + /* Unregister I2O SCSI OSM from I2O core */ + i2o_driver_unregister(&i2o_scsi_driver); +}; + + +MODULE_AUTHOR("Red Hat Software"); +MODULE_LICENSE("GPL"); + + +module_init(i2o_scsi_init); +module_exit(i2o_scsi_exit); diff -puN include/linux/i2o-dev.h~i2o-build_99 include/linux/i2o-dev.h --- 25/include/linux/i2o-dev.h~i2o-build_99 Tue Jul 27 14:06:32 2004 +++ 25-akpm/include/linux/i2o-dev.h Tue Jul 27 14:06:32 2004 @@ -23,7 +23,7 @@ /* How many controllers are we allowing */ #define MAX_I2O_CONTROLLERS 32 -#include +//#include /* * I2O Control IOCTLs and structures @@ -42,28 +42,35 @@ #define I2OEVTREG _IOW(I2O_MAGIC_NUMBER,10,struct i2o_evt_id) #define I2OEVTGET _IOR(I2O_MAGIC_NUMBER,11,struct i2o_evt_info) #define I2OPASSTHRU _IOR(I2O_MAGIC_NUMBER,12,struct i2o_cmd_passthru) +#define I2OPASSTHRU32 _IOR(I2O_MAGIC_NUMBER,12,struct i2o_cmd_passthru32) + +struct i2o_cmd_passthru32 +{ + unsigned int iop; /* IOP unit number */ + u32 msg; /* message */ +}; struct i2o_cmd_passthru { unsigned int iop; /* IOP unit number */ - void __user *msg; /* message */ + void *msg; /* message */ }; struct i2o_cmd_hrtlct { unsigned int iop; /* IOP unit number */ - void __user *resbuf; /* Buffer for result */ - unsigned int __user *reslen; /* Buffer length in bytes */ + void *resbuf; /* Buffer for result */ + unsigned int *reslen; /* Buffer length in bytes */ }; struct i2o_cmd_psetget { unsigned int iop; /* IOP unit number */ unsigned int tid; /* Target device TID */ - void __user *opbuf; /* Operation List buffer */ + void *opbuf; /* Operation List buffer */ unsigned int oplen; /* Operation List buffer length in bytes */ - void __user *resbuf; /* Result List buffer */ - unsigned int __user *reslen; /* Result List buffer length in bytes */ + void *resbuf; /* Result List buffer */ + unsigned int *reslen; /* Result List buffer length in bytes */ }; struct i2o_sw_xfer @@ -72,10 +79,10 @@ struct i2o_sw_xfer unsigned char flags; /* Flags field */ unsigned char 
sw_type; /* Software type */ unsigned int sw_id; /* Software ID */ - void __user *buf; /* Pointer to software buffer */ - unsigned int __user *swlen; /* Length of software data */ - unsigned int __user *maxfrag; /* Maximum fragment count */ - unsigned int __user *curfrag; /* Current fragment count */ + void *buf; /* Pointer to software buffer */ + unsigned int *swlen; /* Length of software data */ + unsigned int *maxfrag; /* Maximum fragment count */ + unsigned int *curfrag; /* Current fragment count */ }; struct i2o_html @@ -83,9 +90,9 @@ struct i2o_html unsigned int iop; /* IOP unit number */ unsigned int tid; /* Target device ID */ unsigned int page; /* HTML page */ - void __user *resbuf; /* Buffer for reply HTML page */ - unsigned int __user *reslen; /* Length in bytes of reply buffer */ - void __user *qbuf; /* Pointer to HTTP query string */ + void *resbuf; /* Buffer for reply HTML page */ + unsigned int *reslen; /* Length in bytes of reply buffer */ + void *qbuf; /* Pointer to HTTP query string */ unsigned int qlen; /* Length in bytes of query string buffer */ }; @@ -351,6 +358,7 @@ typedef struct _i2o_status_block #define I2O_CLASS_BUS_ADAPTER_PORT 0x080 #define I2O_CLASS_PEER_TRANSPORT_AGENT 0x090 #define I2O_CLASS_PEER_TRANSPORT 0x091 +#define I2O_CLASS_END 0xfff /* * Rest of 0x092 - 0x09f reserved for peer-to-peer classes diff -puN include/linux/i2o.h~i2o-build_99 include/linux/i2o.h --- 25/include/linux/i2o.h~i2o-build_99 Tue Jul 27 14:06:32 2004 +++ 25-akpm/include/linux/i2o.h Tue Jul 27 14:06:32 2004 @@ -23,66 +23,116 @@ #include /* How many different OSM's are we allowing */ -#define MAX_I2O_MODULES 4 - -/* How many OSMs can register themselves for device status updates? */ -#define I2O_MAX_MANAGERS 4 +#define I2O_MAX_DRIVERS 4 +#include #include /* Needed for MUTEX init macros */ -#include -#include -#include +#include +#include + + +/* message queue empty */ +#define I2O_QUEUE_EMPTY 0xffffffff + /* * Message structures */ struct i2o_message { - u8 version_offset; - u8 flags; - u16 size; - u32 target_tid:12; - u32 init_tid:12; - u32 function:8; - u32 initiator_context; + union { + struct { + u8 version_offset; + u8 flags; + u16 size; + u32 target_tid:12; + u32 init_tid:12; + u32 function:8; + u32 icntxt; /* initiator context */ + u32 tcntxt; /* transaction context */ + }; + u32 head[4]; + }; /* List follows */ + u32 body[0]; }; /* - * Each I2O device entity has one or more of these. There is one - * per device. + * Each I2O device entity has one of these. There is one per device. */ struct i2o_device { i2o_lct_entry lct_data; /* Device LCT information */ - u32 flags; - int i2oversion; /* I2O version supported. 
					   Actually
-					 * there should be high and low
-					 * version */
-	struct proc_dir_entry *proc_entry;	/* /proc dir */
+	struct i2o_controller *iop;	/* Controlling IOP */
+	struct list_head list;		/* node in IOP devices list */
+
+	struct device device;
+
+	struct semaphore lock;		/* device lock */
+
+	struct class_device classdev;	/* i2o device class */
+};
+
+/*
+ * Event structure provided to the event handling function
+ */
+struct i2o_event {
+	struct work_struct work;
+	struct i2o_device *i2o_dev;	/* I2O device pointer from which the
+					   event reply was initiated */
+	u16 size;			/* Size of data in 32-bit words */
+	u32 tcntxt;			/* Transaction context used at
+					   registration */
+	u32 event_indicator;		/* Event indicator from reply */
+	u32 data[0];			/* Event data from reply */
+};
+
+/*
+ * I2O classes which could be handled by the OSM
+ */
+struct i2o_class_id {
+	u16 class_id:12;
+};
+
+/*
+ * I2O driver structure for OSMs
+ */
+struct i2o_driver {
+	char *name;			/* OSM name */
+	int context;			/* Low 8 bits of the transaction info */
+	struct i2o_class_id *classes;	/* I2O classes that this OSM handles */
-	/* Primary user */
-	struct i2o_handler *owner;
+	/* Message reply handler */
+	int (*reply)(struct i2o_controller *, u32, struct i2o_message *);
+
+	/* Event handler */
+	void (*event)(struct i2o_event *);
+
+	struct workqueue_struct *event_queue;	/* Event queue */
-	/* Management users */
-	struct i2o_handler *managers[I2O_MAX_MANAGERS];
-	int num_managers;
+	struct device_driver driver;
+
+	struct semaphore lock;
+};
-	struct i2o_controller *controller;	/* Controlling IOP */
-	struct i2o_device *next;	/* Chain */
-	struct i2o_device *prev;
-	char dev_name[8];		/* linux /dev name if available */
+/*
+ * Contains all information which are necessary for DMA operations
+ */
+struct i2o_dma {
+	void *virt;
+	dma_addr_t phys;
+	u32 len;
 };
 
 /*
- * context queue entry, used for 32-bit context on 64-bit systems
+ * Context queue entry, used for 32-bit context on 64-bit systems
  */
 struct i2o_context_list_element {
-	struct i2o_context_list_element *next;
+	struct list_head list;
 	u32 context;
 	void *ptr;
-	unsigned int flags;
+	unsigned long timestamp;
 };
 
 /*
@@ -93,47 +143,42 @@ struct i2o_controller
 	char name[16];
 	int unit;
 	int type;
-	int enabled;
-	struct pci_dev *pdev;		/* PCI device */
-	int irq;
-	int short_req:1;		/* Use small block sizes */
-	int dpt:1;			/* Don't quiesce */
-	int raptor:1;			/* split bar */
-	int promise:1;			/* Promise controller */
+	struct pci_dev *pdev;		/* PCI device */
+
+	int short_req:1;		/* use small block sizes */
+	int no_quiesce:1;		/* dont quiesce before reset */
+	int raptor:1;			/* split bar */
+	int promise:1;			/* Promise controller */
+
 #ifdef CONFIG_MTRR
 	int mtrr_reg0;
 	int mtrr_reg1;
 #endif
+	struct list_head devices;	/* list of I2O devices */
+
 	struct notifier_block *event_notifer;	/* Events */
 	atomic_t users;
-	struct i2o_device *devices;	/* I2O device chain */
-	struct i2o_controller *next;	/* Controller chain */
+	struct list_head list;		/* Controller list */
 	void *post_port;		/* Inbout port address */
 	void *reply_port;		/* Outbound port address */
 	void *irq_mask;			/* Interrupt register address */
 
 	/* Dynamic LCT related data */
-	struct semaphore lct_sem;
-	int lct_pid;
-	int lct_running;
-
-	i2o_status_block *status_block;	/* IOP status block */
-	dma_addr_t status_block_phys;
-	i2o_lct *lct;			/* Logical Config Table */
-	dma_addr_t lct_phys;
-	i2o_lct *dlct;			/* Temp LCT */
-	dma_addr_t dlct_phys;
-	i2o_hrt *hrt;			/* HW Resource Table */
-	dma_addr_t hrt_phys;
-	u32 hrt_len;
-	void *base_virt;		/* base virtual address */
-	unsigned long base_phys;	/* base physical address */
+	struct i2o_dma status;		/* status of IOP */
-	void *msg_virt;			/* messages virtual address */
-	unsigned long msg_phys;		/* messages physical address */
+	struct i2o_dma hrt;		/* HW Resource Table */
+	i2o_lct *lct;			/* Logical Config Table */
+	struct i2o_dma dlct;		/* Temp LCT */
+	struct semaphore lct_lock;	/* Lock for LCT updates */
+	struct i2o_dma status_block;	/* IOP status block */
+
+
+	struct i2o_dma base;		/* controller messaging unit */
+	struct i2o_dma in_queue;	/* inbound message queue Host->IOP */
+	struct i2o_dma out_queue;	/* outbound message queue IOP->Host */
 
 	int battery:1;			/* Has a battery backup */
 	int io_alloc:1;			/* An I/O resource was allocated */
@@ -145,68 +190,20 @@ struct i2o_controller
 	struct proc_dir_entry *proc_entry;	/* /proc dir */
 
-	void *page_frame;		/* Message buffers */
-	dma_addr_t page_frame_map;	/* Cache map */
+	struct list_head bus_list;	/* list of busses on IOP */
+	struct device device;
+	struct i2o_device *exec;	/* Executive */
 
 #if BITS_PER_LONG == 64
 	spinlock_t context_list_lock;	/* lock for context_list */
-	struct i2o_context_list_element *context_list;	/* list of context id's
+	atomic_t context_list_counter;	/* needed for unique contexts */
+	struct list_head context_list;	/* list of context id's
					   and pointers */
 #endif
+	spinlock_t lock;		/* lock for controller
+					   configuration */
 };
 
 /*
- * OSM resgistration block
- *
- * Each OSM creates at least one of these and registers it with the
- * I2O core through i2o_register_handler. An OSM may want to
- * register more than one if it wants a fast path to a reply
- * handler by having a separate initiator context for each
- * class function.
- */
-struct i2o_handler
-{
-	/* Message reply handler */
-	void (*reply)(struct i2o_handler *, struct i2o_controller *,
-		      struct i2o_message *);
-
-	/* New device notification handler */
-	void (*new_dev_notify)(struct i2o_controller *, struct i2o_device *);
-
-	/* Device deltion handler */
-	void (*dev_del_notify)(struct i2o_controller *, struct i2o_device *);
-
-	/* Reboot notification handler */
-	void (*reboot_notify)(void);
-
-	char *name;		/* OSM name */
-	int context;		/* Low 8 bits of the transaction info */
-	u32 class;		/* I2O classes that this driver handles */
-	/* User data follows */
-};
-
-#ifdef MODULE
-/*
- * Used by bus specific modules to communicate with the core
- *
- * This is needed because the bus modules cannot make direct
- * calls to the core as this results in the i2o_bus_specific_module
- * being dependent on the core, not the otherway around.
- * In that case, a 'modprobe i2o_lan' loads i2o_core & i2o_lan,
- * but _not_ i2o_pci...which makes the whole thing pretty useless :)
- *
- */
-struct i2o_core_func_table
-{
-	int (*install)(struct i2o_controller *);
-	int (*activate)(struct i2o_controller *);
-	struct i2o_controller *(*find)(int);
-	void (*unlock)(struct i2o_controller *);
-	void (*run_queue)(struct i2o_controller * c);
-	int (*delete)(struct i2o_controller *);
-};
-#endif /* MODULE */
-
-/*
  * I2O System table entry
  *
  * The system table contains information about all the IOPs in the
@@ -242,53 +239,291 @@ struct i2o_sys_tbl
 	struct i2o_sys_tbl_entry iops[0];
 };
+extern struct list_head i2o_controllers;
+
+
+/* Message functions */
+static inline u32 i2o_msg_get(struct i2o_controller *, struct i2o_message **);
+extern u32 i2o_msg_get_wait(struct i2o_controller *, struct i2o_message **,int);
+static inline void i2o_msg_post(struct i2o_controller *, u32);
+static inline int i2o_msg_post_wait(struct i2o_controller *,u32,unsigned long);
+extern int i2o_msg_post_wait_mem(struct i2o_controller *, u32, unsigned long,
+				 struct i2o_dma *);
+extern void i2o_msg_nop(struct i2o_controller *, u32);
+static inline void i2o_flush_reply(struct i2o_controller *, u32);
+
+
+/* DMA handling functions */
+static inline int i2o_dma_alloc(struct device *, struct i2o_dma *, size_t,
+				unsigned int);
+static inline void i2o_dma_free(struct device *, struct i2o_dma *);
+int i2o_dma_realloc(struct device *, struct i2o_dma *, size_t, unsigned int);
+
+static inline int i2o_dma_map(struct device *, struct i2o_dma *);
+static inline void i2o_dma_unmap(struct device *, struct i2o_dma *);
+
+/* IOP functions */
+extern int i2o_status_get(struct i2o_controller *);
+extern int i2o_hrt_get(struct i2o_controller *);
+
+extern int i2o_event_register(struct i2o_device *, struct i2o_driver *,int,u32);
+extern struct i2o_device *i2o_iop_find_device(struct i2o_controller *, u16);
+extern struct i2o_controller *i2o_find_iop(int);
+
+/* Functions needed for handling 64-bit pointers in 32-bit context */
+#if BITS_PER_LONG == 64
+extern u32 i2o_cntxt_list_add(struct i2o_controller *, void *);
+extern void *i2o_cntxt_list_get(struct i2o_controller *, u32);
+extern u32 i2o_cntxt_list_remove(struct i2o_controller *, void *);
+
+static inline u32 i2o_ptr_low(void *ptr)
+{
+	return (u32)(u64)ptr;
+};
+
+static inline u32 i2o_ptr_high(void *ptr)
+{
+	return (u32)((u64)ptr>>32);
+};
+#else
+static inline u32 i2o_cntxt_list_add(struct i2o_controller *c, void *ptr)
+{
+	return (u32)ptr;
+};
+
+static inline void *i2o_cntxt_list_get(struct i2o_controller *c, u32 context)
+{
+	return (void *)context;
+};
+
+static inline u32 i2o_cntxt_list_remove(struct i2o_controller *c, void *ptr)
+{
+	return (u32)ptr;
+};
+
+static inline u32 i2o_ptr_low(void *ptr)
+{
+	return (u32)ptr;
+};
+
+static inline u32 i2o_ptr_high(void *ptr)
+{
+	return 0;
+};
+#endif
+
+/* I2O driver (OSM) functions */
+extern int i2o_driver_register(struct i2o_driver *);
+extern void i2o_driver_unregister(struct i2o_driver *);
+
+/* I2O device functions */
+extern int i2o_device_claim(struct i2o_device *);
+extern int i2o_device_claim_release(struct i2o_device *);
+
+/* Exec OSM functions */
+extern int i2o_exec_lct_get(struct i2o_controller *);
+extern int i2o_exec_lct_notify(struct i2o_controller *, u32);
+
+/* device to i2o_device and driver to i2o_driver convertion functions */
+#define to_i2o_driver(drv) container_of(drv,struct i2o_driver, driver)
+#define to_i2o_device(dev) container_of(dev, struct i2o_device, device)
+
+
 /*
  * Messenger inlines
  */
 static inline u32 I2O_POST_READ32(struct i2o_controller *c)
 {
+	rmb();
 	return readl(c->post_port);
-}
+};
 
 static inline void I2O_POST_WRITE32(struct i2o_controller *c, u32 val)
 {
+	wmb();
 	writel(val, c->post_port);
-}
+};
 
 static inline u32 I2O_REPLY_READ32(struct i2o_controller *c)
 {
+	rmb();
 	return readl(c->reply_port);
-}
+};
 
 static inline void I2O_REPLY_WRITE32(struct i2o_controller *c, u32 val)
 {
+	wmb();
 	writel(val, c->reply_port);
-}
+};
 
 static inline u32 I2O_IRQ_READ32(struct i2o_controller *c)
 {
+	rmb();
 	return readl(c->irq_mask);
-}
+};
 
 static inline void I2O_IRQ_WRITE32(struct i2o_controller *c, u32 val)
 {
+	wmb();
 	writel(val, c->irq_mask);
-}
+	wmb();
+};
+
+/**
+ * i2o_msg_get - obtain an I2O message from the IOP
+ * @c: I2O controller
+ * @msg: pointer to a I2O message pointer
+ *
+ * This function tries to get a message slot. If no message slot is
+ * available do not wait until one is availabe (see also i2o_msg_get_wait).
+ *
+ * On a success the message is returned and the pointer to the message is
+ * set in msg. The returned message is the physical page frame offset
+ * address from the read port (see the i2o spec). If no message is
+ * available returns I2O_QUEUE_EMPTY and msg is leaved untouched.
+ */
+static inline u32 i2o_msg_get(struct i2o_controller *c,struct i2o_message **msg)
+{
+	u32 m;
+
+	if((m=I2O_POST_READ32(c))!=I2O_QUEUE_EMPTY)
+		*msg = c->in_queue.virt + m;
+	return m;
+};
 
-static inline void i2o_post_message(struct i2o_controller *c, u32 m)
+/**
+ * i2o_msg_post - Post I2O message to I2O controller
+ * @c: I2O controller to which the message should be send
+ * @m: the message identifier
+ *
+ * Post the message to the I2O controller.
+ */
+static inline void i2o_msg_post(struct i2o_controller *c, u32 m)
 {
-	/* The second line isnt spurious - thats forcing PCI posting */
 	I2O_POST_WRITE32(c, m);
-	(void) I2O_IRQ_READ32(c);
-}
+};
+
+/**
+ * i2o_msg_post_wait - Post and wait a message and wait until return
+ * @c: controller
+ * @m: message to post
+ * @timeout: time in seconds to wait
+ *
+ * This API allows an OSM to post a message and then be told whether or
+ * not the system received a successful reply. If the message times out
+ * then the value '-ETIMEDOUT' is returned.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static inline int i2o_msg_post_wait(struct i2o_controller *c, u32 m,
+				    unsigned long timeout)
+{
+	return i2o_msg_post_wait_mem(c, m, timeout, NULL);
+};
+/**
+ * i2o_flush_reply - Flush reply from I2O controller
+ * @c: I2O controller
+ * @m: the message identifier
+ *
+ * The I2O controller must be informed that the reply message is not needed
+ * anymore. If you forget to flush the reply, the message frame can't be
+ * used by the controller anymore and is therefore lost.
+ *
+ * FIXME: is there a timeout after which the controller reuse the message?
+ */
 static inline void i2o_flush_reply(struct i2o_controller *c, u32 m)
 {
 	I2O_REPLY_WRITE32(c, m);
-}
+};
+
+/**
+ * i2o_dma_alloc - Allocate DMA memory
+ * @dev: struct device pointer to the PCI device of the I2O controller
+ * @addr: i2o_dma struct which should get the DMA buffer
+ * @len: length of the new DMA memory
+ * @gfp_mask: GFP mask
+ *
+ * Allocate a coherent DMA memory and write the pointers into addr.
+ *
+ * Returns 0 on success or -ENOMEM on failure.
+ */
+static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr,
+				size_t len, unsigned int gfp_mask)
+{
+	addr->virt = dma_alloc_coherent(dev, len, &addr->phys, gfp_mask);
+	if(!addr->virt)
+		return -ENOMEM;
+
+	memset(addr->virt, 0, len);
+	addr->len = len;
+
+	return 0;
+};
+
+/**
+ * i2o_dma_free - Free DMA memory
+ * @dev: struct device pointer to the PCI device of the I2O controller
+ * @addr: i2o_dma struct which contains the DMA buffer
+ *
+ * Free a coherent DMA memory and set virtual address of addr to NULL.
+ */
+static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
+{
+	if(addr->virt) {
+		if(addr->phys)
+			dma_free_coherent(dev, addr->len,addr->virt,addr->phys);
+		else
+			kfree(addr->virt);
+		addr->virt = NULL;
+	}
+};
+
+/**
+ * i2o_dma_map - Map the memory to DMA
+ * @dev: struct device pointer to the PCI device of the I2O controller
+ * @addr: i2o_dma struct which should be mapped
+ *
+ * Map the memory in addr->virt to coherent DMA memory and write the
+ * physical address into addr->phys.
+ *
+ * Returns 0 on success or -ENOMEM on failure.
+ */
+static inline int i2o_dma_map(struct device *dev, struct i2o_dma *addr)
+{
+	if(!addr->virt)
+		return -EFAULT;
+
+	if(!addr->phys)
+		addr->phys = dma_map_single(dev, addr->virt, addr->len,
+					    DMA_BIDIRECTIONAL);
+	if(!addr->phys)
+		return -ENOMEM;
+
+	return 0;
+};
+
+/**
+ * i2o_dma_unmap - Unmap the DMA memory
+ * @dev: struct device pointer to the PCI device of the I2O controller
+ * @addr: i2o_dma struct which should be unmapped
+ *
+ * Unmap the memory in addr->virt from DMA memory.
+ */
+static inline void i2o_dma_unmap(struct device *dev, struct i2o_dma *addr)
+{
+	if(!addr->virt)
+		return;
+
+	if(addr->phys) {
+		dma_unmap_single(dev, addr->phys, addr->len, DMA_BIDIRECTIONAL);
+		addr->phys = 0;
+	}
+};
+
 
 /*
  * Endian handling wrapped into the macro - keeps the core code
@@ -297,30 +532,12 @@ static inline void i2o_flush_reply(struc
 
 #define i2o_raw_writel(val, mem) __raw_writel(cpu_to_le32(val), mem)
 
-extern struct i2o_controller *i2o_find_controller(int);
-extern void i2o_unlock_controller(struct i2o_controller *);
-extern struct i2o_controller *i2o_controller_chain;
-extern int i2o_num_controllers;
-extern int i2o_status_get(struct i2o_controller *);
-extern int i2o_install_handler(struct i2o_handler *);
-extern int i2o_remove_handler(struct i2o_handler *);
-
-extern int i2o_claim_device(struct i2o_device *, struct i2o_handler *);
-extern int i2o_release_device(struct i2o_device *, struct i2o_handler *);
-extern int i2o_device_notify_on(struct i2o_device *, struct i2o_handler *);
-extern int i2o_device_notify_off(struct i2o_device *,
-				 struct i2o_handler *);
-
-extern int i2o_post_this(struct i2o_controller *, u32 *, int);
-extern int i2o_post_wait(struct i2o_controller *, u32 *, int, int);
-extern int i2o_post_wait_mem(struct i2o_controller *, u32 *, int, int,
-			     void *, void *, dma_addr_t, dma_addr_t, int, int);
-
-extern int i2o_query_scalar(struct i2o_controller *, int, int, int, void *,
-			    int);
-extern int i2o_set_scalar(struct i2o_controller *, int, int, int, void *,
-			  int);
+extern int i2o_parm_field_get(struct i2o_device *, int, int, void *, int);
+extern int i2o_parm_field_set(struct i2o_device *, int, int, void *, int);
+extern int i2o_parm_table_get(struct i2o_device *, int, int, int, void *, int,
+			      void *, int);
+/* FIXME: remove
 extern int i2o_query_table(int, struct i2o_controller *, int, int, int,
			   void *, int, void *, int);
 extern int i2o_clear_table(struct i2o_controller *, int, int);
@@ -328,39 +545,15 @@ extern int i2o_row_add_table(struct i2o_
			     void *, int);
 extern int i2o_issue_params(int, struct i2o_controller *, int, void *, int,
			    void *, int);
+*/
 
-extern int i2o_event_register(struct i2o_controller *, u32, u32, u32, u32);
-extern int i2o_event_ack(struct i2o_controller *, u32 *);
-extern void i2o_report_status(const char *, const char *, u32 *);
-extern void i2o_dump_message(u32 *);
-extern const char *i2o_get_class_name(int);
-
-extern int i2o_install_controller(struct i2o_controller *);
-extern int i2o_activate_controller(struct i2o_controller *);
-extern void i2o_run_queue(struct i2o_controller *);
-extern int i2o_delete_controller(struct i2o_controller *);
+/* debugging functions */
+extern void i2o_report_status(const char *, const char *, struct i2o_message *);
+extern void i2o_dump_message(struct i2o_message *);
+extern void i2o_dump_hrt(struct i2o_controller *c);
+extern void i2o_debug_state(struct i2o_controller *c);
 
-#if BITS_PER_LONG == 64
-extern u32 i2o_context_list_add(void *, struct i2o_controller *);
-extern void *i2o_context_list_get(u32, struct i2o_controller *);
-extern u32 i2o_context_list_remove(void *, struct i2o_controller *);
-#else
-static inline u32 i2o_context_list_add(void *ptr, struct i2o_controller *c)
-{
-	return (u32)ptr;
-}
-
-static inline void *i2o_context_list_get(u32 context, struct i2o_controller *c)
-{
-	return (void *)context;
-}
-
-static inline u32 i2o_context_list_remove(void *ptr, struct i2o_controller *c)
-{
-	return (u32)ptr;
-}
-#endif
 
 /*
  * Cache strategies
@@ -414,8 +607,6 @@ static inline u32 i2o_context_list_remov
 
 #define BLKI2OSWSTRAT _IOW('2', 4, int)
 
-
-
 /*
  * I2O Function codes
  */
@@ -679,7 +870,7 @@ static inline u32 i2o_context_list_remov
 #define ADAPTER_TID		0
 #define HOST_TID		1
 
-#define MSG_FRAME_SIZE		64	/* i2o_scsi assumes >= 32 */
+#define MSG_FRAME_SIZE		128	/* i2o_scsi assumes >= 32 */
 #define REPLY_FRAME_SIZE	17
 #define SG_TABLESIZE		30
 #define NMBR_MSG_FRAMES		128
@@ -693,5 +884,22 @@ static inline u32 i2o_context_list_remov
 
 #define I2O_CONTEXT_LIST_USED		0x01
 #define I2O_CONTEXT_LIST_DELETED	0x02
+/* timeouts */
+#define I2O_TIMEOUT_INIT_OUTBOUND_QUEUE	15
+#define I2O_TIMEOUT_MESSAGE_GET		5
+#define I2O_TIMEOUT_RESET		30
+#define I2O_TIMEOUT_STATUS_GET		5
+#define I2O_TIMEOUT_LCT_GET		20
+
+/* retries */
+#define I2O_HRT_GET_TRIES		3
+#define I2O_LCT_GET_TRIES		3
+
+/* request queue sizes */
+#define I2O_MAX_SECTORS		1024
+#define I2O_MAX_SEGMENTS	128
+
+#define I2O_REQ_MEMPOOL_SIZE	32
+
 #endif /* __KERNEL__ */
 #endif /* _I2O_H */
_
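
A few usage sketches against the new declarations in i2o.h may help when reading the diff; none of this code is part of the patch, and every example_* name is made up. First, OSM registration: instead of filling an i2o_handler and calling i2o_install_handler(), an OSM now embeds a struct device_driver inside a struct i2o_driver and registers it with i2o_driver_register(). A minimal skeleton might look like this (the class constants are assumed to be the usual I2O_CLASS_* defines, with I2O_CLASS_END closing the table):

	/* sketch only -- not part of the patch */
	#include <linux/module.h>
	#include <linux/i2o.h>

	/* reply handler: called by the core for every reply frame carrying
	 * this driver's initiator context (signature from i2o.h above) */
	static int example_reply(struct i2o_controller *c, u32 m,
				 struct i2o_message *msg)
	{
		return 0;	/* assumption: 0 means "reply handled" */
	}

	/* hypothetical class table */
	static struct i2o_class_id example_classes[] = {
		{I2O_CLASS_SCSI_PERIPHERAL},
		{I2O_CLASS_END}
	};

	static struct i2o_driver example_driver = {
		.name = "example-osm",
		.reply = example_reply,
		.classes = example_classes,
	};

	static int __init example_init(void)
	{
		return i2o_driver_register(&example_driver);
	}

	static void __exit example_exit(void)
	{
		i2o_driver_unregister(&example_driver);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");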
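The message path keeps the get/fill/post pattern, but the request is now built in place in the inbound message frame instead of in local memory. An OSM that wants a synchronous round trip does roughly the following (frame construction is elided; I2O_QUEUE_EMPTY and I2O_TIMEOUT_MESSAGE_GET come from the header, the 60 second reply timeout is only an example):

	/* sketch only: post a request and sleep until the reply arrives */
	static int example_send(struct i2o_controller *c)
	{
		struct i2o_message *msg;
		u32 m;

		/* wait up to I2O_TIMEOUT_MESSAGE_GET seconds for a free
		 * inbound message frame */
		m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
		if (m == I2O_QUEUE_EMPTY)
			return -ETIMEDOUT;

		/* ... build the request directly in *msg ... */

		/* post the frame and wait up to 60 seconds for the reply */
		return i2o_msg_post_wait(c, m, 60);
	}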
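The struct i2o_dma triple (virt, phys, len) is what the status block, HRT, LCT and message queues in struct i2o_controller are now kept in, and the i2o_dma_* helpers manage such buffers. A sketch of the alloc/free pair (the buffer size is arbitrary; &c->pdev->dev is assumed to be the right DMA device, since the controller struct only carries the pci_dev):

	/* sketch only: allocate, use and release a coherent buffer */
	static int example_buffer(struct i2o_controller *c)
	{
		struct i2o_dma buf;

		if (i2o_dma_alloc(&c->pdev->dev, &buf, PAGE_SIZE, GFP_KERNEL))
			return -ENOMEM;

		/* buf.virt is the CPU pointer, buf.phys the address the IOP
		 * sees, buf.len was set to PAGE_SIZE by the helper */

		i2o_dma_free(&c->pdev->dev, &buf);
		return 0;
	}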
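Finally, the per-controller context list replaces the old global i2o_context_list_* calls. On 64-bit machines a kernel pointer no longer fits into the 32-bit transaction context of a message frame, so the pointer is parked in the controller's list and only the 32-bit handle travels to the IOP; on 32-bit builds the helpers collapse to plain casts, as shown in the header. Whether i2o_cntxt_list_get() also drops the entry or an explicit i2o_cntxt_list_remove() is still required is up to the core implementation and not visible in this header. Roughly:

	/* sketch only: 64-bit pointer <-> 32-bit transaction context */
	static u32 example_ctx_store(struct i2o_controller *c, void *req)
	{
		/* returns a 32-bit handle for the message's tcntxt field */
		return i2o_cntxt_list_add(c, req);
	}

	static void *example_ctx_lookup(struct i2o_controller *c, u32 tcntxt)
	{
		/* reply path: turn the handle back into the pointer */
		return i2o_cntxt_list_get(c, tcntxt);
	}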