From: Martin Schwidefsky - Add module license gpl. - Add debug messages. - Make blocksize persistent after close. Limit blocksize to 64k. - Check tape state against TS_INIT/TS_UNUSED for special case of medium sense and assign. - Assign tape as soon as they are set online and unassign when set offline. - Correct implementation of MT_EOD. - Add backward compatible tape.agent hotplug support (to be removed as soon as a full blown tape class is implemented). - Add state to differentiate between character device and block device access. - Make tape block device usable. - Add 34xx seek speedup code. - Fix device reference counting. - Fix online-offline-online cycle. - Add timeout to standard assign function. - Correct calculation of device index in tape_get_device(). - Check idal buffer for fixed block size reads and writes. - Adapt to notify api change in cio. - Add sysfs attributes for tape state, first minor, current operation and current blocksize. --- 25-akpm/drivers/s390/char/tape.h | 90 +++- 25-akpm/drivers/s390/char/tape_34xx.c | 657 ++++++++++++++++++++++++--------- 25-akpm/drivers/s390/char/tape_block.c | 453 ++++++++++++++-------- 25-akpm/drivers/s390/char/tape_char.c | 173 ++++++-- 25-akpm/drivers/s390/char/tape_core.c | 496 ++++++++++++++++-------- 25-akpm/drivers/s390/char/tape_proc.c | 14 25-akpm/drivers/s390/char/tape_std.c | 225 ++++++++--- 25-akpm/drivers/s390/char/tape_std.h | 12 8 files changed, 1475 insertions(+), 645 deletions(-) diff -puN drivers/s390/char/tape_34xx.c~s390-05-tape-driver drivers/s390/char/tape_34xx.c --- 25/drivers/s390/char/tape_34xx.c~s390-05-tape-driver Thu Jan 8 14:11:31 2004 +++ 25-akpm/drivers/s390/char/tape_34xx.c Thu Jan 8 14:11:31 2004 @@ -14,31 +14,60 @@ #include #include #include -#include #include "tape.h" #include "tape_std.h" -#define PRINTK_HEADER "T34xx:" +#define PRINTK_HEADER "TAPE_34XX: " -enum tape34xx_type { +enum tape_34xx_type { tape_3480, tape_3490, }; +#define TAPE34XX_FMT_3480 0 +#define TAPE34XX_FMT_3480_2_XF 1 +#define TAPE34XX_FMT_3480_XF 2 + +struct tape_34xx_block_id { + unsigned int wrap : 1; + unsigned int segment : 7; + unsigned int format : 2; + unsigned int block : 22; +}; + /* - * Medium sense (asynchronous with callback) for 34xx tapes. There is no 'real' - * medium sense call. So we just do a normal sense. + * A list of block ID's is used to faster seek blocks. */ -static void -__tape_34xx_medium_sense_callback(struct tape_request *request, void *data) +struct tape_34xx_sbid { + struct list_head list; + struct tape_34xx_block_id bid; +}; + +static void tape_34xx_delete_sbid_from(struct tape_device *, int); + +/* + * Medium sense for 34xx tapes. There is no 'real' medium sense call. + * So we just do a normal sense. + */ +static int +tape_34xx_medium_sense(struct tape_device *device) { - unsigned char *sense; - struct tape_device *device; + struct tape_request *request; + unsigned char *sense; + int rc; - request->callback = NULL; + request = tape_alloc_request(1, 32); + if (IS_ERR(request)) { + DBF_EXCEPTION(6, "MSEN fail\n"); + return PTR_ERR(request); + } - if(request->rc == 0 && (device = request->device) != NULL) { + request->op = TO_MSEN; + tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata); + + rc = tape_do_io_interruptible(device, request); + if (request->rc == 0) { sense = request->cpdata; /* @@ -47,36 +76,20 @@ __tape_34xx_medium_sense_callback(struct * only slightly inaccurate to say there is no tape loaded if * the drive isn't online... 
*/ - if(sense[0] & SENSE_INTERVENTION_REQUIRED) + if (sense[0] & SENSE_INTERVENTION_REQUIRED) tape_med_state_set(device, MS_UNLOADED); else tape_med_state_set(device, MS_LOADED); - if(sense[1] & SENSE_WRITE_PROTECT) + if (sense[1] & SENSE_WRITE_PROTECT) device->tape_generic_status |= GMT_WR_PROT(~0); else device->tape_generic_status &= ~GMT_WR_PROT(~0); + } else { + DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n", + request->rc); } tape_free_request(request); -} - -static int -tape_34xx_medium_sense(struct tape_device *device) -{ - struct tape_request *request; - int rc; - - request = tape_alloc_request(1, 32); - if(IS_ERR(request)) { - DBF_EXCEPTION(6, "MSEN fail\n"); - return PTR_ERR(request); - } - - request->op = TO_MSEN; - tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata); - request->callback = __tape_34xx_medium_sense_callback; - - rc = tape_do_io_async(device, request); return rc; } @@ -104,7 +117,7 @@ tape_34xx_work_handler(void *data) DBF_EVENT(3, "T34XX: internal error: unknown work\n"); } - tape_put_device(p->device); + p->device = tape_put_device(p->device); kfree(p); } @@ -117,14 +130,13 @@ tape_34xx_schedule_work(struct tape_devi struct work_struct work; } *p; - if((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL) + if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL) return -ENOMEM; memset(p, 0, sizeof(*p)); INIT_WORK(&p->work, tape_34xx_work_handler, p); - atomic_inc(&device->ref_count); - p->device = device; + p->device = tape_get_device_reference(device); p->op = op; schedule_work(&p->work); @@ -134,37 +146,46 @@ tape_34xx_schedule_work(struct tape_devi /* * Done Handler is called when dev stat = DEVICE-END (successful operation) */ -static int -tape_34xx_done(struct tape_device *device, struct tape_request *request) +static inline int +tape_34xx_done(struct tape_request *request) { DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); + switch (request->op) { + case TO_DSE: + case TO_RUN: + case TO_WRI: + case TO_WTM: + case TO_ASSIGN: + case TO_UNASSIGN: + tape_34xx_delete_sbid_from(request->device, 0); + break; + default: + ; + } return TAPE_IO_SUCCESS; } static inline int -tape_34xx_erp_failed(struct tape_device *device, - struct tape_request *request, int rc) +tape_34xx_erp_failed(struct tape_request *request, int rc) { - DBF_EVENT(3, "Error recovery failed for %s\n", - tape_op_verbose[request->op]); + DBF_EVENT(3, "Error recovery failed for %s (RC=%d)\n", + tape_op_verbose[request->op], rc); return rc; } static inline int -tape_34xx_erp_succeeded(struct tape_device *device, - struct tape_request *request) +tape_34xx_erp_succeeded(struct tape_request *request) { DBF_EVENT(3, "Error Recovery successful for %s\n", tape_op_verbose[request->op]); - return tape_34xx_done(device, request); + return tape_34xx_done(request); } static inline int -tape_34xx_erp_retry(struct tape_device *device, struct tape_request *request) +tape_34xx_erp_retry(struct tape_request *request) { - DBF_EVENT(3, "xerp retr %s\n", - tape_op_verbose[request->op]); + DBF_EVENT(3, "xerp retr %s\n", tape_op_verbose[request->op]); return TAPE_IO_RETRY; } @@ -178,6 +199,7 @@ tape_34xx_unsolicited_irq(struct tape_de if (irb->scsw.dstat == 0x85 /* READY */) { /* A medium was inserted in the drive. */ DBF_EVENT(6, "xuud med\n"); + tape_34xx_delete_sbid_from(device, 0); tape_34xx_schedule_work(device, TO_MSEN); } else { DBF_EVENT(3, "unsol.irq! dev end: %s\n", @@ -203,7 +225,7 @@ tape_34xx_erp_read_opposite(struct tape_ * and try again. 
*/ tape_std_read_backward(device, request); - return tape_34xx_erp_retry(device, request); + return tape_34xx_erp_retry(request); } if (request->op != TO_RBA) PRINT_ERR("read_opposite called with state:%s\n", @@ -212,7 +234,7 @@ tape_34xx_erp_read_opposite(struct tape_ * We tried to read forward and backward, but hat no * success -> failed. */ - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); } static int @@ -228,7 +250,7 @@ tape_34xx_erp_bug(struct tape_device *de tape_op_verbose[request->op]); tape_dump_sense(device, request, irb); } - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); } /* @@ -243,7 +265,7 @@ tape_34xx_erp_overrun(struct tape_device PRINT_WARN ("Data overrun error between control-unit " "and drive. Use a faster channel connection, " "if possible! \n"); - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); } return tape_34xx_erp_bug(device, request, irb, -1); } @@ -260,7 +282,7 @@ tape_34xx_erp_sequence(struct tape_devic * cu detected incorrect block-id sequence on tape. */ PRINT_WARN("Illegal block-id sequence found!\n"); - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); } /* * Record sequence error bit is set, but erpa does not @@ -281,6 +303,9 @@ tape_34xx_unit_check(struct tape_device int inhibit_cu_recovery; __u8* sense; + inhibit_cu_recovery = (*device->modeset_byte & 0x80) ? 1 : 0; + sense = irb->ecw; + #ifdef CONFIG_S390_TAPE_BLOCK if (request->op == TO_BLOCK) { /* @@ -289,51 +314,95 @@ tape_34xx_unit_check(struct tape_device */ device->blk_data.block_position = -1; if (request->retries-- <= 0) - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); else - return tape_34xx_erp_retry(device, request); + return tape_34xx_erp_retry(request); } #endif - inhibit_cu_recovery = (*device->modeset_byte & 0x80) ? 1 : 0; - sense = irb->ecw; - - if (sense[0] & SENSE_COMMAND_REJECT) { - if ((sense[1] & SENSE_WRITE_PROTECT) && - (request->op == TO_DSE || - request->op == TO_WRI || - request->op == TO_WTM)) + if ( + sense[0] & SENSE_COMMAND_REJECT && + sense[1] & SENSE_WRITE_PROTECT + ) { + if ( + request->op == TO_DSE || + request->op == TO_WRI || + request->op == TO_WTM + ) { /* medium is write protected */ - return tape_34xx_erp_failed(device, request, -EACCES); - else + return tape_34xx_erp_failed(request, -EACCES); + } else { return tape_34xx_erp_bug(device, request, irb, -3); + } } /* - * special cases for various tape-states when reaching + * Special cases for various tape-states when reaching * end of recorded area + * + * FIXME: Maybe a special case of the special case: + * sense[0] == SENSE_EQUIPMENT_CHECK && + * sense[1] == SENSE_DRIVE_ONLINE && + * sense[3] == 0x47 (Volume Fenced) + * + * This was caused by continued FSF or FSR after an + * 'End Of Data'. */ - if ((sense[0] == 0x08 || sense[0] == 0x10 || sense[0] == 0x12) && - (sense[1] == 0x40 || sense[1] == 0x0c)) + if (( + sense[0] == SENSE_DATA_CHECK || + sense[0] == SENSE_EQUIPMENT_CHECK || + sense[0] == SENSE_EQUIPMENT_CHECK + SENSE_DEFERRED_UNIT_CHECK + ) && ( + sense[1] == SENSE_DRIVE_ONLINE || + sense[1] == SENSE_BEGINNING_OF_TAPE + SENSE_WRITE_MODE + )) { switch (request->op) { + /* + * sense[0] == SENSE_DATA_CHECK && + * sense[1] == SENSE_DRIVE_ONLINE + * sense[3] == 0x36 (End Of Data) + * + * Further seeks might return a 'Volume Fenced'. 
+ */ case TO_FSF: + case TO_FSB: /* Trying to seek beyond end of recorded area */ - return tape_34xx_erp_failed(device, request, -ENOSPC); + return tape_34xx_erp_failed(request, -ENOSPC); + case TO_BSB: + return tape_34xx_erp_retry(request); + + /* + * sense[0] == SENSE_DATA_CHECK && + * sense[1] == SENSE_DRIVE_ONLINE && + * sense[3] == 0x36 (End Of Data) + */ case TO_LBL: /* Block could not be located. */ - return tape_34xx_erp_failed(device, request, -EIO); + tape_34xx_delete_sbid_from(device, 0); + return tape_34xx_erp_failed(request, -EIO); + case TO_RFO: /* Read beyond end of recorded area -> 0 bytes read */ - return tape_34xx_erp_failed(device, request, 0); + return tape_34xx_erp_failed(request, 0); + + /* + * sense[0] == SENSE_EQUIPMENT_CHECK && + * sense[1] == SENSE_DRIVE_ONLINE && + * sense[3] == 0x38 (Physical End Of Volume) + */ + case TO_WRI: + /* Writing at physical end of volume */ + return tape_34xx_erp_failed(request, -ENOSPC); default: PRINT_ERR("Invalid op in %s:%i\n", __FUNCTION__, __LINE__); - return tape_34xx_erp_failed(device, request, 0); + return tape_34xx_erp_failed(request, 0); } + } /* Sensing special bits */ if (sense[0] & SENSE_BUS_OUT_CHECK) - return tape_34xx_erp_retry(device, request); + return tape_34xx_erp_retry(request); if (sense[0] & SENSE_DATA_CHECK) { /* @@ -353,7 +422,7 @@ tape_34xx_unit_check(struct tape_device /* data check is permanent, CU recovery has failed */ PRINT_WARN("Permanent read error\n"); - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); case 0x25: // a write data check occurred if ((sense[2] & SENSE_TAPE_SYNC_MODE) || @@ -366,22 +435,22 @@ tape_34xx_unit_check(struct tape_device // data check is permanent, cu-recovery has failed PRINT_WARN("Permanent write error\n"); - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); case 0x26: /* Data Check (read opposite) occurred. */ return tape_34xx_erp_read_opposite(device, request); case 0x28: /* ID-Mark at tape start couldn't be written */ PRINT_WARN("ID-Mark could not be written.\n"); - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); case 0x31: /* Tape void. Tried to read beyond end of device. */ PRINT_WARN("Read beyond end of recorded area.\n"); - return tape_34xx_erp_failed(device, request, -ENOSPC); + return tape_34xx_erp_failed(request, -ENOSPC); case 0x41: /* Record sequence error. */ PRINT_WARN("Invalid block-id sequence found.\n"); - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); default: /* all data checks for 3480 should result in one of * the above erpa-codes. For 3490, other data-check @@ -412,7 +481,7 @@ tape_34xx_unit_check(struct tape_device */ PRINT_WARN("Data streaming not operational. " "Switching to interlock-mode.\n"); - return tape_34xx_erp_retry(device, request); + return tape_34xx_erp_retry(request); case 0x22: /* * Path equipment check. Might be drive adapter error, buffer @@ -424,14 +493,14 @@ tape_34xx_unit_check(struct tape_device PRINT_WARN("drive adapter error, buffer error on the lower " "interface, internal path not usable, error " "during cartridge load.\n"); - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); case 0x24: /* * Load display check. Load display was command was issued, * but the drive is displaying a drive check message. Can * be threated as "device end". 
*/ - return tape_34xx_erp_succeeded(device, request); + return tape_34xx_erp_succeeded(request); case 0x27: /* * Command reject. May indicate illegal channel program or @@ -439,7 +508,7 @@ tape_34xx_unit_check(struct tape_device * issued by this driver and ought be correct, we assume a * over/underrun situation and retry the channel program. */ - return tape_34xx_erp_retry(device, request); + return tape_34xx_erp_retry(request); case 0x29: /* * Function incompatible. Either the tape is idrc compressed @@ -447,13 +516,13 @@ tape_34xx_unit_check(struct tape_device * subsystem func is issued and the CU is not on-line. */ PRINT_WARN ("Function incompatible. Try to switch off idrc\n"); - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); case 0x2a: /* * Unsolicited environmental data. An internal counter * overflows, we can ignore this and reissue the cmd. */ - return tape_34xx_erp_retry(device, request); + return tape_34xx_erp_retry(request); case 0x2b: /* * Environmental data present. Indicates either unload @@ -462,7 +531,7 @@ tape_34xx_unit_check(struct tape_device if (request->op == TO_RUN) { /* Rewind unload completed ok. */ tape_med_state_set(device, MS_UNLOADED); - return tape_34xx_erp_succeeded(device, request); + return tape_34xx_erp_succeeded(request); } /* tape_34xx doesn't use read buffered log commands. */ return tape_34xx_erp_bug(device, request, irb, sense[3]); @@ -471,11 +540,11 @@ tape_34xx_unit_check(struct tape_device * Permanent equipment check. CU has tried recovery, but * did not succeed. */ - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); case 0x2d: /* Data security erase failure. */ if (request->op == TO_DSE) - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); /* Data security erase failure, but no such command issued. */ return tape_34xx_erp_bug(device, request, irb, sense[3]); case 0x2e: @@ -484,16 +553,16 @@ tape_34xx_unit_check(struct tape_device * reading the format id mark or that that format specified * is not supported by the drive. */ - PRINT_WARN("Drive not capable processing the tape format!"); - return tape_34xx_erp_failed(device, request, -EMEDIUMTYPE); + PRINT_WARN("Drive not capable processing the tape format!\n"); + return tape_34xx_erp_failed(request, -EMEDIUMTYPE); case 0x30: /* The medium is write protected. */ PRINT_WARN("Medium is write protected!\n"); - return tape_34xx_erp_failed(device, request, -EACCES); + return tape_34xx_erp_failed(request, -EACCES); case 0x32: // Tension loss. We cannot recover this, it's an I/O error. PRINT_WARN("The drive lost tape tension.\n"); - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); case 0x33: /* * Load Failure. The cartridge was not inserted correctly or @@ -501,7 +570,8 @@ tape_34xx_unit_check(struct tape_device */ PRINT_WARN("Cartridge load failure. Reload the cartridge " "and try again.\n"); - return tape_34xx_erp_failed(device, request, -EIO); + tape_34xx_delete_sbid_from(device, 0); + return tape_34xx_erp_failed(request, -EIO); case 0x34: /* * Unload failure. The drive cannot maintain tape tension @@ -510,7 +580,7 @@ tape_34xx_unit_check(struct tape_device PRINT_WARN("Failure during cartridge unload. 
" "Please try manually.\n"); if (request->op == TO_RUN) - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); return tape_34xx_erp_bug(device, request, irb, sense[3]); case 0x35: /* @@ -522,11 +592,11 @@ tape_34xx_unit_check(struct tape_device */ PRINT_WARN("Equipment check! Please check the drive and " "the cartridge loader.\n"); - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); case 0x36: if (device->cdev->id.driver_info == tape_3490) /* End of data. */ - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); /* This erpa is reserved for 3480 */ return tape_34xx_erp_bug(device, request, irb, sense[3]); case 0x37: @@ -535,7 +605,7 @@ tape_34xx_unit_check(struct tape_device * the beginning-of-tape data. */ PRINT_WARN("Tape length error.\n"); - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); case 0x38: /* * Physical end of tape. A read/write operation reached @@ -544,55 +614,58 @@ tape_34xx_unit_check(struct tape_device if (request->op==TO_WRI || request->op==TO_DSE || request->op==TO_WTM) - return tape_34xx_erp_failed(device, request, -ENOSPC); - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -ENOSPC); + return tape_34xx_erp_failed(request, -EIO); case 0x39: /* Backward at Beginning of tape. */ - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); case 0x3a: /* Drive switched to not ready. */ PRINT_WARN("Drive not ready. Turn the ready/not ready switch " "to ready position and try again.\n"); - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); case 0x3b: /* Manual rewind or unload. This causes an I/O error. */ PRINT_WARN("Medium was rewound or unloaded manually.\n"); - return tape_34xx_erp_failed(device, request, -EIO); + tape_34xx_delete_sbid_from(device, 0); + return tape_34xx_erp_failed(request, -EIO); case 0x42: /* * Degraded mode. A condition that can cause degraded * performance is detected. */ PRINT_WARN("Subsystem is running in degraded mode.\n"); - return tape_34xx_erp_retry(device, request); + return tape_34xx_erp_retry(request); case 0x43: /* Drive not ready. */ + tape_34xx_delete_sbid_from(device, 0); tape_med_state_set(device, MS_UNLOADED); /* Some commands commands are successful even in this case */ - if(sense[1] & SENSE_DRIVE_ONLINE) { + if (sense[1] & SENSE_DRIVE_ONLINE) { switch(request->op) { case TO_ASSIGN: case TO_UNASSIGN: case TO_DIS: - return tape_34xx_done(device, request); + case TO_NOP: + return tape_34xx_done(request); break; default: break; } } PRINT_WARN("The drive is not ready.\n"); - return tape_34xx_erp_failed(device, request, -ENOMEDIUM); + return tape_34xx_erp_failed(request, -ENOMEDIUM); case 0x44: /* Locate Block unsuccessful. */ if (request->op != TO_BLOCK && request->op != TO_LBL) /* No locate block was issued. */ return tape_34xx_erp_bug(device, request, irb, sense[3]); - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); case 0x45: /* The drive is assigned to a different channel path. */ PRINT_WARN("The drive is assigned elsewhere.\n"); - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); case 0x46: /* * Drive not on-line. 
Drive may be switched offline, @@ -600,23 +673,24 @@ tape_34xx_unit_check(struct tape_device * the drive address may not be set correctly. */ PRINT_WARN("The drive is not on-line."); - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); case 0x47: /* Volume fenced. CU reports volume integrity is lost. */ PRINT_WARN("Volume fenced. The volume integrity is lost.\n"); - return tape_34xx_erp_failed(device, request, -EIO); + tape_34xx_delete_sbid_from(device, 0); + return tape_34xx_erp_failed(request, -EIO); case 0x48: /* Log sense data and retry request. */ - return tape_34xx_erp_retry(device, request); + return tape_34xx_erp_retry(request); case 0x49: /* Bus out check. A parity check error on the bus was found. */ PRINT_WARN("Bus out check. A data transfer over the bus " "has been corrupted.\n"); - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); case 0x4a: /* Control unit erp failed. */ PRINT_WARN("The control unit I/O error recovery failed.\n"); - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); case 0x4b: /* * CU and drive incompatible. The drive requests micro-program @@ -624,13 +698,13 @@ tape_34xx_unit_check(struct tape_device */ PRINT_WARN("The drive needs microprogram patches from the " "control unit, which are not available.\n"); - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); case 0x4c: /* * Recovered Check-One failure. Cu develops a hardware error, * but is able to recover. */ - return tape_34xx_erp_retry(device, request); + return tape_34xx_erp_retry(request); case 0x4d: if (device->cdev->id.driver_info == tape_3490) /* @@ -638,7 +712,7 @@ tape_34xx_unit_check(struct tape_device * not support resetting event recovery (which has to * be handled by the I/O Layer), retry our command. */ - return tape_34xx_erp_retry(device, request); + return tape_34xx_erp_retry(request); /* This erpa is reserved for 3480. */ return tape_34xx_erp_bug(device, request, irb, sense[3]); case 0x4e: @@ -650,7 +724,7 @@ tape_34xx_unit_check(struct tape_device */ PRINT_WARN("Maximum block size for buffered " "mode exceeded.\n"); - return tape_34xx_erp_failed(device, request, -ENOBUFS); + return tape_34xx_erp_failed(request, -ENOBUFS); } /* This erpa is reserved for 3480. */ return tape_34xx_erp_bug(device, request, irb, sense[3]); @@ -661,7 +735,7 @@ tape_34xx_unit_check(struct tape_device * never happen, since we're never running in extended * buffered log mode. */ - return tape_34xx_erp_retry(device, request); + return tape_34xx_erp_retry(request); case 0x51: /* * Read buffered log (EOV). EOF processing occurs while the @@ -669,73 +743,83 @@ tape_34xx_unit_check(struct tape_device * happen, since we're never running in extended buffered * log mode. */ - return tape_34xx_erp_retry(device, request); + return tape_34xx_erp_retry(request); case 0x52: /* End of Volume complete. Rewind unload completed ok. */ if (request->op == TO_RUN) { tape_med_state_set(device, MS_UNLOADED); - return tape_34xx_erp_succeeded(device, request); + tape_34xx_delete_sbid_from(device, 0); + return tape_34xx_erp_succeeded(request); } return tape_34xx_erp_bug(device, request, irb, sense[3]); case 0x53: /* Global command intercept. */ - return tape_34xx_erp_retry(device, request); + return tape_34xx_erp_retry(request); case 0x54: /* Channel interface recovery (temporary). 
*/ - return tape_34xx_erp_retry(device, request); + return tape_34xx_erp_retry(request); case 0x55: /* Channel interface recovery (permanent). */ PRINT_WARN("A permanent channel interface error occurred.\n"); - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); case 0x56: /* Channel protocol error. */ PRINT_WARN("A channel protocol error occurred.\n"); - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_failed(request, -EIO); case 0x57: if (device->cdev->id.driver_info == tape_3480) { /* Attention intercept. */ PRINT_WARN("An attention intercept occurred, " "which will be recovered.\n"); - return tape_34xx_erp_retry(device, request); + return tape_34xx_erp_retry(request); } else { /* Global status intercept. */ PRINT_WARN("An global status intercept was received, " "which will be recovered.\n"); - return tape_34xx_erp_retry(device, request); + return tape_34xx_erp_retry(request); } case 0x5a: /* * Tape length incompatible. The tape inserted is too long, * which could cause damage to the tape or the drive. */ - PRINT_WARN("Tape length incompatible [should be IBM Cartridge " - "System Tape]. May cause damage to drive or tape.\n"); - return tape_34xx_erp_failed(device, request, -EIO); + PRINT_WARN("Tape Length Incompatible\n"); + PRINT_WARN("Tape length exceeds IBM enhanced capacity " + "cartdridge length or a medium\n"); + PRINT_WARN("with EC-CST identification mark has been mounted " + "in a device that writes\n"); + PRINT_WARN("3480 or 3480 XF format.\n"); + return tape_34xx_erp_failed(request, -EIO); case 0x5b: /* Format 3480 XF incompatible */ if (sense[1] & SENSE_BEGINNING_OF_TAPE) /* The tape will get overwritten. */ - return tape_34xx_erp_retry(device, request); - PRINT_WARN("Tape format is incompatible to the drive, " - "which writes 3480-2 XF.\n"); - return tape_34xx_erp_failed(device, request, -EIO); + return tape_34xx_erp_retry(request); + PRINT_WARN("Format 3480 XF Incompatible\n"); + PRINT_WARN("Medium has been created in 3480 format. " + "To change the format writes\n"); + PRINT_WARN("must be issued at BOT.\n"); + return tape_34xx_erp_failed(request, -EIO); case 0x5c: /* Format 3480-2 XF incompatible */ - PRINT_WARN("Tape format is incompatible to the drive. " - "The drive cannot access 3480-2 XF volumes.\n"); - return tape_34xx_erp_failed(device, request, -EIO); + PRINT_WARN("Format 3480-2 XF Incompatible\n"); + PRINT_WARN("Device can only read 3480 or 3480 XF format.\n"); + return tape_34xx_erp_failed(request, -EIO); case 0x5d: /* Tape length violation. */ - PRINT_WARN("Tape length violation [should be IBM Enhanced " - "Capacity Cartridge System Tape]. May cause " - "damage to drive or tape.\n"); - return tape_34xx_erp_failed(device, request, -EMEDIUMTYPE); + PRINT_WARN("Tape Length Violation\n"); + PRINT_WARN("The mounted tape exceeds IBM Enhanced Capacity " + "Cartdridge System Tape length.\n"); + PRINT_WARN("This may cause damage to the drive or tape when " + "processing to the EOV\n"); + return tape_34xx_erp_failed(request, -EMEDIUMTYPE); case 0x5e: /* Compaction algorithm incompatible. 
*/ + PRINT_WARN("Compaction Algorithm Incompatible\n"); PRINT_WARN("The volume is recorded using an incompatible " - "compaction algorithm, which is not supported by " - "the control unit.\n"); - return tape_34xx_erp_failed(device, request, -EMEDIUMTYPE); + "compaction algorithm,\n"); + PRINT_WARN("which is not supported by the device.\n"); + return tape_34xx_erp_failed(request, -EMEDIUMTYPE); /* The following erpas should have been covered earlier. */ case 0x23: /* Read data check. */ @@ -766,14 +850,24 @@ tape_34xx_irq(struct tape_device *device (request->op == TO_WRI)) { /* Write at end of volume */ PRINT_INFO("End of volume\n"); /* XXX */ - return tape_34xx_erp_failed(device, request, -ENOSPC); + return tape_34xx_erp_failed(request, -ENOSPC); } if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) return tape_34xx_unit_check(device, request, irb); - if (irb->scsw.dstat & DEV_STAT_DEV_END) - return tape_34xx_done(device, request); + if (irb->scsw.dstat & DEV_STAT_DEV_END) { + /* + * A unit exception occurs on skipping over a tapemark block. + */ + if (irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) { + if (request->op == TO_BSB || request->op == TO_FSB) + request->rescnt++; + else + DBF_EVENT(5, "Unit Exception!\n"); + } + return tape_34xx_done(request); + } DBF_EVENT(6, "xunknownirq\n"); PRINT_ERR("Unexpected interrupt.\n"); @@ -786,45 +880,217 @@ tape_34xx_irq(struct tape_device *device * ioctl_overload */ static int -tape_34xx_ioctl(struct tape_device *device, - unsigned int cmd, unsigned long arg) +tape_34xx_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg) { - if (cmd == TAPE390_DISPLAY) - return tape_std_display(device, cmd, arg); - else + if (cmd == TAPE390_DISPLAY) { + struct display_struct disp; + + if (copy_from_user(&disp, (char *) arg, sizeof(disp)) != 0) + return -EFAULT; + + return tape_std_display(device, &disp); + } else return -EINVAL; } +static inline void +tape_34xx_append_new_sbid(struct tape_34xx_block_id bid, struct list_head *l) +{ + struct tape_34xx_sbid * new_sbid; + + new_sbid = kmalloc(sizeof(*new_sbid), GFP_ATOMIC); + if (!new_sbid) + return; + + new_sbid->bid = bid; + list_add(&new_sbid->list, l); +} + +/* + * Build up the search block ID list. The block ID consists of a logical + * block number and a hardware specific part. The hardware specific part + * helps the tape drive to speed up searching for a specific block. + */ +static void +tape_34xx_add_sbid(struct tape_device *device, struct tape_34xx_block_id bid) +{ + struct list_head * sbid_list; + struct tape_34xx_sbid * sbid; + struct list_head * l; + + /* + * immediately return if there is no list at all or the block to add + * is located in segment 1 of wrap 0 because this position is used + * if no hardware position data is supplied. + */ + sbid_list = (struct list_head *) device->discdata; + if (!sbid_list || (bid.segment < 2 && bid.wrap == 0)) + return; + + /* + * Search the position where to insert the new entry. Hardware + * acceleration uses only the segment and wrap number. So we + * need only one entry for a specific wrap/segment combination. + * If there is a block with a lower number but the same hard- + * ware position data we just update the block number in the + * existing entry. + */ + list_for_each(l, sbid_list) { + sbid = list_entry(l, struct tape_34xx_sbid, list); + + if ( + (sbid->bid.segment == bid.segment) && + (sbid->bid.wrap == bid.wrap) + ) { + if (bid.block < sbid->bid.block) + sbid->bid = bid; + else return; + break; + } + + /* Sort in according to logical block number. 
*/ + if (bid.block < sbid->bid.block) { + tape_34xx_append_new_sbid(bid, l->prev); + break; + } + } + /* List empty or new block bigger than last entry. */ + if (l == sbid_list) + tape_34xx_append_new_sbid(bid, l->prev); + + DBF_LH(4, "Current list is:\n"); + list_for_each(l, sbid_list) { + sbid = list_entry(l, struct tape_34xx_sbid, list); + DBF_LH(4, "%d:%03d@%05d\n", + sbid->bid.wrap, + sbid->bid.segment, + sbid->bid.block + ); + } +} + +/* + * Delete all entries from the search block ID list that belong to tape blocks + * equal or higher than the given number. + */ +static void +tape_34xx_delete_sbid_from(struct tape_device *device, int from) +{ + struct list_head * sbid_list; + struct tape_34xx_sbid * sbid; + struct list_head * l; + struct list_head * n; + + sbid_list = (struct list_head *) device->discdata; + if (!sbid_list) + return; + + list_for_each_safe(l, n, sbid_list) { + sbid = list_entry(l, struct tape_34xx_sbid, list); + if (sbid->bid.block >= from) { + DBF_LH(4, "Delete sbid %d:%03d@%05d\n", + sbid->bid.wrap, + sbid->bid.segment, + sbid->bid.block + ); + list_del(l); + kfree(sbid); + } + } +} + +/* + * Merge hardware position data into a block id. + */ +static void +tape_34xx_merge_sbid( + struct tape_device * device, + struct tape_34xx_block_id * bid +) { + struct tape_34xx_sbid * sbid; + struct tape_34xx_sbid * sbid_to_use; + struct list_head * sbid_list; + struct list_head * l; + + sbid_list = (struct list_head *) device->discdata; + bid->wrap = 0; + bid->segment = 1; + + if (!sbid_list || list_empty(sbid_list)) + return; + + sbid_to_use = NULL; + list_for_each(l, sbid_list) { + sbid = list_entry(l, struct tape_34xx_sbid, list); + + if (sbid->bid.block >= bid->block) + break; + sbid_to_use = sbid; + } + if (sbid_to_use) { + bid->wrap = sbid_to_use->bid.wrap; + bid->segment = sbid_to_use->bid.segment; + DBF_LH(4, "Use %d:%03d@%05d for %05d\n", + sbid_to_use->bid.wrap, + sbid_to_use->bid.segment, + sbid_to_use->bid.block, + bid->block + ); + } +} + static int tape_34xx_setup_device(struct tape_device * device) { - DBF_EVENT(6, "34xx minor1: %x\n", device->first_minor); - tape_34xx_medium_sense(device); - return 0; + int rc; + struct list_head * discdata; + + DBF_EVENT(6, "34xx device setup\n"); + if ((rc = tape_std_assign(device)) == 0) { + if ((rc = tape_34xx_medium_sense(device)) != 0) { + DBF_LH(3, "34xx medium sense returned %d\n", rc); + } + } + discdata = kmalloc(sizeof(struct list_head), GFP_KERNEL); + if (discdata) { + INIT_LIST_HEAD(discdata); + device->discdata = discdata; + } + + return rc; } static void -tape_34xx_cleanup_device(struct tape_device * device) +tape_34xx_cleanup_device(struct tape_device *device) { + tape_std_unassign(device); + if (device->discdata) { + tape_34xx_delete_sbid_from(device, 0); kfree(device->discdata); device->discdata = NULL; } } + /* * MTTELL: Tell block. Return the number of block relative to current file. 
*/ static int tape_34xx_mttell(struct tape_device *device, int mt_count) { - __u64 block_id; + struct { + struct tape_34xx_block_id cbid; + struct tape_34xx_block_id dbid; + } __attribute__ ((packed)) block_id; int rc; - rc = tape_std_read_block_id(device, &block_id); + rc = tape_std_read_block_id(device, (__u64 *) &block_id); if (rc) return rc; - return (block_id >> 32) & 0x3fffff; + + tape_34xx_add_sbid(device, block_id.cbid); + return block_id.cbid.block; } /* @@ -834,21 +1100,28 @@ static int tape_34xx_mtseek(struct tape_device *device, int mt_count) { struct tape_request *request; + struct tape_34xx_block_id * bid; - if (mt_count > 0x400000) { + if (mt_count > 0x3fffff) { DBF_EXCEPTION(6, "xsee parm\n"); return -EINVAL; } request = tape_alloc_request(3, 4); if (IS_ERR(request)) return PTR_ERR(request); - request->op = TO_LBL; + /* setup ccws */ + request->op = TO_LBL; + bid = (struct tape_34xx_block_id *) request->cpdata; + bid->format = (*device->modeset_byte & 0x08) ? + TAPE34XX_FMT_3480_XF : TAPE34XX_FMT_3480; + bid->block = mt_count; + tape_34xx_merge_sbid(device, bid); + tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); - *(__u32 *) request->cpdata = mt_count | - ((*device->modeset_byte & 0x08) ? 0x01800000 : 0x01000000); tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata); tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); + /* execute it */ return tape_do_io_free(device, request); } @@ -862,15 +1135,14 @@ tape_34xx_bread(struct tape_device *devi { struct tape_request *request; struct ccw1 *ccw; - int count = 0,start_block,i; + int count = 0, i; unsigned off; char *dst; struct bio_vec *bv; struct bio *bio; + struct tape_34xx_block_id * start_block; DBF_EVENT(6, "xBREDid:"); - start_block = req->sector >> TAPEBLOCK_HSEC_S2B; - DBF_EVENT(6, "start_block = %i\n", start_block); /* Count the number of blocks for the request. */ rq_for_each_bio(bio, req) { @@ -878,14 +1150,18 @@ tape_34xx_bread(struct tape_device *devi count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9); } } + /* Allocate the ccw request. */ - request = tape_alloc_request(2+count+1, 4); + request = tape_alloc_request(3+count+1, 8); if (IS_ERR(request)) return request; + /* Setup ccws. */ request->op = TO_BLOCK; - *(__u32 *) request->cpdata = (start_block & 0x3fffff) | - ((*device->modeset_byte & 0x08) ? 0x81000000 : 0x01000000); + start_block = (struct tape_34xx_block_id *) request->cpdata; + start_block->block = req->sector >> TAPEBLOCK_HSEC_S2B; + DBF_EVENT(6, "start_block = %i\n", start_block->block); + ccw = request->cpaddr; ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte); @@ -893,8 +1169,12 @@ tape_34xx_bread(struct tape_device *devi * We always setup a nop after the mode set ccw. This slot is * used in tape_std_check_locate to insert a locate ccw if the * current tape position doesn't match the start block to be read. + * The second nop will be filled with a read block id which is in + * turn used by tape_34xx_free_bread to populate the segment bid + * table. 
*/ ccw = tape_ccw_cc(ccw, NOP, 0, NULL); + ccw = tape_ccw_cc(ccw, NOP, 0, NULL); rq_for_each_bio(bio, req) { bio_for_each_segment(bv, bio, i) { @@ -921,8 +1201,21 @@ tape_34xx_free_bread (struct tape_reques { struct ccw1* ccw; + ccw = request->cpaddr; + if ((ccw + 2)->cmd_code == READ_BLOCK_ID) { + struct { + struct tape_34xx_block_id cbid; + struct tape_34xx_block_id dbid; + } __attribute__ ((packed)) *rbi_data; + + rbi_data = request->cpdata; + + if (request->device) + tape_34xx_add_sbid(request->device, rbi_data->cbid); + } + /* Last ccw is a nop and doesn't need clear_normalized_cda */ - for (ccw = request->cpaddr; ccw->flags & CCW_FLAG_CC; ccw++) + for (; ccw->flags & CCW_FLAG_CC; ccw++) if (ccw->cmd_code == READ_FORWARD) clear_normalized_cda(ccw); tape_free_request(request); @@ -935,14 +1228,24 @@ tape_34xx_free_bread (struct tape_reques * start block for the request. */ static void -tape_34xx_check_locate (struct tape_device *device, - struct tape_request *request) +tape_34xx_check_locate(struct tape_device *device, struct tape_request *request) { - int start_block; + struct tape_34xx_block_id * start_block; - start_block = *(__u32 *) request->cpdata & 0x3fffff; - if (start_block != device->blk_data.block_position) - tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata); + start_block = (struct tape_34xx_block_id *) request->cpdata; + if (start_block->block == device->blk_data.block_position) + return; + + DBF_LH(4, "Block seek(%06d+%06d)\n", start_block->block, device->bof); + start_block->wrap = 0; + start_block->segment = 1; + start_block->format = (*device->modeset_byte & 0x08) ? + TAPE34XX_FMT_3480_XF : + TAPE34XX_FMT_3480; + start_block->block = start_block->block + device->bof; + tape_34xx_merge_sbid(device, start_block); + tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata); + tape_ccw_cc(request->cpaddr + 2, READ_BLOCK_ID, 8, request->cpdata); } #endif @@ -995,8 +1298,6 @@ static struct tape_discipline tape_disci .irq = tape_34xx_irq, .read_block = tape_std_read_block, .write_block = tape_std_write_block, - .assign = tape_std_assign, - .unassign = tape_std_unassign, #ifdef CONFIG_S390_TAPE_BLOCK .bread = tape_34xx_bread, .free_bread = tape_34xx_free_bread, @@ -1041,7 +1342,7 @@ tape_34xx_init (void) { int rc; - DBF_EVENT(3, "34xx init: $Revision: 1.8 $\n"); + DBF_EVENT(3, "34xx init: $Revision: 1.18 $\n"); /* Register driver for 3480/3490 tapes. 
*/ rc = ccw_driver_register(&tape_34xx_driver); if (rc) @@ -1060,7 +1361,7 @@ tape_34xx_exit(void) MODULE_DEVICE_TABLE(ccw, tape_34xx_ids); MODULE_AUTHOR("(C) 2001-2002 IBM Deutschland Entwicklung GmbH"); MODULE_DESCRIPTION("Linux on zSeries channel attached 3480 tape " - "device driver ($Revision: 1.8 $)"); + "device driver ($Revision: 1.18 $)"); MODULE_LICENSE("GPL"); module_init(tape_34xx_init); diff -puN drivers/s390/char/tape_block.c~s390-05-tape-driver drivers/s390/char/tape_block.c --- 25/drivers/s390/char/tape_block.c~s390-05-tape-driver Thu Jan 8 14:11:31 2004 +++ 25-akpm/drivers/s390/char/tape_block.c Thu Jan 8 14:11:31 2004 @@ -3,12 +3,14 @@ * block device frontend for tape device driver * * S390 and zSeries version - * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Copyright (C) 2001,2003 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Carsten Otte * Tuan Ngo-Anh * Martin Schwidefsky + * Stefan Bader */ +#include #include #include #include @@ -19,25 +21,53 @@ #include "tape.h" -#define PRINTK_HEADER "TBLOCK:" +#define PRINTK_HEADER "TAPE_BLOCK: " #define TAPEBLOCK_MAX_SEC 100 #define TAPEBLOCK_MIN_REQUEUE 3 /* + * 2003/11/25 Stefan Bader + * + * In 2.5/2.6 the block device request function is very likely to be called + * with disabled interrupts (e.g. generic_unplug_device). So the driver can't + * just call any function that tries to allocate CCW requests from that con- + * text since it might sleep. There are two choices to work around this: + * a) do not allocate with kmalloc but use its own memory pool + * b) take requests from the queue outside that context, knowing that + * allocation might sleep + */ + +/* * file operation structure for tape block frontend */ static int tapeblock_open(struct inode *, struct file *); static int tapeblock_release(struct inode *, struct file *); +static int tapeblock_ioctl(struct inode *, struct file *, unsigned int, + unsigned long); +static int tapeblock_medium_changed(struct gendisk *); +static int tapeblock_revalidate_disk(struct gendisk *); static struct block_device_operations tapeblock_fops = { - .owner = THIS_MODULE, - .open = tapeblock_open, - .release = tapeblock_release, + .owner = THIS_MODULE, + .open = tapeblock_open, + .release = tapeblock_release, + .ioctl = tapeblock_ioctl, + .media_changed = tapeblock_medium_changed, + .revalidate_disk = tapeblock_revalidate_disk, }; static int tapeblock_major = 0; +static void +tapeblock_trigger_requeue(struct tape_device *device) +{ + /* Protect against rescheduling. */ + if (atomic_compare_and_swap(0, 1, &device->blk_data.requeue_scheduled)) + return; + schedule_work(&device->blk_data.requeue_task); +} + /* * Post finished request. */ @@ -55,6 +85,8 @@ __tapeblock_end_request(struct tape_requ struct tape_device *device; struct request *req; + DBF_LH(6, "__tapeblock_end_request()\n"); + device = ccw_req->device; req = (struct request *) data; tapeblock_end_request(req, ccw_req->rc == 0); @@ -68,33 +100,75 @@ __tapeblock_end_request(struct tape_requ device->discipline->free_bread(ccw_req); if (!list_empty(&device->req_queue) || elv_next_request(device->blk_data.request_queue)) - tasklet_schedule(&device->blk_data.tasklet); + tapeblock_trigger_requeue(device); } /* - * Fetch requests from block device queue. + * Feed the tape device CCW queue with requests supplied in a list. 
*/ -static inline void -__tape_process_blk_queue(struct tape_device *device, struct list_head *new_req) +static inline int +tapeblock_start_request(struct tape_device *device, struct request *req) { - request_queue_t *queue; - struct list_head *l; - struct request *req; - struct tape_request *ccw_req; - int nr_queued; + struct tape_request * ccw_req; + int rc; + + DBF_LH(6, "tapeblock_start_request(%p, %p)\n", device, req); + + ccw_req = device->discipline->bread(device, req); + if (IS_ERR(ccw_req)) { + DBF_EVENT(1, "TBLOCK: bread failed\n"); + tapeblock_end_request(req, 0); + return PTR_ERR(ccw_req); + } + ccw_req->callback = __tapeblock_end_request; + ccw_req->callback_data = (void *) req; + ccw_req->retries = TAPEBLOCK_RETRIES; + + rc = tape_do_io_async(device, ccw_req); + if (rc) { + /* + * Start/enqueueing failed. No retries in + * this case. + */ + tapeblock_end_request(req, 0); + device->discipline->free_bread(ccw_req); + } - /* FIXME: we have to make sure that the tapeblock frontend - owns the device. tape_state != TS_IN_USE is NOT enough. */ - if (device->tape_state != TS_IN_USE) + return rc; +} + +/* + * Move requests from the block device request queue to the tape device ccw + * queue. + */ +static void +tapeblock_requeue(void *data) { + struct tape_device * device; + request_queue_t * queue; + int nr_queued; + struct request * req; + struct list_head * l; + int rc; + + device = (struct tape_device *) data; + if (!device) return; - queue = device->blk_data.request_queue; - nr_queued = 0; + + spin_lock_irq(get_ccwdev_lock(device->cdev)); + queue = device->blk_data.request_queue; + /* Count number of requests on ccw queue. */ + nr_queued = 0; list_for_each(l, &device->req_queue) nr_queued++; - while (!blk_queue_plugged(queue) && - elv_next_request(queue) && - nr_queued < TAPEBLOCK_MIN_REQUEUE) { + spin_unlock(get_ccwdev_lock(device->cdev)); + + spin_lock(&device->blk_data.request_queue_lock); + while ( + !blk_queue_plugged(queue) && + elv_next_request(queue) && + nr_queued < TAPEBLOCK_MIN_REQUEUE + ) { req = elv_next_request(queue); if (rq_data_dir(req) == WRITE) { DBF_EVENT(1, "TBLOCK: Rejecting write request\n"); @@ -102,52 +176,14 @@ __tape_process_blk_queue(struct tape_dev tapeblock_end_request(req, 0); continue; } - ccw_req = device->discipline->bread(device, req); - if (IS_ERR(ccw_req)) { - if (PTR_ERR(ccw_req) == -ENOMEM) - break; /* don't try again */ - DBF_EVENT(1, "TBLOCK: bread failed\n"); - blkdev_dequeue_request(req); - tapeblock_end_request(req, 0); - continue; - } - ccw_req->callback = __tapeblock_end_request; - ccw_req->callback_data = (void *) req; - ccw_req->retries = TAPEBLOCK_RETRIES; + spin_unlock_irq(&device->blk_data.request_queue_lock); + rc = tapeblock_start_request(device, req); + spin_lock_irq(&device->blk_data.request_queue_lock); blkdev_dequeue_request(req); - list_add_tail(new_req, &ccw_req->list); nr_queued++; } -} - -/* - * Feed requests to the tape device. - */ -static inline int -tape_queue_requests(struct tape_device *device, struct list_head *new_req) -{ - struct list_head *l, *n; - struct tape_request *ccw_req; - struct request *req; - int rc, fail; - - fail = 0; - list_for_each_safe(l, n, new_req) { - ccw_req = list_entry(l, struct tape_request, list); - list_del(&ccw_req->list); - rc = tape_do_io_async(device, ccw_req); - if (rc) { - /* - * Start/enqueueing failed. No retries in - * this case. 
- */ - req = (struct request *) ccw_req->callback_data; - tapeblock_end_request(req, 0); - device->discipline->free_bread(ccw_req); - fail = 1; - } - } - return fail; + spin_unlock_irq(&device->blk_data.request_queue_lock); + atomic_set(&device->blk_data.requeue_scheduled, 0); } /* @@ -156,48 +192,14 @@ tape_queue_requests(struct tape_device * static void tapeblock_request_fn(request_queue_t *queue) { - struct list_head new_req; struct tape_device *device; device = (struct tape_device *) queue->queuedata; - while (elv_next_request(queue)) { - INIT_LIST_HEAD(&new_req); - spin_lock(get_ccwdev_lock(device->cdev)); - __tape_process_blk_queue(device, &new_req); - spin_unlock(get_ccwdev_lock(device->cdev)); - /* - * Now queue the new request to the tape. This needs to be - * done without the device lock held. - */ - if (tape_queue_requests(device, &new_req) == 0) - /* All requests queued. Thats enough for now. */ - break; - } -} - -/* - * Acquire the device lock and process queues for the device. - */ -static void -tapeblock_tasklet(unsigned long data) -{ - struct list_head new_req; - struct tape_device *device; + DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device); + if (device == NULL) + BUG(); - device = (struct tape_device *) data; - while (elv_next_request(device->blk_data.request_queue)) { - INIT_LIST_HEAD(&new_req); - spin_lock_irq(get_ccwdev_lock(device->cdev)); - __tape_process_blk_queue(device, &new_req); - spin_unlock_irq(get_ccwdev_lock(device->cdev)); - /* - * Now queue the new request to the tape. This needs to be - * done without the device lock held. - */ - if (tape_queue_requests(device, &new_req) == 0) - /* All requests queued. Thats enough for now. */ - break; - } + tapeblock_trigger_requeue(device); } /* @@ -206,126 +208,192 @@ tapeblock_tasklet(unsigned long data) int tapeblock_setup_device(struct tape_device * device) { - struct tape_blk_data *d = &device->blk_data; - request_queue_t *q; - struct gendisk *disk = alloc_disk(1); - int rc; - - if (!disk) + struct tape_blk_data * blkdat; + struct gendisk * disk; + int rc; + + blkdat = &device->blk_data; + spin_lock_init(&blkdat->request_queue_lock); + atomic_set(&blkdat->requeue_scheduled, 0); + + blkdat->request_queue = blk_init_queue( + tapeblock_request_fn, + &blkdat->request_queue_lock + ); + if (!blkdat->request_queue) return -ENOMEM; - tasklet_init(&d->tasklet, tapeblock_tasklet, (unsigned long)device); - - spin_lock_init(&d->request_queue_lock); - q = blk_init_queue(tapeblock_request_fn, &d->request_queue_lock); - if (!q) { - rc = -ENXIO; - goto put_disk; - } - d->request_queue = q; - elevator_exit(q); - rc = elevator_init(q, &elevator_noop); + elevator_exit(blkdat->request_queue); + rc = elevator_init(blkdat->request_queue, &elevator_noop); if (rc) goto cleanup_queue; - /* FIXME: We should be able to sense the sectore size */ - blk_queue_hardsect_size(q, TAPEBLOCK_HSEC_SIZE); - blk_queue_max_sectors(q, TAPEBLOCK_MAX_SEC); - blk_queue_max_phys_segments(q, -1L); - blk_queue_max_hw_segments(q, -1L); - blk_queue_max_segment_size(q, -1L); - blk_queue_segment_boundary(q, -1L); + blk_queue_hardsect_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE); + blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC); + blk_queue_max_phys_segments(blkdat->request_queue, -1L); + blk_queue_max_hw_segments(blkdat->request_queue, -1L); + blk_queue_max_segment_size(blkdat->request_queue, -1L); + blk_queue_segment_boundary(blkdat->request_queue, -1L); + + disk = alloc_disk(1); + if (!disk) { + rc = -ENOMEM; + goto cleanup_queue; + } 
disk->major = tapeblock_major; disk->first_minor = device->first_minor; disk->fops = &tapeblock_fops; - disk->private_data = device; - disk->queue = q; - //set_capacity(disk, size); - - sprintf(disk->disk_name, "tBLK/%d", device->first_minor / TAPE_MINORS_PER_DEV); + disk->private_data = tape_get_device_reference(device); + disk->queue = blkdat->request_queue; + set_capacity(disk, 0); + sprintf(disk->disk_name, "btibm%d", + device->first_minor / TAPE_MINORS_PER_DEV); + + blkdat->disk = disk; + blkdat->medium_changed = 1; + blkdat->request_queue->queuedata = tape_get_device_reference(device); add_disk(disk); - d->disk = disk; + + INIT_WORK(&blkdat->requeue_task, tapeblock_requeue, + tape_get_device_reference(device)); + + /* Will vanish */ + tape_hotplug_event(device, tapeblock_major, TAPE_HOTPLUG_BLOCK_ADD); + return 0; - cleanup_queue: - blk_cleanup_queue(q); - put_disk: - put_disk(disk); +cleanup_queue: + blk_cleanup_queue(blkdat->request_queue); + blkdat->request_queue = NULL; + return rc; } void tapeblock_cleanup_device(struct tape_device *device) { - struct tape_blk_data *d = &device->blk_data; + tape_hotplug_event(device, tapeblock_major, TAPE_HOTPLUG_BLOCK_REMOVE); - del_gendisk(d->disk); - put_disk(d->disk); - blk_cleanup_queue(d->request_queue); + flush_scheduled_work(); + device->blk_data.requeue_task.data = tape_put_device(device); - tasklet_kill(&d->tasklet); + del_gendisk(device->blk_data.disk); + device->blk_data.disk->private_data = + tape_put_device(device->blk_data.disk->private_data); + put_disk(device->blk_data.disk); + + device->blk_data.disk = NULL; + device->blk_data.request_queue->queuedata = tape_put_device(device); + + blk_cleanup_queue(device->blk_data.request_queue); + device->blk_data.request_queue = NULL; } /* * Detect number of blocks of the tape. * FIXME: can we extent this to detect the blocks size as well ? */ -static int tapeblock_mediumdetect(struct tape_device *device) +static int +tapeblock_revalidate_disk(struct gendisk *disk) { - unsigned int nr_of_blks; - int rc; + struct tape_device * device; + unsigned int nr_of_blks; + int rc; + + device = (struct tape_device *) disk->private_data; + if (!device) + BUG(); + + if (!device->blk_data.medium_changed) + return 0; PRINT_INFO("Detecting media size...\n"); - rc = tape_mtop(device, MTREW, 1); - if (rc) - return rc; - rc = tape_mtop(device, MTFSF, 1); + rc = tape_mtop(device, MTFSFM, 1); if (rc) return rc; + rc = tape_mtop(device, MTTELL, 1); - if (rc) + if (rc < 0) return rc; - nr_of_blks = rc - 1; /* don't count FM */ - rc = tape_mtop(device, MTREW, 1); - if (rc) + + DBF_LH(3, "Image file ends at %d\n", rc); + nr_of_blks = rc; + + /* This will fail for the first file. Catch the error by checking the + * position. */ + tape_mtop(device, MTBSF, 1); + + rc = tape_mtop(device, MTTELL, 1); + if (rc < 0) return rc; + + if (rc > nr_of_blks) + return -EINVAL; + + DBF_LH(3, "Image file starts at %d\n", rc); + device->bof = rc; + nr_of_blks -= rc; + PRINT_INFO("Found %i blocks on media\n", nr_of_blks); + set_capacity(device->blk_data.disk, + nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512)); + + device->blk_data.block_position = 0; + device->blk_data.medium_changed = 0; return 0; } +static int +tapeblock_medium_changed(struct gendisk *disk) +{ + struct tape_device *device; + + device = (struct tape_device *) disk->private_data; + DBF_LH(6, "tapeblock_medium_changed(%p) = %d\n", + device, device->blk_data.medium_changed); + + return device->blk_data.medium_changed; +} + /* * Block frontend tape device open function. 
*/ static int tapeblock_open(struct inode *inode, struct file *filp) { - struct gendisk *disk = inode->i_bdev->bd_disk; - struct tape_device *device = disk->private_data; - int rc; + struct gendisk * disk; + struct tape_device * device; + int rc; + + disk = inode->i_bdev->bd_disk; + device = tape_get_device_reference(disk->private_data); + + if (device->required_tapemarks) { + DBF_EVENT(2, "TBLOCK: missing tapemarks\n"); + PRINT_ERR("TBLOCK: Refusing to open tape with missing" + " end of file marks.\n"); + rc = -EPERM; + goto put_device; + } - /* - * FIXME: this new tapeblock_open function is from 2.5.69. - * It doesn't do tape_get_device anymore but picks the device - * pointer from disk->private_data. It is stored in - * tapeblock_setup_device but WITHOUT proper ref-counting. - */ rc = tape_open(device); if (rc) goto put_device; - rc = tape_assign(device); + + rc = tapeblock_revalidate_disk(disk); if (rc) goto release; - device->blk_data.block_position = -1; - rc = tapeblock_mediumdetect(device); - if (rc) - goto unassign; + + /* + * Note: The reference to is hold until the release function + * is called. + */ + tape_state_set(device, TS_BLKUSE); return 0; - unassign: - tape_unassign(device); - release: +release: tape_release(device); put_device: tape_put_device(device); @@ -334,6 +402,9 @@ tapeblock_open(struct inode *inode, stru /* * Block frontend tape device release function. + * + * Note: One reference to the tape device was made by the open function. So + * we just get the pointer here and release the reference. */ static int tapeblock_release(struct inode *inode, struct file *filp) @@ -341,14 +412,54 @@ tapeblock_release(struct inode *inode, s struct gendisk *disk = inode->i_bdev->bd_disk; struct tape_device *device = disk->private_data; + tape_state_set(device, TS_IN_USE); tape_release(device); - tape_unassign(device); tape_put_device(device); return 0; } /* + * Support of some generic block device IOCTLs. + */ +static int +tapeblock_ioctl( + struct inode * inode, + struct file * file, + unsigned int command, + unsigned long arg +) { + int rc; + int minor; + struct gendisk *disk = inode->i_bdev->bd_disk; + struct tape_device *device = disk->private_data; + + rc = 0; + disk = inode->i_bdev->bd_disk; + if (!disk) + BUG(); + device = disk->private_data; + if (!device) + BUG(); + minor = iminor(inode); + + DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command); + DBF_LH(6, "device = %d:%d\n", tapeblock_major, minor); + + switch (command) { + /* Refuse some IOCTL calls without complaining (mount). */ + case 0x5310: /* CDROMMULTISESSION */ + rc = -EINVAL; + break; + default: + PRINT_WARN("invalid ioctl 0x%x\n", command); + rc = -EINVAL; + } + + return rc; +} + +/* * Initialize block device frontend. 
*/ int diff -puN drivers/s390/char/tape_char.c~s390-05-tape-driver drivers/s390/char/tape_char.c --- 25/drivers/s390/char/tape_char.c~s390-05-tape-driver Thu Jan 8 14:11:31 2004 +++ 25-akpm/drivers/s390/char/tape_char.c Thu Jan 8 14:11:31 2004 @@ -19,8 +19,9 @@ #include #include "tape.h" +#include "tape_std.h" -#define PRINTK_HEADER "TCHAR:" +#define PRINTK_HEADER "TAPE_CHAR: " #define TAPECHAR_MAJOR 0 /* get dynamic major */ @@ -52,12 +53,14 @@ static int tapechar_major = TAPECHAR_MAJ int tapechar_setup_device(struct tape_device * device) { + tape_hotplug_event(device, tapechar_major, TAPE_HOTPLUG_CHAR_ADD); return 0; } void tapechar_cleanup_device(struct tape_device *device) { + tape_hotplug_event(device, tapechar_major, TAPE_HOTPLUG_CHAR_REMOVE); } /* @@ -81,15 +84,27 @@ tapechar_check_idalbuffer(struct tape_de struct idal_buffer *new; if (device->char_data.idal_buf != NULL && - device->char_data.idal_buf->size >= block_size) + device->char_data.idal_buf->size == block_size) return 0; - /* The current idal buffer is not big enough. Allocate a new one. */ + + if (block_size > MAX_BLOCKSIZE) { + DBF_EVENT(3, "Invalid blocksize (%zd > %d)\n", + block_size, MAX_BLOCKSIZE); + PRINT_ERR("Invalid blocksize (%zd> %d)\n", + block_size, MAX_BLOCKSIZE); + return -EINVAL; + } + + /* The current idal buffer is not correct. Allocate a new one. */ new = idal_buffer_alloc(block_size, 0); if (new == NULL) return -ENOMEM; + if (device->char_data.idal_buf != NULL) idal_buffer_free(device->char_data.idal_buf); + device->char_data.idal_buf = new; + return 0; } @@ -116,6 +131,16 @@ tapechar_read (struct file *filp, char * DBF_EVENT(6, "TCHAR:ppos wrong\n"); return -EOVERFLOW; } + + /* + * If the tape isn't terminated yet, do it now. And since we then + * are at the end of the tape there wouldn't be anything to read + * anyways. So we return immediatly. + */ + if(device->required_tapemarks) { + return tape_std_terminate_write(device); + } + /* Find out block size to use */ if (device->char_data.block_size != 0) { if (count < device->char_data.block_size) { @@ -126,10 +151,17 @@ tapechar_read (struct file *filp, char * block_size = device->char_data.block_size; } else { block_size = count; - rc = tapechar_check_idalbuffer(device, block_size); - if (rc) - return rc; } + + rc = tapechar_check_idalbuffer(device, block_size); + if (rc) + return rc; + +#ifdef CONFIG_S390_TAPE_BLOCK + /* Changes position. */ + device->blk_data.medium_changed = 1; +#endif + DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size); /* Let the discipline build the ccw chain. */ request = device->discipline->read_block(device, block_size); @@ -182,11 +214,18 @@ tapechar_write(struct file *filp, const nblocks = count / block_size; } else { block_size = count; - rc = tapechar_check_idalbuffer(device, block_size); - if (rc) - return rc; nblocks = 1; } + + rc = tapechar_check_idalbuffer(device, block_size); + if (rc) + return rc; + +#ifdef CONFIG_S390_TAPE_BLOCK + /* Changes position. */ + device->blk_data.medium_changed = 1; +#endif + DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size); DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks); /* Let the discipline build the ccw chain. */ @@ -225,6 +264,17 @@ tapechar_write(struct file *filp, const rc = 0; } + + /* + * After doing a write we always need two tapemarks to correctly + * terminate the tape (one to terminate the file, the second to + * flag the end of recorded data. + * Since process_eov positions the tape in front of the written + * tapemark it doesn't hurt to write two marks again. 
+ */ + if (!rc) + device->required_tapemarks = 2; + return rc ? rc : written; } @@ -237,24 +287,28 @@ tapechar_open (struct inode *inode, stru struct tape_device *device; int minor, rc; + DBF_EVENT(6, "TCHAR:open: %i:%i\n", + imajor(filp->f_dentry->d_inode), + iminor(filp->f_dentry->d_inode)); + if (imajor(filp->f_dentry->d_inode) != tapechar_major) return -ENODEV; + minor = iminor(filp->f_dentry->d_inode); device = tape_get_device(minor / TAPE_MINORS_PER_DEV); if (IS_ERR(device)) { + DBF_EVENT(3, "TCHAR:open: tape_get_device() failed\n"); return PTR_ERR(device); } - DBF_EVENT(6, "TCHAR:open: %x\n", iminor(inode)); + + rc = tape_open(device); if (rc == 0) { - rc = tape_assign(device); - if (rc == 0) { - filp->private_data = device; - return 0; - } - tape_release(device); + filp->private_data = device; + return 0; } tape_put_device(device); + return rc; } @@ -267,29 +321,32 @@ tapechar_release(struct inode *inode, st { struct tape_device *device; - device = (struct tape_device *) filp->private_data; DBF_EVENT(6, "TCHAR:release: %x\n", iminor(inode)); -#if 0 - // FIXME: this is broken. Either MTWEOF/MTWEOF/MTBSR is done - // EVERYTIME the user switches from write to something different - // or it is not done at all. The second is IMHO better because - // we should NEVER do something the user didn't request. - if (device->last_op == TO_WRI) - tapechar_terminate_write(device); -#endif + device = (struct tape_device *) filp->private_data; + /* - * If this is the rewinding tape minor then rewind. + * If this is the rewinding tape minor then rewind. In that case we + * write all required tapemarks. Otherwise only one to terminate the + * file. */ - if ((iminor(inode) & 1) != 0) + if ((iminor(inode) & 1) != 0) { + if (device->required_tapemarks) + tape_std_terminate_write(device); tape_mtop(device, MTREW, 1); + } else { + if (device->required_tapemarks > 1) { + if (tape_mtop(device, MTWEOF, 1) == 0) + device->required_tapemarks--; + } + } + if (device->char_data.idal_buf != NULL) { idal_buffer_free(device->char_data.idal_buf); device->char_data.idal_buf = NULL; } - device->char_data.block_size = 0; tape_release(device); - tape_unassign(device); - tape_put_device(device); + filp->private_data = tape_put_device(device); + return 0; } @@ -314,7 +371,40 @@ tapechar_ioctl(struct inode *inp, struct return -EFAULT; if (op.mt_count < 0) return -EINVAL; - return tape_mtop(device, op.mt_op, op.mt_count); + + /* + * Operations that change tape position should write final + * tapemarks. + */ + switch (op.mt_op) { + case MTFSF: + case MTBSF: + case MTFSR: + case MTBSR: + case MTREW: + case MTOFFL: + case MTEOM: + case MTRETEN: + case MTBSFM: + case MTFSFM: + case MTSEEK: +#ifdef CONFIG_S390_TAPE_BLOCK + device->blk_data.medium_changed = 1; +#endif + if (device->required_tapemarks) + tape_std_terminate_write(device); + default: + ; + } + rc = tape_mtop(device, op.mt_op, op.mt_count); + + if (op.mt_op == MTWEOF && rc == 0) { + if (op.mt_count > device->required_tapemarks) + device->required_tapemarks = 0; + else + device->required_tapemarks -= op.mt_count; + } + return rc; } if (no == MTIOCPOS) { /* MTIOCPOS: query the tape position. 
*/ @@ -333,19 +423,30 @@ tapechar_ioctl(struct inode *inp, struct struct mtget get; memset(&get, 0, sizeof(get)); - rc = tape_mtop(device, MTTELL, 1); - if (rc < 0) - return rc; get.mt_type = MT_ISUNKNOWN; + get.mt_resid = 0 /* device->devstat.rescnt */; get.mt_dsreg = device->tape_state; /* FIXME: mt_gstat, mt_erreg, mt_fileno */ - get.mt_resid = 0 /* device->devstat.rescnt */; get.mt_gstat = 0; get.mt_erreg = 0; get.mt_fileno = 0; - get.mt_blkno = rc; + get.mt_gstat = device->tape_generic_status; + + if (device->medium_state == MS_LOADED) { + rc = tape_mtop(device, MTTELL, 1); + + if (rc < 0) + return rc; + + if (rc == 0) + get.mt_gstat |= GMT_BOT(~0); + + get.mt_blkno = rc; + } + if (copy_to_user((char *) data, &get, sizeof(get)) != 0) return -EFAULT; + return 0; } /* Try the discipline ioctl function. */ diff -puN drivers/s390/char/tape_core.c~s390-05-tape-driver drivers/s390/char/tape_core.c --- 25/drivers/s390/char/tape_core.c~s390-05-tape-driver Thu Jan 8 14:11:31 2004 +++ 25-akpm/drivers/s390/char/tape_core.c Thu Jan 8 14:11:31 2004 @@ -23,9 +23,10 @@ #include "tape.h" #include "tape_std.h" -#define PRINTK_HEADER "T390:" +#define PRINTK_HEADER "TAPE_CORE: " -static void tape_do_irq (struct ccw_device *, unsigned long, struct irb *); +static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *); +static void __tape_remove_request(struct tape_device *, struct tape_request *); /* * One list to contain all tape devices of all disciplines, so @@ -36,11 +37,6 @@ static struct list_head tape_device_list static rwlock_t tape_device_lock = RW_LOCK_UNLOCKED; /* - * Wait queue for tape_delete_device waits. - */ -static DECLARE_WAIT_QUEUE_HEAD(tape_delete_wq); - -/* * Pointer to debug area. */ debug_info_t *tape_dbf_area = NULL; @@ -50,8 +46,11 @@ debug_info_t *tape_dbf_area = NULL; */ const char *tape_state_verbose[TS_SIZE] = { - [TS_UNUSED] = "UNUSED", [TS_IN_USE] = "IN_USE", - [TS_INIT] = "INIT ", [TS_NOT_OPER] = "NOT_OP" + [TS_UNUSED] = "UNUSED", + [TS_IN_USE] = "IN_USE", + [TS_BLKUSE] = "BLKUSE", + [TS_INIT] = "INIT ", + [TS_NOT_OPER] = "NOT_OP" }; const char *tape_op_verbose[TO_SIZE] = @@ -71,9 +70,105 @@ const char *tape_op_verbose[TO_SIZE] = }; /* + * Some channel attached tape specific attributes. + * + * FIXME: In the future the first_minor and blocksize attribute should be + * replaced by a link to the cdev tree. + */ +static ssize_t +tape_medium_state_show(struct device *dev, char *buf) +{ + struct tape_device *tdev; + + tdev = (struct tape_device *) dev->driver_data; + return snprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state); +} + +static +DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL); + +static ssize_t +tape_first_minor_show(struct device *dev, char *buf) +{ + struct tape_device *tdev; + + tdev = (struct tape_device *) dev->driver_data; + return snprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor); +} + +static +DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL); + +static ssize_t +tape_state_show(struct device *dev, char *buf) +{ + struct tape_device *tdev; + + tdev = (struct tape_device *) dev->driver_data; + return snprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ? 
+ "OFFLINE" : tape_state_verbose[tdev->tape_state]); +} + +static +DEVICE_ATTR(state, 0444, tape_state_show, NULL); + +static ssize_t +tape_operation_show(struct device *dev, char *buf) +{ + struct tape_device *tdev; + ssize_t rc; + + tdev = (struct tape_device *) dev->driver_data; + if (tdev->first_minor < 0) + return snprintf(buf, PAGE_SIZE, "N/A\n"); + + spin_lock_irq(get_ccwdev_lock(tdev->cdev)); + if (list_empty(&tdev->req_queue)) + rc = snprintf(buf, PAGE_SIZE, "---\n"); + else { + struct tape_request *req; + + req = list_entry(tdev->req_queue.next, struct tape_request, + list); + rc = snprintf(buf, PAGE_SIZE, "%s\n", tape_op_verbose[req->op]); + } + spin_unlock_irq(get_ccwdev_lock(tdev->cdev)); + return rc; +} + +static +DEVICE_ATTR(operation, 0444, tape_operation_show, NULL); + +static ssize_t +tape_blocksize_show(struct device *dev, char *buf) +{ + struct tape_device *tdev; + + tdev = (struct tape_device *) dev->driver_data; + + return snprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size); +} + +static +DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL); + +static struct attribute *tape_attrs[] = { + &dev_attr_medium_state.attr, + &dev_attr_first_minor.attr, + &dev_attr_state.attr, + &dev_attr_operation.attr, + &dev_attr_blocksize.attr, + NULL +}; + +static struct attribute_group tape_attr_group = { + .attrs = tape_attrs, +}; + +/* * Tape state functions */ -static void +void tape_state_set(struct tape_device *device, enum tape_state newstate) { const char *str; @@ -139,14 +234,20 @@ __tape_halt_io(struct tape_device *devic /* Check if interrupt has already been processed */ if (request->callback == NULL) return 0; + rc = 0; for (retries = 0; retries < 5; retries++) { if (retries < 2) rc = ccw_device_halt(device->cdev, (long) request); else rc = ccw_device_clear(device->cdev, (long) request); - if (rc == 0) - break; /* termination successful */ + + if (rc == 0) { /* Termination successful */ + request->rc = -EIO; + request->status = TAPE_REQUEST_DONE; + return 0; + } + if (rc == -ENODEV) DBF_EXCEPTION(2, "device gone, retry\n"); else if (rc == -EIO) @@ -156,8 +257,7 @@ __tape_halt_io(struct tape_device *devic else BUG(); } - if (rc == 0) - request->status = TAPE_REQUEST_DONE; + return rc; } @@ -207,8 +307,12 @@ tape_enable_device(struct tape_device *d { int rc; - if (device->tape_state != TS_INIT) + DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline); + + if (device->tape_state != TS_INIT) { + DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state); return -EINVAL; + } /* Let the discipline have a go at the device. */ device->discipline = discipline; @@ -218,6 +322,7 @@ tape_enable_device(struct tape_device *d rc = tape_assign_minor(device); if (rc) goto out_discipline; + rc = tapechar_setup_device(device); if (rc) goto out_minor; @@ -250,7 +355,6 @@ tape_disable_device(struct tape_device * struct tape_request *request; spin_lock_irq(get_ccwdev_lock(device->cdev)); - tape_state_set(device, TS_NOT_OPER); /* Post remaining requests with -EIO */ list_for_each_safe(l, n, &device->req_queue) { request = list_entry(l, struct tape_request, list); @@ -258,7 +362,7 @@ tape_disable_device(struct tape_device * __tape_halt_io(device, request); list_del(&request->list); /* Decrease ref_count for removed request. 
*/ - tape_put_device(device); + request->device = tape_put_device(device); request->rc = -EIO; if (request->callback != NULL) request->callback(request, request->callback_data); @@ -269,6 +373,9 @@ tape_disable_device(struct tape_device * tapechar_cleanup_device(device); device->discipline->cleanup_device(device); tape_remove_minor(device); + + tape_med_state_set(device, MS_UNKNOWN); + device->tape_state = TS_INIT; } /* @@ -301,17 +408,51 @@ tape_alloc_device(void) device->tape_state = TS_INIT; device->medium_state = MS_UNKNOWN; *device->modeset_byte = 0; + device->first_minor = -1; + atomic_set(&device->ref_count, 1); + return device; } /* - * Free memory of a device structure. + * Get a reference to an existing device structure. This will automatically + * increment the reference count. */ -static void -tape_free_device(struct tape_device *device) +struct tape_device * +tape_get_device_reference(struct tape_device *device) { - kfree(device->modeset_byte); - kfree(device); + DBF_EVENT(4, "tape_get_device_reference(%p) = %i\n", device, + atomic_inc_return(&device->ref_count)); + + return device; +} + +/* + * Decrease the reference counter of a devices structure. If the + * reference counter reaches zero free the device structure. + * The function returns a NULL pointer to be used by the caller + * for clearing reference pointers. + */ +struct tape_device * +tape_put_device(struct tape_device *device) +{ + int remain; + + remain = atomic_dec_return(&device->ref_count); + if (remain > 0) { + DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, remain); + } else { + if (remain < 0) { + DBF_EVENT(4, "put device without reference\n"); + PRINT_ERR("put device without reference\n"); + } else { + DBF_EVENT(4, "tape_free_device(%p)\n", device); + kfree(device->modeset_byte); + kfree(device); + } + } + + return NULL; } /* @@ -325,9 +466,8 @@ tape_get_device(int devindex) device = ERR_PTR(-ENODEV); read_lock(&tape_device_lock); list_for_each_entry(tmp, &tape_device_list, node) { - if (tmp->first_minor * TAPE_MINORS_PER_DEV == devindex) { - device = tmp; - atomic_inc(&device->ref_count); + if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) { + device = tape_get_device_reference(tmp); break; } } @@ -336,25 +476,6 @@ tape_get_device(int devindex) } /* - * Decrease the reference counter of a devices structure. If the - * reference counter reaches zero free the device structure and - * wake up sleepers. - */ -void -tape_put_device(struct tape_device *device) -{ - if (atomic_dec_return(&device->ref_count) > 0) - return; - /* - * Reference counter dropped to zero. This means - * that the device is deleted and the last user - * of the device structure is gone. That is what - * tape_delete_device is waiting for. Do a wake up. - */ - wake_up(&tape_delete_wq); -} - -/* * Driverfs tape probe function. */ int @@ -367,12 +488,12 @@ tape_generic_probe(struct ccw_device *cd if (IS_ERR(device)) return -ENODEV; PRINT_INFO("tape device %s found\n", bus_id); - atomic_inc(&device->ref_count); cdev->dev.driver_data = device; device->cdev = cdev; - cdev->handler = tape_do_irq; + cdev->handler = __tape_do_irq; ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP); + sysfs_create_group(&cdev->dev.kobj, &tape_attr_group); return 0; } @@ -380,19 +501,14 @@ tape_generic_probe(struct ccw_device *cd /* * Driverfs tape remove function. 
*/ -int +void tape_generic_remove(struct ccw_device *cdev) { - struct tape_device *device; - - device = cdev->dev.driver_data; - cdev->dev.driver_data = NULL; - if (device != NULL) { - tape_put_device(device); - wait_event(tape_delete_wq, atomic_read(&device->ref_count) == 0); - tape_free_device(device); + ccw_device_set_offline(cdev); + if (cdev->dev.driver_data != NULL) { + sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group); + cdev->dev.driver_data = tape_put_device(cdev->dev.driver_data); } - return 0; } /* @@ -406,6 +522,8 @@ tape_alloc_request(int cplength, int dat if (datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE) BUG(); + DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize); + request = (struct tape_request *) kmalloc(sizeof(struct tape_request), GFP_KERNEL); if (request == NULL) { @@ -436,6 +554,9 @@ tape_alloc_request(int cplength, int dat } memset(request->cpdata, 0, datasize); } + DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr, + request->cpdata); + return request; } @@ -445,9 +566,10 @@ tape_alloc_request(int cplength, int dat void tape_free_request (struct tape_request * request) { + DBF_LH(6, "Free request %p\n", request); + if (request->device != NULL) { - tape_put_device(request->device); - request->device = NULL; + request->device = tape_put_device(request->device); } if (request->cpdata != NULL) kfree(request->cpdata); @@ -456,6 +578,56 @@ tape_free_request (struct tape_request * kfree(request); } +static inline void +__tape_do_io_list(struct tape_device *device) +{ + struct list_head *l, *n; + struct tape_request *request; + int rc; + + DBF_LH(6, "__tape_do_io_list(%p)\n", device); + /* + * Try to start each request on request queue until one is + * started successful. + */ + list_for_each_safe(l, n, &device->req_queue) { + request = list_entry(l, struct tape_request, list); +#ifdef CONFIG_S390_TAPE_BLOCK + if (request->op == TO_BLOCK) + device->discipline->check_locate(device, request); +#endif + rc = ccw_device_start(device->cdev, request->cpaddr, + (unsigned long) request, 0x00, + request->options); + if (rc == 0) { + request->status = TAPE_REQUEST_IN_IO; + break; + } + /* Start failed. Remove request and indicate failure. */ + DBF_EVENT(1, "tape: DOIO failed with er = %i\n", rc); + + /* Set ending status and do callback. */ + request->rc = rc; + request->status = TAPE_REQUEST_DONE; + __tape_remove_request(device, request); + } +} + +static void +__tape_remove_request(struct tape_device *device, struct tape_request *request) +{ + /* Remove from request queue. */ + list_del(&request->list); + + /* Do callback. */ + if (request->callback != NULL) + request->callback(request, request->callback_data); + + /* Start next request. */ + if (!list_empty(&device->req_queue)) + __tape_do_io_list(device); +} + /* * Write sense data to console/dbf */ @@ -514,12 +686,24 @@ __tape_do_io(struct tape_device *device, { int rc; - if (device->tape_state != TS_IN_USE) - return -ENODEV; + switch (request->op) { + case TO_MSEN: + case TO_ASSIGN: + case TO_UNASSIGN: + case TO_READ_ATTMSG: + if (device->tape_state == TS_INIT) + break; + if (device->tape_state == TS_UNUSED) + break; + default: + if (device->tape_state == TS_BLKUSE) + break; + if (device->tape_state != TS_IN_USE) + return -ENODEV; + } /* Increase use count of device for the added request. 
*/ - atomic_inc(&device->ref_count); - request->device = device; + request->device = tape_get_device_reference(device); if (list_empty(&device->req_queue)) { /* No other requests are on the queue. Start this one. */ @@ -534,9 +718,11 @@ __tape_do_io(struct tape_device *device, DBF_EVENT(1, "tape: DOIO failed with rc = %i\n", rc); return rc; } + DBF_LH(5, "Request %p added for execution.\n", request); list_add(&request->list, &device->req_queue); request->status = TAPE_REQUEST_IN_IO; } else { + DBF_LH(5, "Request %p add to queue.\n", request); list_add_tail(&request->list, &device->req_queue); request->status = TAPE_REQUEST_QUEUED; } @@ -552,6 +738,8 @@ tape_do_io_async(struct tape_device *dev { int rc; + DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request); + spin_lock_irq(get_ccwdev_lock(device->cdev)); /* Add request to request queue and try to start it. */ rc = __tape_do_io(device, request); @@ -637,48 +825,11 @@ tape_do_io_interruptible(struct tape_dev return rc; } -static inline void -__tape_do_io_list(struct tape_device *device) -{ - struct list_head *l, *n; - struct tape_request *request; - int rc; - - if (device->tape_state != TS_IN_USE) - return; - /* - * Try to start each request on request queue until one is - * started successful. - */ - list_for_each_safe(l, n, &device->req_queue) { - request = list_entry(l, struct tape_request, list); -#ifdef CONFIG_S390_TAPE_BLOCK - if (request->op == TO_BLOCK) - device->discipline->check_locate(device, request); -#endif - rc = ccw_device_start(device->cdev, request->cpaddr, - (unsigned long) request, 0x00, - request->options); - if (rc == 0) { - request->status = TAPE_REQUEST_IN_IO; - break; - } - /* Start failed. Remove request and indicate failure. */ - DBF_EVENT(1, "tape: DOIO failed with er = %i\n", rc); - list_del(&request->list); - /* Set ending status and do callback. */ - request->rc = rc; - request->status = TAPE_REQUEST_DONE; - if (request->callback != NULL) - request->callback(request, request->callback_data); - } -} - /* * Tape interrupt routine, called from the ccw_device layer */ static void -tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) +__tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) { struct tape_device *device; struct tape_request *request; @@ -693,11 +844,13 @@ tape_do_irq (struct ccw_device *cdev, un } request = (struct tape_request *) intparm; + DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request); + /* May be an unsolicited irq */ if(request != NULL) request->rescnt = irb->scsw.count; - if (irb->scsw.dstat != 0x0c){ + if (irb->scsw.dstat != 0x0c) { /* Set the 'ONLINE' flag depending on sense byte 1 */ if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE) device->tape_generic_status |= GMT_ONLINE(~0); @@ -718,6 +871,16 @@ tape_do_irq (struct ccw_device *cdev, un DBF_EVENT(6, "tape:device is not operational\n"); return; } + + /* + * Request that were canceled still come back with an interrupt. + * To detect these request the state will be set to TAPE_REQUEST_DONE. + */ + if(request != NULL && request->status == TAPE_REQUEST_DONE) { + __tape_remove_request(device, request); + return; + } + rc = device->discipline->irq(device, request, irb); /* * rc < 0 : request finished unsuccessfully. 
@@ -729,6 +892,8 @@ tape_do_irq (struct ccw_device *cdev, un final = 0; switch (rc) { case TAPE_IO_SUCCESS: + /* Upon normal completion the device _is_ online */ + device->tape_generic_status |= GMT_ONLINE(~0); final = 1; break; case TAPE_IO_PENDING: @@ -748,8 +913,6 @@ tape_do_irq (struct ccw_device *cdev, un break; case TAPE_IO_STOP: __tape_halt_io(device, request); - rc = -EIO; - final = 1; break; default: if (rc > 0) { @@ -767,59 +930,11 @@ tape_do_irq (struct ccw_device *cdev, un /* Set ending status. */ request->rc = rc; request->status = TAPE_REQUEST_DONE; - /* Remove from request queue. */ - list_del(&request->list); - /* Do callback. */ - if (request->callback != NULL) - request->callback(request, request->callback_data); + __tape_remove_request(device, request); + } else { + __tape_do_io_list(device); } - /* Start next request. */ - __tape_do_io_list(device); - } -} - -/* - * Lock a shared tape for our exclusive use. - */ -int -tape_assign(struct tape_device *device) -{ - int rc; - - rc = device->discipline->assign(device); - if (rc) { - PRINT_WARN("(%s): assign failed - device might be busy\n", - device->cdev->dev.bus_id); - DBF_EVENT(3, "(%s): assign failed - device might be busy\n", - device->cdev->dev.bus_id); - return rc; - } - DBF_EVENT(3, "(%s): assign lpum = %02x\n", - device->cdev->dev.bus_id, - 0 /* FIXME: device->devstat.lpum */ ); - return 0; -} - -/* - * Unlock a shared tape. - */ -int -tape_unassign(struct tape_device *device) -{ - int rc; - - rc = device->discipline->unassign(device); - if (rc) { - PRINT_WARN("(%s): unassign failed\n", - device->cdev->dev.bus_id); - DBF_EVENT(3, "(%s): unassign failed\n", - device->cdev->dev.bus_id); - return rc; } - DBF_EVENT(3, "(%s): unassign lpum = %02x\n", - device->cdev->dev.bus_id, - 0 /* FIXME: device->devstat.lpum */ ); - return 0; } /* @@ -837,6 +952,9 @@ tape_open(struct tape_device *device) } else if (device->tape_state == TS_IN_USE) { DBF_EVENT(6, "TAPE:dbusy\n"); rc = -EBUSY; + } else if (device->tape_state == TS_BLKUSE) { + DBF_EVENT(6, "TAPE:dbusy\n"); + rc = -EBUSY; } else if (device->discipline != NULL && !try_module_get(device->discipline->owner)) { DBF_EVENT(6, "TAPE:nodisc\n"); @@ -884,7 +1002,7 @@ tape_mtop(struct tape_device *device, in /* We assume that the backends can handle count up to 500. */ if (mt_op == MTBSR || mt_op == MTFSR || mt_op == MTFSF || - mt_op == MTBSR || mt_op == MTFSFM || mt_op == MTBSFM) { + mt_op == MTBSF || mt_op == MTFSFM || mt_op == MTBSFM) { rc = 0; for (; mt_count > 500; mt_count -= 500) if ((rc = fn(device, 500)) != 0) @@ -898,14 +1016,74 @@ tape_mtop(struct tape_device *device, in } /* + * Hutplug event support. + */ +void +tape_hotplug_event(struct tape_device *device, int devmaj, int action) { +#ifdef CONFIG_HOTPLUG + char *argv[3]; + char *envp[8]; + char busid[20]; + char major[20]; + char minor[20]; + + /* Call the busid DEVNO to be compatible with old tape.agent. 
*/ + sprintf(busid, "DEVNO=%s", device->cdev->dev.bus_id); + sprintf(major, "MAJOR=%d", devmaj); + sprintf(minor, "MINOR=%d", device->first_minor); + + argv[0] = hotplug_path; + argv[1] = "tape"; + argv[2] = NULL; + + envp[0] = "HOME=/"; + envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; + + switch (action) { + case TAPE_HOTPLUG_CHAR_ADD: + case TAPE_HOTPLUG_BLOCK_ADD: + envp[2] = "ACTION=add"; + break; + case TAPE_HOTPLUG_CHAR_REMOVE: + case TAPE_HOTPLUG_BLOCK_REMOVE: + envp[2] = "ACTION=remove"; + break; + default: + BUG(); + } + switch (action) { + case TAPE_HOTPLUG_CHAR_ADD: + case TAPE_HOTPLUG_CHAR_REMOVE: + envp[3] = "INTERFACE=char"; + break; + case TAPE_HOTPLUG_BLOCK_ADD: + case TAPE_HOTPLUG_BLOCK_REMOVE: + envp[3] = "INTERFACE=block"; + break; + default: + BUG(); + } + envp[4] = busid; + envp[5] = major; + envp[6] = minor; + envp[7] = NULL; + + call_usermodehelper(argv[0], argv, envp, 0); +#endif +} + +/* * Tape init function. */ static int tape_init (void) { - tape_dbf_area = debug_register ( "tape", 1, 2, 3*sizeof(long)); + tape_dbf_area = debug_register ( "tape", 1, 2, 4*sizeof(long)); debug_register_view(tape_dbf_area, &debug_sprintf_view); - DBF_EVENT(3, "tape init: ($Revision: 1.26 $)\n"); +#ifdef DBF_LIKE_HELL + debug_set_level(tape_dbf_area, 6); +#endif + DBF_EVENT(3, "tape init: ($Revision: 1.41 $)\n"); tape_proc_init(); tapechar_init (); tapeblock_init (); @@ -930,7 +1108,8 @@ tape_exit(void) MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and " "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)"); MODULE_DESCRIPTION("Linux on zSeries channel attached " - "tape device driver ($Revision: 1.26 $)"); + "tape device driver ($Revision: 1.41 $)"); +MODULE_LICENSE("GPL"); module_init(tape_init); module_exit(tape_exit); @@ -941,6 +1120,7 @@ EXPORT_SYMBOL(tape_disable_device); EXPORT_SYMBOL(tape_generic_probe); EXPORT_SYMBOL(tape_enable_device); EXPORT_SYMBOL(tape_put_device); +EXPORT_SYMBOL(tape_get_device_reference); EXPORT_SYMBOL(tape_state_verbose); EXPORT_SYMBOL(tape_op_verbose); EXPORT_SYMBOL(tape_state_set); diff -puN drivers/s390/char/tape.h~s390-05-tape-driver drivers/s390/char/tape.h --- 25/drivers/s390/char/tape.h~s390-05-tape-driver Thu Jan 8 14:11:31 2004 +++ 25-akpm/drivers/s390/char/tape.h Thu Jan 8 14:11:31 2004 @@ -12,19 +12,33 @@ #ifndef _TAPE_H #define _TAPE_H +#include +#include +#include #include #include #include #include #include #include -#include -#include -#include +#include struct gendisk; /* + * Define DBF_LIKE_HELL for lots of messages in the debug feature. + */ +#define DBF_LIKE_HELL +#ifdef DBF_LIKE_HELL +#define DBF_LH(level, str, ...) \ +do { \ + debug_sprintf_event(tape_dbf_area, level, str, ## __VA_ARGS__); \ +} while (0) +#else +#define DBF_LH(level, str, ...) do {} while(0) +#endif + +/* * macros s390 debug feature (dbf) */ #define DBF_EVENT(d_level, d_str...) 
\ @@ -46,7 +60,11 @@ do { \ #define TAPEBLOCK_HSEC_S2B 2 #define TAPEBLOCK_RETRIES 5 -#define TAPE_BUSY(td) (td->treq != NULL) +/* Event types for hotplug */ +#define TAPE_HOTPLUG_CHAR_ADD 1 +#define TAPE_HOTPLUG_BLOCK_ADD 2 +#define TAPE_HOTPLUG_CHAR_REMOVE 3 +#define TAPE_HOTPLUG_BLOCK_REMOVE 4 enum tape_medium_state { MS_UNKNOWN, @@ -58,6 +76,7 @@ enum tape_medium_state { enum tape_state { TS_UNUSED=0, TS_IN_USE, + TS_BLKUSE, TS_INIT, TS_NOT_OPER, TS_SIZE @@ -130,8 +149,6 @@ struct tape_discipline { struct module *owner; int (*setup_device)(struct tape_device *); void (*cleanup_device)(struct tape_device *); - int (*assign)(struct tape_device *); - int (*unassign)(struct tape_device *); int (*irq)(struct tape_device *, struct tape_request *, struct irb *); struct tape_request *(*read_block)(struct tape_device *, size_t); struct tape_request *(*write_block)(struct tape_device *, size_t); @@ -168,48 +185,60 @@ struct tape_char_data { struct tape_blk_data { /* Block device request queue. */ - request_queue_t *request_queue; - spinlock_t request_queue_lock; - /* Block frontend tasklet */ - struct tasklet_struct tasklet; + request_queue_t * request_queue; + spinlock_t request_queue_lock; + + /* Task to move entries from block request to CCS request queue. */ + struct work_struct requeue_task; + atomic_t requeue_scheduled; + /* Current position on the tape. */ - long block_position; - struct gendisk *disk; + long block_position; + int medium_changed; + struct gendisk * disk; }; #endif /* Tape Info */ struct tape_device { /* entry in tape_device_list */ - struct list_head node; + struct list_head node; - struct ccw_device *cdev; + struct ccw_device * cdev; /* Device discipline information. */ - struct tape_discipline *discipline; - void *discdata; + struct tape_discipline * discipline; + void * discdata; /* Generic status flags */ - long tape_generic_status; + long tape_generic_status; /* Device state information. */ - wait_queue_head_t state_change_wq; - enum tape_state tape_state; - enum tape_medium_state medium_state; - unsigned char *modeset_byte; + wait_queue_head_t state_change_wq; + enum tape_state tape_state; + enum tape_medium_state medium_state; + unsigned char * modeset_byte; /* Reference count. */ - atomic_t ref_count; + atomic_t ref_count; /* Request queue. */ - struct list_head req_queue; + struct list_head req_queue; + + /* Each tape device has (currently) two minor numbers. */ + int first_minor; + + /* Number of tapemarks required for correct termination. 
*/ + int required_tapemarks; + + /* Block ID of the BOF */ + unsigned int bof; - int first_minor; /* each tape device has two minors */ /* Character device frontend data */ - struct tape_char_data char_data; + struct tape_char_data char_data; #ifdef CONFIG_S390_TAPE_BLOCK /* Block dev frontend data */ - struct tape_blk_data blk_data; + struct tape_blk_data blk_data; #endif }; @@ -219,6 +248,7 @@ extern void tape_free_request(struct tap extern int tape_do_io(struct tape_device *, struct tape_request *); extern int tape_do_io_async(struct tape_device *, struct tape_request *); extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *); +void tape_hotplug_event(struct tape_device *, int major, int action); static inline int tape_do_io_free(struct tape_device *device, struct tape_request *request) @@ -234,19 +264,19 @@ extern int tape_oper_handler(int irq, in extern void tape_noper_handler(int irq, int status); extern int tape_open(struct tape_device *); extern int tape_release(struct tape_device *); -extern int tape_assign(struct tape_device *); -extern int tape_unassign(struct tape_device *); extern int tape_mtop(struct tape_device *, int, int); +extern void tape_state_set(struct tape_device *, enum tape_state); extern int tape_enable_device(struct tape_device *, struct tape_discipline *); extern void tape_disable_device(struct tape_device *device); /* Externals from tape_devmap.c */ extern int tape_generic_probe(struct ccw_device *); -extern int tape_generic_remove(struct ccw_device *); +extern void tape_generic_remove(struct ccw_device *); extern struct tape_device *tape_get_device(int devindex); -extern void tape_put_device(struct tape_device *); +extern struct tape_device *tape_get_device_reference(struct tape_device *); +extern struct tape_device *tape_put_device(struct tape_device *); /* Externals from tape_char.c */ extern int tapechar_init(void); diff -puN drivers/s390/char/tape_proc.c~s390-05-tape-driver drivers/s390/char/tape_proc.c --- 25/drivers/s390/char/tape_proc.c~s390-05-tape-driver Thu Jan 8 14:11:31 2004 +++ 25-akpm/drivers/s390/char/tape_proc.c Thu Jan 8 14:11:31 2004 @@ -18,7 +18,7 @@ #include "tape.h" -#define PRINTK_HEADER "T390:" +#define PRINTK_HEADER "TAPE_PROC: " static const char *tape_med_st_verbose[MS_SIZE] = { @@ -42,19 +42,19 @@ static int tape_proc_show(struct seq_fil n = (unsigned long) v - 1; if (!n) { - seq_printf(m, "TapeNo\tDevNo\tCuType\tCuModel\tDevType\t" - "DevMod\tBlkSize\tState\tOp\tMedState\n"); + seq_printf(m, "TapeNo\tBusID CuType/Model\t" + "DevType/Model\tBlkSize\tState\tOp\tMedState\n"); } device = tape_get_device(n); if (IS_ERR(device)) return 0; spin_lock_irq(get_ccwdev_lock(device->cdev)); seq_printf(m, "%d\t", (int) n); - seq_printf(m, "%s\t", device->cdev->dev.bus_id); - seq_printf(m, "%04X\t", device->cdev->id.cu_type); + seq_printf(m, "%-10.10s ", device->cdev->dev.bus_id); + seq_printf(m, "%04X/", device->cdev->id.cu_type); seq_printf(m, "%02X\t", device->cdev->id.cu_model); - seq_printf(m, "%04X\t", device->cdev->id.dev_type); - seq_printf(m, "%02X\t", device->cdev->id.dev_model); + seq_printf(m, "%04X/", device->cdev->id.dev_type); + seq_printf(m, "%02X\t\t", device->cdev->id.dev_model); if (device->char_data.block_size == 0) seq_printf(m, "auto\t"); else diff -puN drivers/s390/char/tape_std.c~s390-05-tape-driver drivers/s390/char/tape_std.c --- 25/drivers/s390/char/tape_std.c~s390-05-tape-driver Thu Jan 8 14:11:31 2004 +++ 25-akpm/drivers/s390/char/tape_std.c Thu Jan 8 14:11:31 2004 @@ -8,12 +8,14 @@ * Michael 
Holzheu * Tuan Ngo-Anh * Martin Schwidefsky + * Stefan Bader */ #include #include #include #include +#include #include #include @@ -23,23 +25,72 @@ #include "tape.h" #include "tape_std.h" -#define PRINTK_HEADER "T3xxx:" +#define PRINTK_HEADER "TAPE_STD: " /* * tape_std_assign */ +static void +tape_std_assign_timeout(unsigned long data) +{ + struct tape_request * request; + struct tape_device * device; + + request = (struct tape_request *) data; + if ((device = request->device) == NULL) + BUG(); + + spin_lock_irq(get_ccwdev_lock(device->cdev)); + if (request->callback != NULL) { + DBF_EVENT(3, "%s: Assignment timeout. Device busy.\n", + device->cdev->dev.bus_id); + PRINT_ERR("%s: Assignment timeout. Device busy.\n", + device->cdev->dev.bus_id); + ccw_device_clear(device->cdev, (long) request); + } + spin_unlock_irq(get_ccwdev_lock(device->cdev)); +} + int tape_std_assign(struct tape_device *device) { + int rc; + struct timer_list timeout; struct tape_request *request; request = tape_alloc_request(2, 11); if (IS_ERR(request)) return PTR_ERR(request); + request->op = TO_ASSIGN; tape_ccw_cc(request->cpaddr, ASSIGN, 11, request->cpdata); tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL); - return tape_do_io_free(device, request); + + /* + * The assign command sometimes blocks if the device is assigned + * to another host (actually this shouldn't happen but it does). + * So we set up a timeout for this call. + */ + init_timer(&timeout); + timeout.function = tape_std_assign_timeout; + timeout.data = (unsigned long) request; + timeout.expires = jiffies + 2 * HZ; + add_timer(&timeout); + + rc = tape_do_io_interruptible(device, request); + + del_timer(&timeout); + + if (rc != 0) { + PRINT_WARN("%s: assign failed - device might be busy\n", + device->cdev->dev.bus_id); + DBF_EVENT(3, "%s: assign failed - device might be busy\n", + device->cdev->dev.bus_id); + } else { + DBF_EVENT(3, "%s: Tape assigned\n", device->cdev->dev.bus_id); + } + tape_free_request(request); + return rc; } /* @@ -48,30 +99,36 @@ tape_std_assign(struct tape_device *devi int tape_std_unassign (struct tape_device *device) { + int rc; struct tape_request *request; request = tape_alloc_request(2, 11); if (IS_ERR(request)) return PTR_ERR(request); + request->op = TO_UNASSIGN; tape_ccw_cc(request->cpaddr, UNASSIGN, 11, request->cpdata); tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL); - return tape_do_io_free(device, request); + + if ((rc = tape_do_io(device, request)) != 0) { + DBF_EVENT(3, "%s: Unassign failed\n", device->cdev->dev.bus_id); + PRINT_WARN("%s: Unassign failed\n", device->cdev->dev.bus_id); + } else { + DBF_EVENT(3, "%s: Tape unassigned\n", device->cdev->dev.bus_id); + } + tape_free_request(request); + return rc; } /* * TAPE390_DISPLAY: Show a string on the tape display. 
*/ int -tape_std_display(struct tape_device *device, int cmd, unsigned long arg) +tape_std_display(struct tape_device *device, struct display_struct *disp) { - struct display_struct d_struct; struct tape_request *request; int rc; - if (copy_from_user(&d_struct, (char *) arg, sizeof(d_struct)) != 0) - return -EFAULT; - request = tape_alloc_request(2, 17); if (IS_ERR(request)) { DBF_EVENT(3, "TAPE: load display failed\n"); @@ -79,9 +136,10 @@ tape_std_display(struct tape_device *dev } request->op = TO_DIS; - *(unsigned char *) request->cpdata = d_struct.cntrl; - memcpy(((unsigned char *) request->cpdata) + 1, d_struct.message1, 8); - memcpy(((unsigned char *) request->cpdata) + 9, d_struct.message2, 8); + *(unsigned char *) request->cpdata = disp->cntrl; + DBF_EVENT(5, "TAPE: display cntrl=%04x\n", disp->cntrl); + memcpy(((unsigned char *) request->cpdata) + 1, disp->message1, 8); + memcpy(((unsigned char *) request->cpdata) + 9, disp->message2, 8); ASCEBC(((unsigned char*) request->cpdata) + 1, 16); tape_ccw_cc(request->cpaddr, LOAD_DISPLAY, 17, request->cpdata); @@ -118,6 +176,25 @@ tape_std_read_block_id(struct tape_devic return rc; } +int +tape_std_terminate_write(struct tape_device *device) +{ + int rc; + + if(device->required_tapemarks == 0) + return 0; + + DBF_LH(5, "tape%d: terminate write %dxEOF\n", device->first_minor, + device->required_tapemarks); + + rc = tape_mtop(device, MTWEOF, device->required_tapemarks); + if (rc) + return rc; + + device->required_tapemarks = 0; + return tape_mtop(device, MTBSR, 1); +} + /* * MTLOAD: Loads the tape. * The default implementation just wait until the tape medium state changes @@ -138,6 +215,7 @@ tape_std_mtsetblk(struct tape_device *de { struct idal_buffer *new; + DBF_LH(6, "tape_std_mtsetblk(%d)\n", count); if (count <= 0) { /* * Just set block_size to 0. tapechar_read/tapechar_write @@ -151,6 +229,15 @@ tape_std_mtsetblk(struct tape_device *de device->char_data.idal_buf->size == count) /* We already have a idal buffer of that size. */ return 0; + + if (count > MAX_BLOCKSIZE) { + DBF_EVENT(3, "Invalid block size (%d > %d) given.\n", + count, MAX_BLOCKSIZE); + PRINT_ERR("Invalid block size (%d > %d) given.\n", + count, MAX_BLOCKSIZE); + return -EINVAL; + } + /* Allocate a new idal buffer. 
*/ new = idal_buffer_alloc(count, 0); if (new == NULL) @@ -159,6 +246,9 @@ tape_std_mtsetblk(struct tape_device *de idal_buffer_free(device->char_data.idal_buf); device->char_data.idal_buf = new; device->char_data.block_size = count; + + DBF_LH(6, "new blocksize is %d\n", device->char_data.block_size); + return 0; } @@ -192,6 +282,7 @@ tape_std_mtfsf(struct tape_device *devic device->modeset_byte); ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count); ccw = tape_ccw_end(ccw, NOP, 0, NULL); + /* execute it */ return tape_do_io_free(device, request); } @@ -205,6 +296,7 @@ tape_std_mtfsr(struct tape_device *devic { struct tape_request *request; struct ccw1 *ccw; + int rc; request = tape_alloc_request(mt_count + 2, 0); if (IS_ERR(request)) @@ -215,8 +307,16 @@ tape_std_mtfsr(struct tape_device *devic device->modeset_byte); ccw = tape_ccw_repeat(ccw, FORSPACEBLOCK, mt_count); ccw = tape_ccw_end(ccw, NOP, 0, NULL); + /* execute it */ - return tape_do_io_free(device, request); + rc = tape_do_io(device, request); + if (rc == 0 && request->rescnt > 0) { + DBF_LH(3, "FSR over tapemark\n"); + rc = 1; + } + tape_free_request(request); + + return rc; } /* @@ -228,6 +328,7 @@ tape_std_mtbsr(struct tape_device *devic { struct tape_request *request; struct ccw1 *ccw; + int rc; request = tape_alloc_request(mt_count + 2, 0); if (IS_ERR(request)) @@ -238,8 +339,16 @@ tape_std_mtbsr(struct tape_device *devic device->modeset_byte); ccw = tape_ccw_repeat(ccw, BACKSPACEBLOCK, mt_count); ccw = tape_ccw_end(ccw, NOP, 0, NULL); + /* execute it */ - return tape_do_io_free(device, request); + rc = tape_do_io(device, request); + if (rc == 0 && request->rescnt > 0) { + DBF_LH(3, "BSR over tapemark\n"); + rc = 1; + } + tape_free_request(request); + + return rc; } /* @@ -260,6 +369,7 @@ tape_std_mtweof(struct tape_device *devi device->modeset_byte); ccw = tape_ccw_repeat(ccw, WRITETAPEMARK, mt_count); ccw = tape_ccw_end(ccw, NOP, 0, NULL); + /* execute it */ return tape_do_io_free(device, request); } @@ -284,6 +394,7 @@ tape_std_mtbsfm(struct tape_device *devi device->modeset_byte); ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count); ccw = tape_ccw_end(ccw, NOP, 0, NULL); + /* execute it */ return tape_do_io_free(device, request); } @@ -309,18 +420,12 @@ tape_std_mtbsf(struct tape_device *devic ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count); ccw = tape_ccw_end(ccw, NOP, 0, NULL); /* execute it */ - rc = tape_do_io(device, request); + rc = tape_do_io_free(device, request); if (rc == 0) { - request->op = TO_FSF; - /* need to skip forward over the filemark. */ - tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, - device->modeset_byte); - tape_ccw_cc(request->cpaddr + 1, FORSPACEFILE, 0, NULL); - tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); - /* execute it */ - rc = tape_do_io(device, request); + rc = tape_mtop(device, MTFSR, 1); + if (rc > 0) + rc = 0; } - tape_free_request(request); return rc; } @@ -346,18 +451,13 @@ tape_std_mtfsfm(struct tape_device *devi ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count); ccw = tape_ccw_end(ccw, NOP, 0, NULL); /* execute it */ - rc = tape_do_io(device, request); + rc = tape_do_io_free(device, request); if (rc == 0) { - request->op = TO_BSF; - /* need to skip forward over the filemark. 
*/ - tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, - device->modeset_byte); - tape_ccw_cc(request->cpaddr + 1, BACKSPACEFILE, 0, NULL); - tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); - /* execute it */ - rc = tape_do_io(device, request); + rc = tape_mtop(device, MTBSR, 1); + if (rc > 0) + rc = 0; } - tape_free_request(request); + return rc; } @@ -378,6 +478,7 @@ tape_std_mtrew(struct tape_device *devic device->modeset_byte); tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL); tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); + /* execute it */ return tape_do_io_free(device, request); } @@ -399,6 +500,7 @@ tape_std_mtoffl(struct tape_device *devi tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); tape_ccw_cc(request->cpaddr + 1, REWIND_UNLOAD, 0, NULL); tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); + /* execute it */ return tape_do_io_free(device, request); } @@ -430,22 +532,28 @@ tape_std_mtnop(struct tape_device *devic int tape_std_mteom(struct tape_device *device, int mt_count) { - struct tape_request *request; + int rc; - request = tape_alloc_request(4, 0); - if (IS_ERR(request)) - return PTR_ERR(request); - request->op = TO_FSF; - /* setup ccws */ - tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); - tape_ccw_cc(request->cpaddr + 1,FORSPACEFILE, 0, NULL); - tape_ccw_cc(request->cpaddr + 2, NOP, 0, NULL); - tape_ccw_end(request->cpaddr + 3, CCW_CMD_TIC, 0, request->cpaddr); - /* execute it */ - tape_do_io_interruptible(device, request); - tape_free_request(request); - /* MTEOM/MTRETEN errors get ignored. */ - return 0; + /* + * Seek from the beginning of tape (rewind). + */ + if ((rc = tape_mtop(device, MTREW, 1)) < 0) + return rc; + + /* + * The logical end of volume is given by two sewuential tapemarks. + * Look for this by skipping to the next file (over one tapemark) + * and then test for another one (fsr returns 1 if a tapemark was + * encountered). 
+ */ + do { + if ((rc = tape_mtop(device, MTFSF, 1)) < 0) + return rc; + if ((rc = tape_mtop(device, MTFSR, 1)) < 0) + return rc; + } while (rc == 0); + + return tape_mtop(device, MTBSR, 1); } /* @@ -469,7 +577,7 @@ tape_std_mtreten(struct tape_device *dev /* execute it, MTRETEN rc gets ignored */ rc = tape_do_io_interruptible(device, request); tape_free_request(request); - return tape_std_mtrew(device, 1); + return tape_mtop(device, MTREW, 1); } /* @@ -480,7 +588,7 @@ tape_std_mterase(struct tape_device *dev { struct tape_request *request; - request = tape_alloc_request(5, 0); + request = tape_alloc_request(6, 0); if (IS_ERR(request)) return PTR_ERR(request); request->op = TO_DSE; @@ -489,7 +597,9 @@ tape_std_mterase(struct tape_device *dev tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL); tape_ccw_cc(request->cpaddr + 2, ERASE_GAP, 0, NULL); tape_ccw_cc(request->cpaddr + 3, DATA_SEC_ERASE, 0, NULL); - tape_ccw_end(request->cpaddr + 4, NOP, 0, NULL); + tape_ccw_cc(request->cpaddr + 4, REWIND, 0, NULL); + tape_ccw_end(request->cpaddr + 5, NOP, 0, NULL); + /* execute it */ return tape_do_io_free(device, request); } @@ -500,18 +610,7 @@ tape_std_mterase(struct tape_device *dev int tape_std_mtunload(struct tape_device *device, int mt_count) { - struct tape_request *request; - - request = tape_alloc_request(3, 32); - if (IS_ERR(request)) - return PTR_ERR(request); - request->op = TO_RUN; - /* setup ccws */ - tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); - tape_ccw_cc(request->cpaddr + 1, REWIND_UNLOAD, 0, NULL); - tape_ccw_end(request->cpaddr + 2, SENSE, 32, request->cpdata); - /* execute it */ - return tape_do_io_free(device, request); + return tape_mtop(device, MTOFFL, mt_count); } /* diff -puN drivers/s390/char/tape_std.h~s390-05-tape-driver drivers/s390/char/tape_std.h --- 25/drivers/s390/char/tape_std.h~s390-05-tape-driver Thu Jan 8 14:11:31 2004 +++ 25-akpm/drivers/s390/char/tape_std.h Thu Jan 8 14:11:31 2004 @@ -10,9 +10,16 @@ */ #ifndef _TAPE_STD_H - #define _TAPE_STD_H +#include + +/* + * Biggest block size to handle. Currently 64K because we only build + * channel programs without data chaining. + */ +#define MAX_BLOCKSIZE 65535 + /* * The CCW commands for the Tape type of command. */ @@ -105,7 +112,8 @@ struct tape_request *tape_std_bwrite(str int tape_std_assign(struct tape_device *); int tape_std_unassign(struct tape_device *); int tape_std_read_block_id(struct tape_device *device, __u64 *id); -int tape_std_display(struct tape_device *, int, unsigned long); +int tape_std_display(struct tape_device *, struct display_struct *disp); +int tape_std_terminate_write(struct tape_device *); /* Standard magnetic tape commands. */ int tape_std_mtbsf(struct tape_device *, int); _
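
A note on the reference counting scheme above (illustration only, not part of
the patch): tape_get_device_reference() increments the device reference count
and hands the pointer back, while tape_put_device() drops one reference and
always returns NULL, so a caller can release its reference and clear its
pointer in a single statement, as tape_free_request() does with
request->device = tape_put_device(request->device). The self-contained C
sketch below shows that calling pattern; the toy_* names, the plain int
counter and the main() driver are stand-ins invented for the example, the
real driver uses atomic_t and struct tape_device.

	/*
	 * Simplified stand-in for the driver's reference counting helpers.
	 * Compiles on its own; not the driver code.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct toy_device {
		int ref_count;		/* the driver uses atomic_t */
		char *payload;		/* freed together with the device */
	};

	static struct toy_device *
	toy_get_device_reference(struct toy_device *device)
	{
		device->ref_count++;
		return device;
	}

	/*
	 * Drop one reference; free the structure when the last one is gone.
	 * Always returns NULL so the caller can clear its pointer in the
	 * same statement:  ptr = toy_put_device(ptr);
	 */
	static struct toy_device *
	toy_put_device(struct toy_device *device)
	{
		if (--device->ref_count == 0) {
			free(device->payload);
			free(device);
		}
		return NULL;
	}

	int main(void)
	{
		struct toy_device *device, *user;

		device = calloc(1, sizeof(*device));
		device->payload = malloc(16);
		device->ref_count = 1;			 /* creator's reference */

		user = toy_get_device_reference(device); /* e.g. a queued request */

		user = toy_put_device(user);		 /* device still alive */
		device = toy_put_device(device);	 /* last put frees it */

		printf("user=%p device=%p\n", (void *) user, (void *) device);
		return 0;
	}

Because every put site assigns the return value back to the pointer it came
from, a stale reference pointer cannot survive past the put, which is the
point of having tape_put_device() return NULL instead of void.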