2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* IDE I/O functions
|
|
|
|
*
|
|
|
|
* Basic PIO and command management functionality.
|
|
|
|
*
|
|
|
|
* This code was split off from ide.c. See ide.c for history and original
|
|
|
|
* copyrights.
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify it
|
|
|
|
* under the terms of the GNU General Public License as published by the
|
|
|
|
* Free Software Foundation; either version 2, or (at your option) any
|
|
|
|
* later version.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful, but
|
|
|
|
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* General Public License for more details.
|
|
|
|
*
|
|
|
|
* For the avoidance of doubt the "preferred form" of this code is one which
|
|
|
|
* is in an open non patent encumbered format. Where cryptographic key signing
|
|
|
|
* forms part of the process of creating an executable the information
|
|
|
|
* including keys needed to generate an equivalently functional executable
|
|
|
|
* are deemed to be part of the source code.
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/string.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/timer.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/major.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/genhd.h>
|
|
|
|
#include <linux/blkpg.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/pci.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/ide.h>
|
2008-10-11 04:39:27 +08:00
|
|
|
#include <linux/hdreg.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/completion.h>
|
|
|
|
#include <linux/reboot.h>
|
|
|
|
#include <linux/cdrom.h>
|
|
|
|
#include <linux/seq_file.h>
|
|
|
|
#include <linux/device.h>
|
|
|
|
#include <linux/kmod.h>
|
|
|
|
#include <linux/scatterlist.h>
|
2007-10-19 14:40:25 +08:00
|
|
|
#include <linux/bitops.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
#include <asm/byteorder.h>
|
|
|
|
#include <asm/irq.h>
|
|
|
|
#include <asm/uaccess.h>
|
|
|
|
#include <asm/io.h>
|
|
|
|
|
2006-02-03 19:04:56 +08:00
|
|
|
/*
 * __ide_end_request - common bottom half of request completion
 * @drive:	drive the request was issued to
 * @rq:		request being completed
 * @uptodate:	> 0 success, 0 -> -EIO, < 0 passed through as the error code
 * @nr_bytes:	number of bytes to complete
 * @dequeue:	nonzero to clear the hwgroup's current request when @rq
 *		is fully finished
 *
 * Returns 1 while the request still has bytes outstanding, 0 once it
 * has been fully completed.
 */
static int __ide_end_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, unsigned int nr_bytes, int dequeue)
{
	int ret = 1;
	int error = 0;

	/* map the tristate uptodate value onto a negative errno */
	if (uptodate <= 0)
		error = uptodate ? uptodate : -EIO;

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && error)
		nr_bytes = rq->hard_nr_sectors << 9;

	/* non-fs requests report failure through rq->errors */
	if (!blk_fs_request(rq) && error && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
	    drive->retry_pio <= 3) {
		drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
		ide_dma_on(drive);
	}

	/* blk_end_request() returns 0 once the whole request is done */
	if (!blk_end_request(rq, error, nr_bytes))
		ret = 0;

	if (ret == 0 && dequeue)
		drive->hwif->hwgroup->rq = NULL;

	return ret;
}
|
|
|
|
|
|
|
|
/**
 *	ide_end_request	-	complete an IDE I/O
 *	@drive: IDE device for the I/O
 *	@uptodate: > 0 on success, 0 maps to -EIO, < 0 is the error code
 *	@nr_sectors: number of sectors completed (0 means "whole current
 *		     segment" -- see below)
 *
 *	This is our end_request wrapper function. We complete the I/O
 *	update random number input and dequeue the request, which if
 *	it was tagged may be out of order.
 */

int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
{
	unsigned int nr_bytes = nr_sectors << 9;
	struct request *rq = drive->hwif->hwgroup->rq;

	/*
	 * Callers passing 0 sectors mean "complete the current chunk":
	 * the full payload for packet commands, the current hard segment
	 * for fs requests.
	 */
	if (!nr_bytes) {
		if (blk_pc_request(rq))
			nr_bytes = rq->data_len;
		else
			nr_bytes = rq->hard_cur_sectors << 9;
	}

	/* dequeue=1: clear hwgroup->rq once the request is finished */
	return __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
}
EXPORT_SYMBOL(ide_end_request);
|
|
|
|
|
[PATCH] IDE CD end-of media error fix
This is a patch from Alan that fixes a real ide-cd.c regression causing
bogus "Media Check" failures for perfectly valid Fedora install ISOs, on
certain CD-ROM drives.
This is a forward port to 2.6.16 (from RHEL) of the minimal changes for the
end of media problem. It may not be sufficient for some controllers
(promise notably) and it does not touch the locking so the error path
locking is as horked as in mainstream.
From: Ingo Molnar <mingo@elte.hu>
I have ported the patch to 2.6.17-rc4 and tested it by provoking
end-of-media IO errors with an unaligned ISO image. Unlike the vanilla
kernel, the patched kernel interpreted the error condition correctly with
512 byte granularity:
hdc: command error: status=0x51 { DriveReady SeekComplete Error }
hdc: command error: error=0x54 { AbortedCommand LastFailedSense=0x05 }
ide: failed opcode was: unknown
ATAPI device hdc:
Error: Illegal request -- (Sense key=0x05)
Illegal mode for this track or incompatible medium -- (asc=0x64, ascq=0x00)
The failed "Read 10" packet command was:
"28 00 00 04 fb 78 00 00 06 00 00 00 00 00 00 00 "
end_request: I/O error, dev hdc, sector 1306080
Buffer I/O error on device hdc, logical block 163260
Buffer I/O error on device hdc, logical block 163261
Buffer I/O error on device hdc, logical block 163262
the unpatched kernel produces an incorrect error dump:
hdc: command error: status=0x51 { DriveReady SeekComplete Error }
hdc: command error: error=0x54 { AbortedCommand LastFailedSense=0x05 }
ide: failed opcode was: unknown
end_request: I/O error, dev hdc, sector 1306080
Buffer I/O error on device hdc, logical block 163260
hdc: command error: status=0x51 { DriveReady SeekComplete Error }
hdc: command error: error=0x54 { AbortedCommand LastFailedSense=0x05 }
ide: failed opcode was: unknown
end_request: I/O error, dev hdc, sector 1306088
Buffer I/O error on device hdc, logical block 163261
hdc: command error: status=0x51 { DriveReady SeekComplete Error }
hdc: command error: error=0x54 { AbortedCommand LastFailedSense=0x05 }
ide: failed opcode was: unknown
end_request: I/O error, dev hdc, sector 1306096
Buffer I/O error on device hdc, logical block 163262
I do not have the right type of CD-ROM drive to reproduce the end-of-media
data corruption bug myself, but this same patch in RHEL solved it.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
Cc: Bartlomiej Zolnierkiewicz <B.Zolnierkiewicz@elka.pw.edu.pl>
Cc: Jens Axboe <axboe@suse.de>
Cc: Matt Mackall <mpm@selenic.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-25 20:47:44 +08:00
|
|
|
/**
|
|
|
|
* ide_end_dequeued_request - complete an IDE I/O
|
|
|
|
* @drive: IDE device for the I/O
|
|
|
|
* @uptodate:
|
|
|
|
* @nr_sectors: number of sectors completed
|
|
|
|
*
|
|
|
|
* Complete an I/O that is no longer on the request queue. This
|
|
|
|
* typically occurs when we pull the request and issue a REQUEST_SENSE.
|
|
|
|
* We must still finish the old request but we must not tamper with the
|
|
|
|
* queue in the meantime.
|
|
|
|
*
|
|
|
|
* NOTE: This path does not handle barrier, but barrier is not supported
|
|
|
|
* on ide-cd anyway.
|
|
|
|
*/
|
|
|
|
|
|
|
|
int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, int nr_sectors)
{
	/* the request must already have been started by the block layer */
	BUG_ON(!blk_rq_started(rq));

	/* dequeue=0: @rq was already pulled off the queue, leave
	 * hwgroup->rq alone */
	return __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
}
EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/**
 *	ide_end_drive_cmd	-	end an explicit drive command
 *	@drive: drive the command was issued to
 *	@stat: status bits
 *	@err: error bits
 *
 *	Clean up after success/failure of an explicit drive command.
 *	These get thrown onto the queue so they are synchronized with
 *	real I/O operations on the drive.
 *
 *	In LBA48 mode we have to read the register set twice to get
 *	all the extra information out.
 */

void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
{
	ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
	struct request *rq = hwgroup->rq;

	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *task = (ide_task_t *)rq->special;

		if (task) {
			struct ide_taskfile *tf = &task->tf;

			/* record final status/error in the taskfile ... */
			tf->error = err;
			tf->status = stat;

			/* ... and latch the remaining registers */
			drive->hwif->tp_ops->tf_read(drive, task);

			/* dynamically allocated tasks are owned by us now */
			if (task->tf_flags & IDE_TFLAG_DYN)
				kfree(task);
		}
	} else if (blk_pm_request(rq)) {
		struct request_pm_state *pm = rq->data;

		ide_complete_power_step(drive, rq);
		/* PM requests are only completed once the whole power
		 * sequence has run; intermediate steps return early */
		if (pm->pm_step == IDE_PM_COMPLETED)
			ide_complete_pm_request(drive, rq);
		return;
	}

	hwgroup->rq = NULL;

	rq->errors = err;

	/* the whole request must complete here; a leftover is a bug */
	if (unlikely(blk_end_request(rq, (rq->errors ? -EIO : 0),
				     blk_rq_bytes(rq))))
		BUG();
}
EXPORT_SYMBOL(ide_end_drive_cmd);
|
|
|
|
|
|
|
|
/*
 * Fail a request outright: route the completion through the owning
 * upper-level driver when one is attached to the disk, otherwise
 * finish it with the generic helper.
 */
static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	ide_driver_t *drv;

	if (rq->rq_disk == NULL) {
		ide_end_request(drive, 0, 0);
		return;
	}

	drv = *(ide_driver_t **)rq->rq_disk->private_data;
	drv->end_request(drive, 0, 0);
}
|
|
|
|
|
|
|
|
/*
 * ide_ata_error - handle an error reported by an ATA disk
 * @drive:	drive the error occurred on
 * @rq:		request being processed
 * @stat:	status register contents
 * @err:	error register contents
 *
 * Classifies the error, drains a stuck data FIFO if needed, and either
 * kills the request, schedules a reset, or lets it be retried.
 */
static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if ((stat & ATA_BUSY) ||
	    ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else if (stat & ATA_ERR) {
		/* err has different meaning on cdrom and tape */
		if (err == ATA_ABORTED) {
			if ((drive->dev_flags & IDE_DFLAG_LBA) &&
			    /* some newer drives don't support ATA_CMD_INIT_DEV_PARAMS */
			    hwif->tp_ops->read_status(hwif) == ATA_CMD_INIT_DEV_PARAMS)
				return ide_stopped;
		} else if ((err & BAD_CRC) == BAD_CRC) {
			/* UDMA crc error, just retry the operation */
			drive->crc_count++;
		} else if (err & (ATA_BBK | ATA_UNC)) {
			/* retries won't help these */
			rq->errors = ERROR_MAX;
		} else if (err & ATA_TRK0NF) {
			/* help it find track zero */
			rq->errors |= ERROR_RECAL;
		}
	}

	/* drive still wants to transfer data: pad the FIFO so it lets go,
	 * unless the host stops the FIFO on error itself */
	if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ &&
	    (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) {
		int nsect = drive->mult_count ? drive->mult_count : 1;

		ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE);
	}

	/* out of retries (or failfast): give up on this request */
	if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
		ide_kill_rq(drive, rq);
		return ide_stopped;
	}

	if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
		rq->errors |= ERROR_RESET;

	/* reset threshold reached: count the attempt and reset the drive */
	if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
		++rq->errors;
		return ide_do_reset(drive);
	}

	if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
		drive->special.b.recalibrate = 1;

	++rq->errors;

	return ide_stopped;
}
|
|
|
|
|
|
|
|
/*
 * ide_atapi_error - handle an error reported by an ATAPI device
 * @drive:	drive the error occurred on
 * @rq:		request being processed
 * @stat:	status register contents
 * @err:	error register contents (currently undecoded for ATAPI)
 *
 * Aborts a device that is stuck busy/transferring, then either kills
 * the request, schedules a reset, or counts a retry.
 */
static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if ((stat & ATA_BUSY) ||
	    ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else {
		/* add decoding error stuff */
	}

	if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
		/* force an abort */
		hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE);

	if (rq->errors >= ERROR_MAX) {
		/* retries exhausted: fail the request */
		ide_kill_rq(drive, rq);
	} else {
		if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
			++rq->errors;
			return ide_do_reset(drive);
		}
		++rq->errors;
	}

	return ide_stopped;
}
|
|
|
|
|
|
|
|
ide_startstop_t
|
|
|
|
__ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
|
|
|
|
{
|
|
|
|
if (drive->media == ide_disk)
|
|
|
|
return ide_ata_error(drive, rq, stat, err);
|
|
|
|
return ide_atapi_error(drive, rq, stat, err);
|
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL_GPL(__ide_error);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ide_error - handle an error on the IDE
|
|
|
|
* @drive: drive the error occurred on
|
|
|
|
* @msg: message to report
|
|
|
|
* @stat: status bits
|
|
|
|
*
|
|
|
|
* ide_error() takes action based on the error returned by the drive.
|
|
|
|
* For normal I/O that may well include retries. We deal with
|
|
|
|
* both new-style (taskfile) and old style command handling here.
|
|
|
|
* In the case of taskfile command handling there is work left to
|
|
|
|
* do
|
|
|
|
*/
|
|
|
|
|
|
|
|
ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
{
	struct request *rq;
	u8 err;

	/* log the failure and fetch the error register contents */
	err = ide_dump_status(drive, msg, stat);

	/* nothing in flight: nothing to retry or complete */
	if ((rq = HWGROUP(drive)->rq) == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (!blk_fs_request(rq)) {
		rq->errors = 1;
		ide_end_drive_cmd(drive, stat, err);
		return ide_stopped;
	}

	/* delegate to the owning driver's error handler when attached */
	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->error(drive, rq, stat, err);
	} else
		return __ide_error(drive, rq, stat, err);
}

EXPORT_SYMBOL_GPL(ide_error);
|
|
|
|
|
2008-01-26 05:17:16 +08:00
|
|
|
/*
 * Build an ATA_CMD_INIT_DEV_PARAMS taskfile from the drive's CHS
 * geometry (sectors per track, cylinder count, head count).
 */
static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->command = ATA_CMD_INIT_DEV_PARAMS;
	tf->device  = (drive->head - 1) | drive->select;
	tf->lbah    = drive->cyl >> 8;
	tf->lbam    = drive->cyl;
	tf->lbal    = drive->sect;
	tf->nsect   = drive->sect;
}
|
|
|
|
|
2008-01-26 05:17:16 +08:00
|
|
|
/* Build an ATA_CMD_RESTORE (recalibrate) taskfile. */
static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->command = ATA_CMD_RESTORE;
	tf->nsect   = drive->sect;
}
|
|
|
|
|
2008-01-26 05:17:16 +08:00
|
|
|
/* Build an ATA_CMD_SET_MULTI taskfile for the requested multi count. */
static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->command = ATA_CMD_SET_MULTI;
	tf->nsect   = drive->mult_req;
}
|
|
|
|
|
|
|
|
/*
 * ide_disk_special - issue one pending "special" command to a disk
 * @drive: drive with pending special flags
 *
 * Consumes exactly one pending flag per call (geometry, recalibrate,
 * then multmode, in that priority order) and issues the matching
 * no-data taskfile.  Returns ide_started once a command was issued,
 * ide_stopped if the flag word contained only unknown bits.
 */
static ide_startstop_t ide_disk_special(ide_drive_t *drive)
{
	special_t *s = &drive->special;
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.data_phase = TASKFILE_NO_DATA;

	if (s->b.set_geometry) {
		s->b.set_geometry = 0;
		ide_tf_set_specify_cmd(drive, &args.tf);
	} else if (s->b.recalibrate) {
		s->b.recalibrate = 0;
		ide_tf_set_restore_cmd(drive, &args.tf);
	} else if (s->b.set_multmode) {
		s->b.set_multmode = 0;
		ide_tf_set_setmult_cmd(drive, &args.tf);
	} else if (s->all) {
		/* unknown flag bits: complain and clear them all */
		int special = s->all;
		s->all = 0;
		printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special);
		return ide_stopped;
	}

	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
			IDE_TFLAG_CUSTOM_HANDLER;

	do_rw_taskfile(drive, &args);

	return ide_started;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* do_special - issue some special commands
|
|
|
|
* @drive: drive the command is for
|
|
|
|
*
|
2008-10-11 04:39:21 +08:00
|
|
|
* do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
|
|
|
|
* ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
|
|
|
|
*
|
|
|
|
* It used to do much more, but has been scaled back.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
|
|
|
|
|
|
|
static ide_startstop_t do_special (ide_drive_t *drive)
{
	special_t *s = &drive->special;

#ifdef DEBUG
	printk("%s: do_special: 0x%02x\n", drive->name, s->all);
#endif
	if (drive->media == ide_disk)
		return ide_disk_special(drive);

	/* non-disk media: these commands don't apply, just drop the flags */
	s->all = 0;
	drive->mult_req = 0;
	return ide_stopped;
}
|
|
|
|
|
|
|
|
/*
 * Populate the hwif scatterlist for @rq: taskfile requests carry one
 * flat buffer, anything else is mapped by the block layer.
 */
void ide_map_sg(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;

	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		/* single contiguous buffer -> one sg entry */
		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
		hwif->sg_nents = 1;
	} else {
		hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
	}
}

EXPORT_SYMBOL_GPL(ide_map_sg);
|
|
|
|
|
|
|
|
/*
 * Initialise the hwif's scatter/gather bookkeeping for a new command:
 * full sector count, nothing transferred yet, cursor rewound.
 */
void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;

	hwif->cursg = NULL;
	hwif->cursg_ofs = 0;
	hwif->nsect = rq->nr_sectors;
	hwif->nleft = rq->nr_sectors;
}

EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
|
|
|
|
|
|
|
|
/**
 *	execute_drive_cmd	-	issue special drive command
 *	@drive: the drive to issue the command on
 *	@rq: the request structure holding the command
 *
 *	execute_drive_cmd() issues a special drive command, usually
 *	initiated by ioctl() from the external hdparm program. The
 *	command can be a drive command, drive task or taskfile
 *	operation. Weirdly you can call it with NULL to wait for
 *	all commands to finish. Don't do this as that is due to change
 */

static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
		struct request *rq)
{
	ide_hwif_t *hwif = HWIF(drive);
	ide_task_t *task = rq->special;

	if (task) {
		hwif->data_phase = task->data_phase;

		switch (hwif->data_phase) {
		case TASKFILE_MULTI_OUT:
		case TASKFILE_OUT:
		case TASKFILE_MULTI_IN:
		case TASKFILE_IN:
			/* data phases need an sg mapping set up first */
			ide_init_sg_cmd(drive, rq);
			ide_map_sg(drive, rq);
			/* fall through */
		default:
			break;
		}

		return do_rw_taskfile(drive, task);
	}

	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	ide_end_drive_cmd(drive, hwif->tp_ops->read_status(hwif),
			  ide_read_error(drive));

	return ide_stopped;
}
|
|
|
|
|
2008-10-11 04:39:40 +08:00
|
|
|
/*
 * ide_devset_execute - apply a device setting
 * @drive:	target drive
 * @setting:	setting descriptor
 * @arg:	new value
 *
 * DS_SYNC settings must be applied with the queue quiesced, so they
 * are funneled through a REQ_DEVSET_EXEC special request; others are
 * applied directly.  Returns 0 on success or the setter's error code.
 */
int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
		       int arg)
{
	struct request_queue *q = drive->queue;
	struct request *rq;
	int ret = 0;

	if (!(setting->flags & DS_SYNC))
		return setting->set(drive, arg);

	rq = blk_get_request(q, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->cmd_len = 5;
	rq->cmd[0] = REQ_DEVSET_EXEC;
	/* value travels in cmd[1..4]; setter fn pointer in ->special */
	*(int *)&rq->cmd[1] = arg;
	rq->special = setting->set;

	if (blk_execute_rq(q, NULL, rq, 0))
		ret = rq->errors;
	blk_put_request(rq);

	return ret;
}
EXPORT_SYMBOL_GPL(ide_devset_execute);
|
|
|
|
|
2008-07-17 02:33:48 +08:00
|
|
|
/*
 * ide_special_rq - handle REQ_TYPE_SPECIAL requests
 * @drive:	target drive
 * @rq:		special request (opcode in rq->cmd[0])
 *
 * Handles head park/unpark, deferred device settings and drive reset.
 * Unknown opcodes are dumped and failed.
 */
static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{
	u8 cmd = rq->cmd[0];

	if (cmd == REQ_PARK_HEADS || cmd == REQ_UNPARK_HEADS) {
		ide_task_t task;
		struct ide_taskfile *tf = &task.tf;

		memset(&task, 0, sizeof(task));
		if (cmd == REQ_PARK_HEADS) {
			/* sleep until the park timeout passed in ->special */
			drive->sleep = *(unsigned long *)rq->special;
			drive->dev_flags |= IDE_DFLAG_SLEEPING;
			/* IDLE IMMEDIATE with the unload feature;
			 * 0x44/0x4c/0x4e/0x55 is the ATA unload signature */
			tf->command = ATA_CMD_IDLEIMMEDIATE;
			tf->feature = 0x44;
			tf->lbal = 0x4c;
			tf->lbam = 0x4e;
			tf->lbah = 0x55;
			task.tf_flags |= IDE_TFLAG_CUSTOM_HANDLER;
		} else /* cmd == REQ_UNPARK_HEADS */
			tf->command = ATA_CMD_CHK_POWER;

		task.tf_flags |= IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
		task.rq = rq;
		drive->hwif->data_phase = task.data_phase = TASKFILE_NO_DATA;
		return do_rw_taskfile(drive, &task);
	}

	switch (cmd) {
	case REQ_DEVSET_EXEC:
	{
		/* setter fn was stashed in ->special by ide_devset_execute() */
		int err, (*setfunc)(ide_drive_t *, int) = rq->special;

		err = setfunc(drive, *(int *)&rq->cmd[1]);
		if (err)
			rq->errors = err;
		else
			err = 1;
		ide_end_request(drive, err, 0);
		return ide_stopped;
	}
	case REQ_DRIVE_RESET:
		return ide_do_reset(drive);
	default:
		blk_dump_rq_flags(rq, "ide_special_rq - bad request");
		ide_end_request(drive, 0, 0);
		return ide_stopped;
	}
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/**
 *	start_request	-	start of I/O and command issuing for IDE
 *
 *	start_request() initiates handling of a new I/O request. It
 *	accepts commands and I/O (read/write) requests.
 *
 *	FIXME: this function needs a rename
 */

static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

	BUG_ON(!blk_rq_started(rq));

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
		HWIF(drive)->name, (unsigned long) rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		/* mark the request failed so upper layers (e.g. ide-cd's
		 * packet path) don't mistake the kill for success */
		rq->cmd_flags |= REQ_FAILED;
		goto kill_rq;
	}

	if (blk_pm_request(rq))
		ide_check_pm_state(drive, rq);

	SELECT_DRIVE(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat,
			  ATA_BUSY | ATA_DRQ, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
		return startstop;
	}
	/* pending special flags take priority (see do_special() below) */
	if (!drive->special.all) {
		ide_driver_t *drv;

		/*
		 * We reset the drive so we need to issue a SETFEATURES.
		 * Do it _after_ do_special() restored device parameters.
		 */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
			return execute_drive_cmd(drive, rq);
		else if (blk_pm_request(rq)) {
			struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
				drive->name, pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == IDE_PM_COMPLETED)
				ide_complete_pm_request(drive, rq);
			return startstop;
		} else if (!rq->rq_disk && blk_special_request(rq))
			/*
			 * TODO: Once all ULDs have been modified to
			 * check for specific op codes rather than
			 * blindly accepting any special request, the
			 * check for ->rq_disk above may be replaced
			 * by a more suitable mechanism or even
			 * dropped entirely.
			 */
			return ide_special_rq(drive, rq);

		/* ordinary fs request: hand it to the owning driver */
		drv = *(ide_driver_t **)rq->rq_disk->private_data;

		return drv->do_request(drive, rq, rq->sector);
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ide_stall_queue - pause an IDE device
|
|
|
|
* @drive: drive to stall
|
|
|
|
* @timeout: time to stall for (jiffies)
|
|
|
|
*
|
|
|
|
* ide_stall_queue() can be used by a drive to give excess bandwidth back
|
|
|
|
* to the hwgroup by sleeping for timeout jiffies.
|
|
|
|
*/
|
|
|
|
|
|
|
|
void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
|
|
|
|
{
|
|
|
|
if (timeout > WAIT_WORSTCASE)
|
|
|
|
timeout = WAIT_WORSTCASE;
|
|
|
|
drive->sleep = timeout + jiffies;
|
2008-10-14 03:39:36 +08:00
|
|
|
drive->dev_flags |= IDE_DFLAG_SLEEPING;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ide_stall_queue);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Issue a new request to a drive from hwgroup
|
|
|
|
*
|
|
|
|
* A hwgroup is a serialized group of IDE interfaces. Usually there is
|
|
|
|
* exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640)
|
|
|
|
* may have both interfaces in a single hwgroup to "serialize" access.
|
|
|
|
* Or possibly multiple ISA interfaces can share a common IRQ by being grouped
|
|
|
|
* together into one hwgroup for serialized access.
|
|
|
|
*
|
|
|
|
* Note also that several hwgroups can end up sharing a single IRQ,
|
|
|
|
* possibly along with many other devices. This is especially common in
|
|
|
|
* PCI-based systems with off-board IDE controller cards.
|
|
|
|
*
|
2009-01-02 23:12:50 +08:00
|
|
|
* The IDE driver uses a per-hwgroup lock to protect the hwgroup->busy flag.
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
|
|
|
* The first thread into the driver for a particular hwgroup sets the
|
|
|
|
* hwgroup->busy flag to indicate that this hwgroup is now active,
|
|
|
|
* and then initiates processing of the top request from the request queue.
|
|
|
|
*
|
|
|
|
* Other threads attempting entry notice the busy setting, and will simply
|
|
|
|
* queue their new requests and exit immediately. Note that hwgroup->busy
|
|
|
|
* remains set even when the driver is merely awaiting the next interrupt.
|
|
|
|
* Thus, the meaning is "this hwgroup is busy processing a request".
|
|
|
|
*
|
|
|
|
* When processing of a request completes, the completing thread or IRQ-handler
|
|
|
|
* will start the next request from the queue. If no more work remains,
|
|
|
|
* the driver will clear the hwgroup->busy flag and exit.
|
|
|
|
*
|
2008-12-30 03:27:31 +08:00
|
|
|
* The per-hwgroup spinlock is used to protect all access to the
|
2005-04-17 06:20:36 +08:00
|
|
|
* hwgroup->busy flag, but is otherwise not needed for most processing in
|
|
|
|
* the driver. This makes the driver much more friendlier to shared IRQs
|
|
|
|
* than previous designs, while remaining 100% (?) SMP safe and capable.
|
|
|
|
*/
|
2009-01-02 23:12:48 +08:00
|
|
|
void do_ide_request(struct request_queue *q)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2009-01-02 23:12:50 +08:00
|
|
|
ide_drive_t *drive = q->queuedata;
|
|
|
|
ide_hwif_t *hwif = drive->hwif;
|
|
|
|
ide_hwgroup_t *hwgroup = hwif->hwgroup;
|
2005-04-17 06:20:36 +08:00
|
|
|
struct request *rq;
|
|
|
|
ide_startstop_t startstop;
|
|
|
|
|
2009-01-02 23:12:50 +08:00
|
|
|
/*
|
|
|
|
* drive is doing pre-flush, ordered write, post-flush sequence. even
|
|
|
|
* though that is 3 requests, it must be seen as a single transaction.
|
|
|
|
* we must not preempt this drive until that is complete
|
|
|
|
*/
|
|
|
|
if (blk_queue_flushing(q))
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2009-01-02 23:12:50 +08:00
|
|
|
* small race where queue could get replugged during
|
|
|
|
* the 3-request flush cycle, just yank the plug since
|
|
|
|
* we want it to finish asap
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2009-01-02 23:12:50 +08:00
|
|
|
blk_remove_plug(q);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2009-01-02 23:12:50 +08:00
|
|
|
spin_unlock_irq(q->queue_lock);
|
|
|
|
spin_lock_irq(&hwgroup->lock);
|
2009-01-02 23:12:48 +08:00
|
|
|
|
2009-01-02 23:12:50 +08:00
|
|
|
if (!ide_lock_hwgroup(hwgroup)) {
|
2009-01-07 00:20:47 +08:00
|
|
|
ide_hwif_t *prev_port;
|
2009-01-02 23:12:50 +08:00
|
|
|
repeat:
|
2009-01-07 00:20:48 +08:00
|
|
|
prev_port = hwif->host->cur_port;
|
2009-01-02 23:12:50 +08:00
|
|
|
hwgroup->rq = NULL;
|
2009-01-02 23:12:49 +08:00
|
|
|
|
2009-01-02 23:12:50 +08:00
|
|
|
if (drive->dev_flags & IDE_DFLAG_SLEEPING) {
|
|
|
|
if (time_before(drive->sleep, jiffies)) {
|
|
|
|
ide_unlock_hwgroup(hwgroup);
|
|
|
|
goto plug_device;
|
|
|
|
}
|
|
|
|
}
|
2009-01-02 23:12:48 +08:00
|
|
|
|
2009-01-07 00:20:48 +08:00
|
|
|
if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
|
|
|
|
hwif != prev_port) {
|
2008-01-26 05:17:14 +08:00
|
|
|
/*
|
2009-01-07 00:20:47 +08:00
|
|
|
* set nIEN for previous port, drives in the
|
2008-01-26 05:17:14 +08:00
|
|
|
* quirk_list may not like intr setups/cleanups
|
|
|
|
*/
|
2009-01-07 00:20:48 +08:00
|
|
|
if (prev_port && hwgroup->drive->quirk_list == 0)
|
2009-01-07 00:20:47 +08:00
|
|
|
prev_port->tp_ops->set_irq(prev_port, 0);
|
2009-01-07 00:20:48 +08:00
|
|
|
|
|
|
|
hwif->host->cur_port = hwif;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
hwgroup->drive = drive;
|
2008-10-14 03:39:50 +08:00
|
|
|
drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2009-01-02 23:12:50 +08:00
|
|
|
spin_unlock_irq(&hwgroup->lock);
|
|
|
|
spin_lock_irq(q->queue_lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* we know that the queue isn't empty, but this can happen
|
|
|
|
* if the q->prep_rq_fn() decides to kill a request
|
|
|
|
*/
|
|
|
|
rq = elv_next_request(drive->queue);
|
2009-01-02 23:12:50 +08:00
|
|
|
spin_unlock_irq(q->queue_lock);
|
|
|
|
spin_lock_irq(&hwgroup->lock);
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if (!rq) {
|
2009-01-02 23:12:50 +08:00
|
|
|
ide_unlock_hwgroup(hwgroup);
|
2009-01-02 23:12:50 +08:00
|
|
|
goto out;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Sanity: don't accept a request that isn't a PM request
|
|
|
|
* if we are currently power managed. This is very important as
|
|
|
|
* blk_stop_queue() doesn't prevent the elv_next_request()
|
|
|
|
* above to return us whatever is in the queue. Since we call
|
|
|
|
* ide_do_request() ourselves, we end up taking requests while
|
|
|
|
* the queue is blocked...
|
|
|
|
*
|
|
|
|
* We let requests forced at head of queue with ide-preempt
|
|
|
|
* though. I hope that doesn't happen too much, hopefully not
|
|
|
|
* unless the subdriver triggers such a thing in its own PM
|
|
|
|
* state machine.
|
|
|
|
*/
|
2008-10-14 03:39:36 +08:00
|
|
|
if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
|
|
|
|
blk_pm_request(rq) == 0 &&
|
|
|
|
(rq->cmd_flags & REQ_PREEMPT) == 0) {
|
2009-01-02 23:12:50 +08:00
|
|
|
/* there should be no pending command at this point */
|
|
|
|
ide_unlock_hwgroup(hwgroup);
|
2009-01-02 23:12:48 +08:00
|
|
|
goto plug_device;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
hwgroup->rq = rq;
|
|
|
|
|
2009-01-02 23:12:48 +08:00
|
|
|
spin_unlock_irq(&hwgroup->lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
startstop = start_request(drive, rq);
|
2008-12-30 03:27:31 +08:00
|
|
|
spin_lock_irq(&hwgroup->lock);
|
2009-01-02 23:12:48 +08:00
|
|
|
|
2009-01-02 23:12:50 +08:00
|
|
|
if (startstop == ide_stopped)
|
|
|
|
goto repeat;
|
|
|
|
} else
|
|
|
|
goto plug_device;
|
|
|
|
out:
|
|
|
|
spin_unlock_irq(&hwgroup->lock);
|
|
|
|
spin_lock_irq(q->queue_lock);
|
2009-01-02 23:12:48 +08:00
|
|
|
return;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2009-01-02 23:12:48 +08:00
|
|
|
plug_device:
|
2009-01-02 23:12:50 +08:00
|
|
|
spin_unlock_irq(&hwgroup->lock);
|
|
|
|
spin_lock_irq(q->queue_lock);
|
|
|
|
|
|
|
|
if (!elv_queue_empty(q))
|
|
|
|
blk_plug_device(q);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* un-busy the hwgroup etc, and clear any pending DMA status. we want to
|
|
|
|
* retry the current request in pio mode instead of risking tossing it
|
|
|
|
* all away
|
|
|
|
*/
|
|
|
|
static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
|
|
|
|
{
|
|
|
|
ide_hwif_t *hwif = HWIF(drive);
|
|
|
|
struct request *rq;
|
|
|
|
ide_startstop_t ret = ide_stopped;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* end current dma transaction
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (error < 0) {
|
|
|
|
printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
|
2008-04-27 04:25:24 +08:00
|
|
|
(void)hwif->dma_ops->dma_end(drive);
|
2005-04-17 06:20:36 +08:00
|
|
|
ret = ide_error(drive, "dma timeout error",
|
2008-07-24 01:55:56 +08:00
|
|
|
hwif->tp_ops->read_status(hwif));
|
2005-04-17 06:20:36 +08:00
|
|
|
} else {
|
|
|
|
printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
|
2008-04-27 04:25:24 +08:00
|
|
|
hwif->dma_ops->dma_timeout(drive);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* disable dma for now, but remember that we did so because of
|
|
|
|
* a timeout -- we'll reenable after we finish this next request
|
|
|
|
* (or rather the first chunk of it) in pio.
|
|
|
|
*/
|
2008-10-14 03:39:37 +08:00
|
|
|
drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
|
2005-04-17 06:20:36 +08:00
|
|
|
drive->retry_pio++;
|
2008-01-27 03:13:01 +08:00
|
|
|
ide_dma_off_quietly(drive);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* un-busy drive etc (hwgroup->busy is cleared on return) and
|
|
|
|
* make sure request is sane
|
|
|
|
*/
|
|
|
|
rq = HWGROUP(drive)->rq;
|
2006-10-03 16:14:15 +08:00
|
|
|
|
|
|
|
if (!rq)
|
|
|
|
goto out;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
HWGROUP(drive)->rq = NULL;
|
|
|
|
|
|
|
|
rq->errors = 0;
|
|
|
|
|
|
|
|
if (!rq->bio)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
rq->sector = rq->bio->bi_sector;
|
|
|
|
rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
|
|
|
|
rq->hard_cur_sectors = rq->current_nr_sectors;
|
|
|
|
rq->buffer = bio_data(rq->bio);
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2009-01-02 23:12:50 +08:00
|
|
|
static void ide_plug_device(ide_drive_t *drive)
|
|
|
|
{
|
|
|
|
struct request_queue *q = drive->queue;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(q->queue_lock, flags);
|
|
|
|
if (!elv_queue_empty(q))
|
|
|
|
blk_plug_device(q);
|
|
|
|
spin_unlock_irqrestore(q->queue_lock, flags);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/**
|
|
|
|
* ide_timer_expiry - handle lack of an IDE interrupt
|
|
|
|
* @data: timer callback magic (hwgroup)
|
|
|
|
*
|
|
|
|
* An IDE command has timed out before the expected drive return
|
|
|
|
* occurred. At this point we attempt to clean up the current
|
|
|
|
* mess. If the current handler includes an expiry handler then
|
|
|
|
* we invoke the expiry handler, and providing it is happy the
|
|
|
|
* work is done. If that fails we apply generic recovery rules
|
|
|
|
* invoking the handler and checking the drive DMA status. We
|
|
|
|
* have an excessively incestuous relationship with the DMA
|
|
|
|
* logic that wants cleaning up.
|
|
|
|
*/
|
|
|
|
|
|
|
|
void ide_timer_expiry (unsigned long data)
|
|
|
|
{
|
|
|
|
ide_hwgroup_t *hwgroup = (ide_hwgroup_t *) data;
|
2009-01-02 23:12:50 +08:00
|
|
|
ide_drive_t *uninitialized_var(drive);
|
2005-04-17 06:20:36 +08:00
|
|
|
ide_handler_t *handler;
|
|
|
|
ide_expiry_t *expiry;
|
|
|
|
unsigned long flags;
|
|
|
|
unsigned long wait = -1;
|
2009-01-02 23:12:50 +08:00
|
|
|
int plug_device = 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-12-30 03:27:31 +08:00
|
|
|
spin_lock_irqsave(&hwgroup->lock, flags);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-04-11 04:38:37 +08:00
|
|
|
if (((handler = hwgroup->handler) == NULL) ||
|
|
|
|
(hwgroup->req_gen != hwgroup->req_gen_timer)) {
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Either a marginal timeout occurred
|
|
|
|
* (got the interrupt just as timer expired),
|
|
|
|
* or we were "sleeping" to give other devices a chance.
|
|
|
|
* Either way, we don't really want to complain about anything.
|
|
|
|
*/
|
|
|
|
} else {
|
2009-01-02 23:12:50 +08:00
|
|
|
drive = hwgroup->drive;
|
2005-04-17 06:20:36 +08:00
|
|
|
if (!drive) {
|
|
|
|
printk(KERN_ERR "ide_timer_expiry: hwgroup->drive was NULL\n");
|
|
|
|
hwgroup->handler = NULL;
|
|
|
|
} else {
|
|
|
|
ide_hwif_t *hwif;
|
|
|
|
ide_startstop_t startstop = ide_stopped;
|
2009-01-02 23:12:49 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if ((expiry = hwgroup->expiry) != NULL) {
|
|
|
|
/* continue */
|
|
|
|
if ((wait = expiry(drive)) > 0) {
|
|
|
|
/* reset timer */
|
|
|
|
hwgroup->timer.expires = jiffies + wait;
|
2007-04-11 04:38:37 +08:00
|
|
|
hwgroup->req_gen_timer = hwgroup->req_gen;
|
2005-04-17 06:20:36 +08:00
|
|
|
add_timer(&hwgroup->timer);
|
2008-12-30 03:27:31 +08:00
|
|
|
spin_unlock_irqrestore(&hwgroup->lock, flags);
|
2005-04-17 06:20:36 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
hwgroup->handler = NULL;
|
|
|
|
/*
|
|
|
|
* We need to simulate a real interrupt when invoking
|
|
|
|
* the handler() function, which means we need to
|
|
|
|
* globally mask the specific IRQ:
|
|
|
|
*/
|
2008-12-30 03:27:31 +08:00
|
|
|
spin_unlock(&hwgroup->lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
hwif = HWIF(drive);
|
|
|
|
/* disable_irq_nosync ?? */
|
|
|
|
disable_irq(hwif->irq);
|
|
|
|
/* local CPU only,
|
|
|
|
* as if we were handling an interrupt */
|
|
|
|
local_irq_disable();
|
|
|
|
if (hwgroup->polling) {
|
|
|
|
startstop = handler(drive);
|
|
|
|
} else if (drive_is_ready(drive)) {
|
|
|
|
if (drive->waiting_for_dma)
|
2008-04-27 04:25:24 +08:00
|
|
|
hwif->dma_ops->dma_lost_irq(drive);
|
2005-04-17 06:20:36 +08:00
|
|
|
(void)ide_ack_intr(hwif);
|
|
|
|
printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
|
|
|
|
startstop = handler(drive);
|
|
|
|
} else {
|
|
|
|
if (drive->waiting_for_dma) {
|
|
|
|
startstop = ide_dma_timeout_retry(drive, wait);
|
|
|
|
} else
|
|
|
|
startstop =
|
2008-02-06 09:57:51 +08:00
|
|
|
ide_error(drive, "irq timeout",
|
2008-07-24 01:55:56 +08:00
|
|
|
hwif->tp_ops->read_status(hwif));
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2008-12-30 03:27:31 +08:00
|
|
|
spin_lock_irq(&hwgroup->lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
enable_irq(hwif->irq);
|
2009-01-02 23:12:48 +08:00
|
|
|
if (startstop == ide_stopped) {
|
2009-01-02 23:12:50 +08:00
|
|
|
ide_unlock_hwgroup(hwgroup);
|
2009-01-02 23:12:50 +08:00
|
|
|
plug_device = 1;
|
2009-01-02 23:12:48 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
}
|
2008-12-30 03:27:31 +08:00
|
|
|
spin_unlock_irqrestore(&hwgroup->lock, flags);
|
2009-01-02 23:12:50 +08:00
|
|
|
|
|
|
|
if (plug_device)
|
|
|
|
ide_plug_device(drive);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* unexpected_intr - handle an unexpected IDE interrupt
|
|
|
|
* @irq: interrupt line
|
2009-01-07 00:20:48 +08:00
|
|
|
* @hwif: port being processed
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
|
|
|
* There's nothing really useful we can do with an unexpected interrupt,
|
|
|
|
* other than reading the status register (to clear it), and logging it.
|
|
|
|
* There should be no way that an irq can happen before we're ready for it,
|
|
|
|
* so we needn't worry much about losing an "important" interrupt here.
|
|
|
|
*
|
|
|
|
* On laptops (and "green" PCs), an unexpected interrupt occurs whenever
|
|
|
|
* the drive enters "idle", "standby", or "sleep" mode, so if the status
|
|
|
|
* looks "good", we just ignore the interrupt completely.
|
|
|
|
*
|
|
|
|
* This routine assumes __cli() is in effect when called.
|
|
|
|
*
|
|
|
|
* If an unexpected interrupt happens on irq15 while we are handling irq14
|
|
|
|
* and if the two interfaces are "serialized" (CMD640), then it looks like
|
|
|
|
* we could screw up by interfering with a new request being set up for
|
|
|
|
* irq15.
|
|
|
|
*
|
|
|
|
* In reality, this is a non-issue. The new command is not sent unless
|
|
|
|
* the drive is ready to accept one, in which case we know the drive is
|
|
|
|
* not trying to interrupt us. And ide_set_handler() is always invoked
|
|
|
|
* before completing the issuance of any new drive command, so we will not
|
|
|
|
* be accidentally invoked as a result of any valid command completion
|
|
|
|
* interrupt.
|
|
|
|
*
|
|
|
|
* Note that we must walk the entire hwgroup here. We know which hwif
|
|
|
|
* is doing the current command, but we don't know which hwif burped
|
|
|
|
* mysteriously.
|
|
|
|
*/
|
2009-01-07 00:20:48 +08:00
|
|
|
|
|
|
|
static void unexpected_intr(int irq, ide_hwif_t *hwif)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2009-01-07 00:20:48 +08:00
|
|
|
ide_hwgroup_t *hwgroup = hwif->hwgroup;
|
2005-04-17 06:20:36 +08:00
|
|
|
u8 stat;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* handle the unexpected interrupt
|
|
|
|
*/
|
|
|
|
do {
|
|
|
|
if (hwif->irq == irq) {
|
2008-07-24 01:55:56 +08:00
|
|
|
stat = hwif->tp_ops->read_status(hwif);
|
2008-07-24 01:55:52 +08:00
|
|
|
|
2008-10-11 04:39:21 +08:00
|
|
|
if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Try to not flood the console with msgs */
|
|
|
|
static unsigned long last_msgtime, count;
|
|
|
|
++count;
|
|
|
|
if (time_after(jiffies, last_msgtime + HZ)) {
|
|
|
|
last_msgtime = jiffies;
|
|
|
|
printk(KERN_ERR "%s%s: unexpected interrupt, "
|
|
|
|
"status=0x%02x, count=%ld\n",
|
|
|
|
hwif->name,
|
|
|
|
(hwif->next==hwgroup->hwif) ? "" : "(?)", stat, count);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} while ((hwif = hwif->next) != hwgroup->hwif);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ide_intr - default IDE interrupt handler
|
|
|
|
* @irq: interrupt number
|
|
|
|
* @dev_id: hwif group
|
|
|
|
* @regs: unused weirdness from the kernel irq layer
|
|
|
|
*
|
|
|
|
* This is the default IRQ handler for the IDE layer. You should
|
|
|
|
* not need to override it. If you do be aware it is subtle in
|
|
|
|
* places
|
|
|
|
*
|
2009-01-07 00:20:48 +08:00
|
|
|
* hwif is the interface in the group currently performing
|
2005-04-17 06:20:36 +08:00
|
|
|
* a command. hwgroup->drive is the drive and hwgroup->handler is
|
|
|
|
* the IRQ handler to call. As we issue a command the handlers
|
|
|
|
* step through multiple states, reassigning the handler to the
|
|
|
|
* next step in the process. Unlike a smart SCSI controller IDE
|
|
|
|
* expects the main processor to sequence the various transfer
|
|
|
|
* stages. We also manage a poll timer to catch up with most
|
|
|
|
* timeout situations. There are still a few where the handlers
|
|
|
|
* don't ever decide to give up.
|
|
|
|
*
|
|
|
|
* The handler eventually returns ide_stopped to indicate the
|
|
|
|
* request completed. At this point we issue the next request
|
|
|
|
* on the hwgroup and the process begins again.
|
|
|
|
*/
|
|
|
|
|
IRQ: Maintain regs pointer globally rather than passing to IRQ handlers
Maintain a per-CPU global "struct pt_regs *" variable which can be used instead
of passing regs around manually through all ~1800 interrupt handlers in the
Linux kernel.
The regs pointer is used in few places, but it potentially costs both stack
space and code to pass it around. On the FRV arch, removing the regs parameter
from all the genirq function results in a 20% speed up of the IRQ exit path
(ie: from leaving timer_interrupt() to leaving do_IRQ()).
Where appropriate, an arch may override the generic storage facility and do
something different with the variable. On FRV, for instance, the address is
maintained in GR28 at all times inside the kernel as part of general exception
handling.
Having looked over the code, it appears that the parameter may be handed down
through up to twenty or so layers of functions. Consider a USB character
device attached to a USB hub, attached to a USB controller that posts its
interrupts through a cascaded auxiliary interrupt controller. A character
device driver may want to pass regs to the sysrq handler through the input
layer which adds another few layers of parameter passing.
I've build this code with allyesconfig for x86_64 and i386. I've runtested the
main part of the code on FRV and i386, though I can't test most of the drivers.
I've also done partial conversion for powerpc and MIPS - these at least compile
with minimal configurations.
This will affect all archs. Mostly the changes should be relatively easy.
Take do_IRQ(), store the regs pointer at the beginning, saving the old one:
struct pt_regs *old_regs = set_irq_regs(regs);
And put the old one back at the end:
set_irq_regs(old_regs);
Don't pass regs through to generic_handle_irq() or __do_IRQ().
In timer_interrupt(), this sort of change will be necessary:
- update_process_times(user_mode(regs));
- profile_tick(CPU_PROFILING, regs);
+ update_process_times(user_mode(get_irq_regs()));
+ profile_tick(CPU_PROFILING);
I'd like to move update_process_times()'s use of get_irq_regs() into itself,
except that i386, alone of the archs, uses something other than user_mode().
Some notes on the interrupt handling in the drivers:
(*) input_dev() is now gone entirely. The regs pointer is no longer stored in
the input_dev struct.
(*) finish_unlinks() in drivers/usb/host/ohci-q.c needs checking. It does
something different depending on whether it's been supplied with a regs
pointer or not.
(*) Various IRQ handler function pointers have been moved to type
irq_handler_t.
Signed-Off-By: David Howells <dhowells@redhat.com>
(cherry picked from 1b16e7ac850969f38b375e511e3fa2f474a33867 commit)
2006-10-05 21:55:46 +08:00
|
|
|
irqreturn_t ide_intr (int irq, void *dev_id)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
|
2008-12-30 03:27:31 +08:00
|
|
|
ide_hwif_t *hwif = hwgroup->hwif;
|
2009-01-02 23:12:50 +08:00
|
|
|
ide_drive_t *uninitialized_var(drive);
|
2005-04-17 06:20:36 +08:00
|
|
|
ide_handler_t *handler;
|
|
|
|
ide_startstop_t startstop;
|
2008-12-30 03:27:29 +08:00
|
|
|
irqreturn_t irq_ret = IRQ_NONE;
|
2009-01-02 23:12:50 +08:00
|
|
|
int plug_device = 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2009-01-07 00:20:48 +08:00
|
|
|
if (hwif->host->host_flags & IDE_HFLAG_SERIALIZE)
|
|
|
|
hwif = hwif->host->cur_port;
|
|
|
|
|
2008-12-30 03:27:31 +08:00
|
|
|
spin_lock_irqsave(&hwgroup->lock, flags);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-12-30 03:27:29 +08:00
|
|
|
if (!ide_ack_intr(hwif))
|
|
|
|
goto out;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if ((handler = hwgroup->handler) == NULL || hwgroup->polling) {
|
|
|
|
/*
|
|
|
|
* Not expecting an interrupt from this drive.
|
|
|
|
* That means this could be:
|
|
|
|
* (1) an interrupt from another PCI device
|
|
|
|
* sharing the same PCI INT# as us.
|
|
|
|
* or (2) a drive just entered sleep or standby mode,
|
|
|
|
* and is interrupting to let us know.
|
|
|
|
* or (3) a spurious interrupt of unknown origin.
|
|
|
|
*
|
|
|
|
* For PCI, we cannot tell the difference,
|
|
|
|
* so in that case we just ignore it and hope it goes away.
|
|
|
|
*
|
|
|
|
* FIXME: unexpected_intr should be hwif-> then we can
|
|
|
|
* remove all the ifdef PCI crap
|
|
|
|
*/
|
|
|
|
#ifdef CONFIG_BLK_DEV_IDEPCI
|
2008-02-02 06:09:31 +08:00
|
|
|
if (hwif->chipset != ide_pci)
|
2005-04-17 06:20:36 +08:00
|
|
|
#endif /* CONFIG_BLK_DEV_IDEPCI */
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Probably not a shared PCI interrupt,
|
|
|
|
* so we can safely try to do something about it:
|
|
|
|
*/
|
2009-01-07 00:20:48 +08:00
|
|
|
unexpected_intr(irq, hwif);
|
2005-04-17 06:20:36 +08:00
|
|
|
#ifdef CONFIG_BLK_DEV_IDEPCI
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Whack the status register, just in case
|
|
|
|
* we have a leftover pending IRQ.
|
|
|
|
*/
|
2008-07-24 01:55:56 +08:00
|
|
|
(void)hwif->tp_ops->read_status(hwif);
|
2005-04-17 06:20:36 +08:00
|
|
|
#endif /* CONFIG_BLK_DEV_IDEPCI */
|
|
|
|
}
|
2008-12-30 03:27:29 +08:00
|
|
|
goto out;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2008-12-30 03:27:29 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
drive = hwgroup->drive;
|
|
|
|
if (!drive) {
|
|
|
|
/*
|
|
|
|
* This should NEVER happen, and there isn't much
|
|
|
|
* we could do about it here.
|
|
|
|
*
|
|
|
|
* [Note - this can occur if the drive is hot unplugged]
|
|
|
|
*/
|
2008-12-30 03:27:29 +08:00
|
|
|
goto out_handled;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2008-12-30 03:27:29 +08:00
|
|
|
|
|
|
|
if (!drive_is_ready(drive))
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* This happens regularly when we share a PCI IRQ with
|
|
|
|
* another device. Unfortunately, it can also happen
|
|
|
|
* with some buggy drives that trigger the IRQ before
|
|
|
|
* their status register is up to date. Hopefully we have
|
|
|
|
* enough advance overhead that the latter isn't a problem.
|
|
|
|
*/
|
2008-12-30 03:27:29 +08:00
|
|
|
goto out;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
hwgroup->handler = NULL;
|
2007-04-11 04:38:37 +08:00
|
|
|
hwgroup->req_gen++;
|
2005-04-17 06:20:36 +08:00
|
|
|
del_timer(&hwgroup->timer);
|
2008-12-30 03:27:31 +08:00
|
|
|
spin_unlock(&hwgroup->lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-10-14 03:39:42 +08:00
|
|
|
if (hwif->port_ops && hwif->port_ops->clear_irq)
|
|
|
|
hwif->port_ops->clear_irq(drive);
|
2007-02-17 09:40:21 +08:00
|
|
|
|
2008-10-14 03:39:36 +08:00
|
|
|
if (drive->dev_flags & IDE_DFLAG_UNMASK)
|
2006-07-03 15:25:25 +08:00
|
|
|
local_irq_enable_in_hardirq();
|
2008-10-14 03:39:42 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* service this interrupt, may set handler for next interrupt */
|
|
|
|
startstop = handler(drive);
|
|
|
|
|
2008-12-30 03:27:31 +08:00
|
|
|
spin_lock_irq(&hwgroup->lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Note that handler() may have set things up for another
|
|
|
|
* interrupt to occur soon, but it cannot happen until
|
|
|
|
* we exit from this routine, because it will be the
|
|
|
|
* same irq as is currently being serviced here, and Linux
|
|
|
|
* won't allow another of the same (on any CPU) until we return.
|
|
|
|
*/
|
|
|
|
if (startstop == ide_stopped) {
|
|
|
|
if (hwgroup->handler == NULL) { /* paranoia */
|
2009-01-02 23:12:50 +08:00
|
|
|
ide_unlock_hwgroup(hwgroup);
|
2009-01-02 23:12:50 +08:00
|
|
|
plug_device = 1;
|
2009-01-02 23:12:48 +08:00
|
|
|
} else
|
|
|
|
printk(KERN_ERR "%s: %s: huh? expected NULL handler "
|
|
|
|
"on exit\n", __func__, drive->name);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2008-12-30 03:27:29 +08:00
|
|
|
out_handled:
|
|
|
|
irq_ret = IRQ_HANDLED;
|
|
|
|
out:
|
2008-12-30 03:27:31 +08:00
|
|
|
spin_unlock_irqrestore(&hwgroup->lock, flags);
|
2009-01-02 23:12:50 +08:00
|
|
|
|
|
|
|
if (plug_device)
|
|
|
|
ide_plug_device(drive);
|
|
|
|
|
2008-12-30 03:27:29 +08:00
|
|
|
return irq_ret;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* ide_do_drive_cmd - issue IDE special command
|
|
|
|
* @drive: device to issue command
|
|
|
|
* @rq: request to issue
|
|
|
|
*
|
|
|
|
* This function issues a special IDE device request
|
|
|
|
* onto the request queue.
|
|
|
|
*
|
2008-07-16 03:21:51 +08:00
|
|
|
* the rq is queued at the head of the request queue, displacing
|
|
|
|
* the currently-being-processed request and this function
|
|
|
|
* returns immediately without waiting for the new rq to be
|
|
|
|
* completed. This is VERY DANGEROUS, and is intended for
|
|
|
|
* careful use by the ATAPI tape/cdrom driver code.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2008-07-16 03:21:51 +08:00
|
|
|
|
|
|
|
void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2008-12-30 03:27:30 +08:00
|
|
|
ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
|
2008-12-30 03:27:31 +08:00
|
|
|
struct request_queue *q = drive->queue;
|
2005-04-17 06:20:36 +08:00
|
|
|
unsigned long flags;
|
2008-07-16 03:21:41 +08:00
|
|
|
|
2008-07-16 03:21:51 +08:00
|
|
|
hwgroup->rq = NULL;
|
2008-12-30 03:27:30 +08:00
|
|
|
|
2008-12-30 03:27:31 +08:00
|
|
|
spin_lock_irqsave(q->queue_lock, flags);
|
|
|
|
__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
|
|
|
|
spin_unlock_irqrestore(q->queue_lock, flags);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(ide_do_drive_cmd);
|
2008-01-26 05:17:13 +08:00
|
|
|
|
|
|
|
void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
|
|
|
|
{
|
2008-07-24 01:55:52 +08:00
|
|
|
ide_hwif_t *hwif = drive->hwif;
|
2008-01-26 05:17:13 +08:00
|
|
|
ide_task_t task;
|
|
|
|
|
|
|
|
memset(&task, 0, sizeof(task));
|
|
|
|
task.tf_flags = IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM |
|
|
|
|
IDE_TFLAG_OUT_FEATURE | tf_flags;
|
|
|
|
task.tf.feature = dma; /* Use PIO/DMA */
|
|
|
|
task.tf.lbam = bcount & 0xff;
|
|
|
|
task.tf.lbah = (bcount >> 8) & 0xff;
|
|
|
|
|
2008-04-29 05:44:39 +08:00
|
|
|
ide_tf_dump(drive->name, &task.tf);
|
2008-07-24 01:55:56 +08:00
|
|
|
hwif->tp_ops->set_irq(hwif, 1);
|
2008-07-16 03:21:48 +08:00
|
|
|
SELECT_MASK(drive, 0);
|
2008-07-24 01:55:56 +08:00
|
|
|
hwif->tp_ops->tf_load(drive, &task);
|
2008-01-26 05:17:13 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
|
2008-04-29 05:44:41 +08:00
|
|
|
|
|
|
|
void ide_pad_transfer(ide_drive_t *drive, int write, int len)
|
|
|
|
{
|
|
|
|
ide_hwif_t *hwif = drive->hwif;
|
|
|
|
u8 buf[4] = { 0 };
|
|
|
|
|
|
|
|
while (len > 0) {
|
|
|
|
if (write)
|
2008-07-24 01:55:56 +08:00
|
|
|
hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
|
2008-04-29 05:44:41 +08:00
|
|
|
else
|
2008-07-24 01:55:56 +08:00
|
|
|
hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
|
2008-04-29 05:44:41 +08:00
|
|
|
len -= 4;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(ide_pad_transfer);
|