blk_end_request: changing mmc (take 4)
This patch converts mmc to use blk_end_request interfaces.
Related 'uptodate' arguments are converted to 'error'.

Cc: Pierre Ossman <drzeus-mmc@drzeus.cx>
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 1381b7e82a
commit fd539832c7
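
For context, a minimal sketch of the before/after completion pattern this
conversion is about. foo_end_ok_old()/foo_end_ok_new() are hypothetical
helper names, not code from this patch; the block-layer calls on each side
are the real pre- and post-conversion interfaces, and both variants assume
the caller already holds the queue lock:

#include <linux/blkdev.h>
#include <linux/genhd.h>

/* Old style: 'uptodate' (1 = success), with the driver doing the
 * dequeue and final completion by hand once no bytes remain. */
static void foo_end_ok_old(struct request *req, unsigned int bytes)
{
	if (end_that_request_chunk(req, 1, bytes))
		return;		/* parts of the request still outstanding */

	add_disk_randomness(req->rq_disk);
	blkdev_dequeue_request(req);
	end_that_request_last(req, 1);
}

/* New style: 'error' (0 = success, negative errno = failure);
 * __blk_end_request() handles dequeue and final completion itself. */
static void foo_end_ok_new(struct request *req, unsigned int bytes)
{
	__blk_end_request(req, 0, bytes);
}
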
@@ -348,15 +348,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		/*
 		 * A block was successfully transferred.
 		 */
 		spin_lock_irq(&md->lock);
-		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
-		if (!ret) {
-			/*
-			 * The whole request completed successfully.
-			 */
-			add_disk_randomness(req->rq_disk);
-			blkdev_dequeue_request(req);
-			end_that_request_last(req, 1);
-		}
+		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
 		spin_unlock_irq(&md->lock);
 	} while (ret);
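
This first hunk is the success path of mmc_blk_issue_rq(): nine lines of
manual completion collapse into one call. A sketch of the locking contract
involved (mmc_end_ok() is a hypothetical helper; md, req and the byte count
mirror the names in the hunk above):

static int mmc_end_ok(struct mmc_blk_data *md, struct request *req,
		      unsigned int bytes)
{
	int ret;

	/* __blk_end_request() requires the queue lock to be held;
	 * md->lock is the spinlock mmc registers via blk_init_queue(). */
	spin_lock_irq(&md->lock);
	ret = __blk_end_request(req, 0, bytes);
	spin_unlock_irq(&md->lock);

	/* A caller not already holding the lock would use the sibling
	 * blk_end_request(), which acquires it internally:
	 *
	 *	ret = blk_end_request(req, 0, bytes);
	 */
	return ret;
}
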
@@ -386,27 +378,21 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 			else
 				bytes = blocks << 9;
 			spin_lock_irq(&md->lock);
-			ret = end_that_request_chunk(req, 1, bytes);
+			ret = __blk_end_request(req, 0, bytes);
 			spin_unlock_irq(&md->lock);
 		}
 	} else if (rq_data_dir(req) != READ &&
 		   (card->host->caps & MMC_CAP_MULTIWRITE)) {
 		spin_lock_irq(&md->lock);
-		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
+		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
 		spin_unlock_irq(&md->lock);
 	}
 
 	mmc_release_host(card->host);
 
 	spin_lock_irq(&md->lock);
-	while (ret) {
-		ret = end_that_request_chunk(req, 0,
-				req->current_nr_sectors << 9);
-	}
-
-	add_disk_randomness(req->rq_disk);
-	blkdev_dequeue_request(req);
-	end_that_request_last(req, 0);
+	while (ret)
+		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
 	spin_unlock_irq(&md->lock);
 
 	return 0;
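
Two semantic shifts are visible in this hunk's failure path. First, the old
uptodate value of 0 becomes a negative errno, -EIO. Second, the hand-rolled
epilogue (add_disk_randomness(), blkdev_dequeue_request(),
end_that_request_last()) disappears: __blk_end_request() returns nonzero
while bytes remain and performs the dequeue and final completion itself once
the loop has drained the request, so the bare while loop is sufficient.
blk_rq_cur_bytes(req) is the accessor replacing the open-coded
req->current_nr_sectors << 9.
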
@@ -94,8 +94,8 @@ static void mmc_request(struct request_queue *q)
 		printk(KERN_ERR "MMC: killing requests for dead queue\n");
 		while ((req = elv_next_request(q)) != NULL) {
 			do {
-				ret = end_that_request_chunk(req, 0,
-					req->current_nr_sectors << 9);
+				ret = __blk_end_request(req, -EIO,
+						blk_rq_cur_bytes(req));
 			} while (ret);
 		}
 		return;
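
The queue.c hunk applies the same conversion to mmc_request()'s dead-queue
path. As a standalone sketch (foo_kill_queue() is a hypothetical name;
elv_next_request(), __blk_end_request() and blk_rq_cur_bytes() are the
interfaces used in the hunk, and a request_fn such as mmc_request() is
entered with the queue lock held):

#include <linux/blkdev.h>

/* Fail every request left on a dead queue with -EIO, one segment
 * at a time, until __blk_end_request() returns 0 (fully completed).
 * Must be called with the queue lock held, as a request_fn is. */
static void foo_kill_queue(struct request_queue *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL)
		while (__blk_end_request(req, -EIO, blk_rq_cur_bytes(req)))
			;	/* more segments remain */
}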