mmc_block: inform block layer about sector count restriction

Make sure we consider the maximum block count when we tell the block
layer about the maximum sector count. That way we don't have to chop up
the request ourselves.

Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
commit f3eb0aaa02
parent 6501ff604a
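The idea of the change: a host driver advertises both a byte limit per
request (max_req_size) and a block limit per transfer (max_blk_count),
so the sector cap handed to the block layer must honor both. A rough
standalone sketch of that calculation, not kernel code; struct
fake_host, max_sectors(), and the example numbers are invented for
illustration:

    #include <stdio.h>

    struct fake_host {
            unsigned int max_req_size;  /* maximum bytes per request */
            unsigned int max_blk_count; /* maximum 512-byte blocks per transfer */
    };

    /* Mirrors min(host->max_blk_count, host->max_req_size / 512)
     * from the mmc_init_queue() change below. */
    static unsigned int max_sectors(const struct fake_host *host)
    {
            unsigned int by_bytes = host->max_req_size / 512;

            return host->max_blk_count < by_bytes ?
                    host->max_blk_count : by_bytes;
    }

    int main(void)
    {
            /* e.g. a controller moving up to 128 KiB but only 127 blocks */
            struct fake_host host = {
                    .max_req_size  = 131072,
                    .max_blk_count = 127,
            };

            printf("tell the block layer: %u sectors max\n",
                   max_sectors(&host));
            return 0;
    }

With the block layer capped this way, no request larger than the host
can handle ever reaches the driver, which is what lets the diff below
delete the driver-side trimming.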
drivers/mmc/card/block.c:

@@ -215,8 +215,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 	struct mmc_blk_request brq;
-	int ret = 1, data_size, i;
-	struct scatterlist *sg;
+	int ret = 1;
 
 	mmc_claim_host(card->host);
 
@@ -237,8 +236,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		brq.stop.arg = 0;
 		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
 		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
-		if (brq.data.blocks > card->host->max_blk_count)
-			brq.data.blocks = card->host->max_blk_count;
 
 		if (brq.data.blocks > 1) {
 			/* SPI multiblock writes terminate using a special
@@ -270,24 +267,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 
 		mmc_queue_bounce_pre(mq);
 
-		/*
-		 * Adjust the sg list so it is the same size as the
-		 * request.
-		 */
-		if (brq.data.blocks !=
-			(req->nr_sectors >> (md->block_bits - 9))) {
-			data_size = brq.data.blocks * brq.data.blksz;
-
-			for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
-				data_size -= sg->length;
-				if (data_size <= 0) {
-					sg->length += data_size;
-					i++;
-					break;
-				}
-			}
-			brq.data.sg_len = i;
-		}
-
 		mmc_wait_for_req(card->host, &brq.mrq);
 
 		mmc_queue_bounce_post(mq);
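The deleted for_each_sg loop is the "chop up the request ourselves"
step the commit message refers to: walk the scatterlist, spend the
allowed byte budget, truncate the segment where the budget runs out,
and drop the rest. A standalone sketch of that pattern over a plain
array instead of a kernel scatterlist (struct seg, trim_segments(),
and the sizes are invented for illustration):

    #include <stdio.h>

    struct seg { int length; };

    /* Trim 'nsegs' segments to at most 'data_size' bytes total;
     * returns the number of segments still in use, like the old
     * brq.data.sg_len = i assignment. */
    static int trim_segments(struct seg *segs, int nsegs, int data_size)
    {
            int i;

            for (i = 0; i < nsegs; i++) {
                    data_size -= segs[i].length;
                    if (data_size <= 0) {
                            segs[i].length += data_size; /* shrink last segment */
                            i++;
                            break;
                    }
            }
            return i;
    }

    int main(void)
    {
            struct seg segs[] = { { 4096 }, { 4096 }, { 4096 } };
            int used = trim_segments(segs, 3, 6144); /* 12-sector budget */

            printf("segments used: %d, last length: %d\n",
                   used, segs[used - 1].length);
            return 0;
    }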
drivers/mmc/card/queue.c:

@@ -142,12 +142,19 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 		bouncesz = host->max_req_size;
 		if (bouncesz > host->max_seg_size)
 			bouncesz = host->max_seg_size;
+		if (bouncesz > (host->max_blk_count * 512))
+			bouncesz = host->max_blk_count * 512;
 
-		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-		if (!mq->bounce_buf) {
-			printk(KERN_WARNING "%s: unable to allocate "
-				"bounce buffer\n", mmc_card_name(card));
-		} else {
+		if (bouncesz > 512) {
+			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+			if (!mq->bounce_buf) {
+				printk(KERN_WARNING "%s: unable to "
+					"allocate bounce buffer\n",
+					mmc_card_name(card));
+			}
+		}
+
+		if (mq->bounce_buf) {
 			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 			blk_queue_max_sectors(mq->queue, bouncesz / 512);
 			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
@@ -175,7 +182,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 
 	if (!mq->bounce_buf) {
 		blk_queue_bounce_limit(mq->queue, limit);
-		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
+		blk_queue_max_sectors(mq->queue,
+			min(host->max_blk_count, host->max_req_size / 512));
 		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
 		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
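On the queue.c side, the bounce buffer is now capped by every host
restriction, including the new max_blk_count limit, and is skipped
entirely when it would be no bigger than a single 512-byte sector. A
standalone sketch of that sizing policy (bounce_size() and the example
limits are illustrative, not kernel API; a zero return stands in for
the fall-through to the direct-mapping path):

    #include <stdio.h>

    static unsigned int bounce_size(unsigned int max_req_size,
                                    unsigned int max_seg_size,
                                    unsigned int max_blk_count)
    {
            unsigned int bouncesz = max_req_size;

            if (bouncesz > max_seg_size)
                    bouncesz = max_seg_size;
            if (bouncesz > max_blk_count * 512)
                    bouncesz = max_blk_count * 512;

            /* A one-sector buffer buys nothing over direct mapping. */
            return bouncesz > 512 ? bouncesz : 0;
    }

    int main(void)
    {
            /* e.g. 64 KiB requests, 32 KiB segments, 127 blocks per transfer */
            printf("bounce buffer: %u bytes\n",
                   bounce_size(65536, 32768, 127));
            return 0;
    }

Note the restructuring from an if/else to two separate ifs: if the
kmalloc() fails, the code now falls through to the non-bounce setup
instead of leaving the queue limits unconfigured.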