block: add request->raw_data_len
With padding and draining moved into it, the block layer may now extend requests as directed by queue parameters, so a request has two sizes: the original request size and the extended size, which matches the size of the area pointed to by bios and later by sgs. The latter size is what lower layers are primarily interested in when allocating and filling up DMA tables and setting up the controller. Both padding and draining extend the data area to accommodate controller characteristics. As any controller which speaks SCSI can handle underflows, feeding a larger data area is safe.

So, this patch makes the primary data length field, request->data_len, indicate the size of the full data area, and adds a separate length field, request->raw_data_len, for the unmodified request size. The latter is used for reporting to higher layers (userland) and wherever the original request size should be fed to the controller or device.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
commit 6b00769fe1 (parent 40b01b9bbd)
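As a quick illustration of the split the message describes, here is a small, self-contained userspace C sketch (not part of the patch; the struct, variable names, and numbers are invented for illustration): both lengths start at the bio size as in blk_rq_bio_prep(), padding and draining grow only data_len, and completion paths report raw_data_len back to userland.

#include <stdio.h>

/* Simplified stand-in for the two length fields this patch introduces;
 * this is not the real struct request, just enough to show the bookkeeping. */
struct req {
	unsigned int raw_data_len;	/* original request size, reported to userland */
	unsigned int data_len;		/* full data area: original + padding + drain */
};

int main(void)
{
	struct req rq = { 0, 0 };
	unsigned int bio_size = 518;	/* example transfer that is not nicely aligned */
	unsigned int pad_len = 2;	/* alignment padding, as added in blk_rq_map_user() */
	unsigned int drain_len = 256;	/* drain buffer size, as added in blk_rq_map_sg() */

	/* both lengths start at the bio size */
	rq.raw_data_len = bio_size;
	rq.data_len = bio_size;

	/* padding and draining extend only the full data area length */
	rq.data_len += pad_len;
	rq.data_len += drain_len;

	/* lower layers size DMA tables from data_len; userland sees raw_data_len */
	printf("set up the controller for %u bytes\n", rq.data_len);
	printf("report %u bytes to userland\n", rq.raw_data_len);
	return 0;
}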
@@ -127,6 +127,7 @@ void rq_init(struct request_queue *q, struct request *rq)
 	rq->nr_hw_segments = 0;
 	rq->ioprio = 0;
 	rq->special = NULL;
+	rq->raw_data_len = 0;
 	rq->buffer = NULL;
 	rq->tag = -1;
 	rq->errors = 0;
@@ -2015,6 +2016,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 	rq->hard_cur_sectors = rq->current_nr_sectors;
 	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
 	rq->buffer = bio_data(bio);
+	rq->raw_data_len = bio->bi_size;
 	rq->data_len = bio->bi_size;
 
 	rq->bio = rq->biotail = bio;
@@ -19,6 +19,7 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
 
+		rq->raw_data_len += bio->bi_size;
 		rq->data_len += bio->bi_size;
 	}
 	return 0;
@@ -154,6 +155,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 
 		bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len;
 		bio->bi_size += pad_len;
+		rq->data_len += pad_len;
 	}
 
 	rq->buffer = rq->data = NULL;
@@ -228,6 +228,7 @@ new_segment:
 			    ((unsigned long)q->dma_drain_buffer) &
 			    (PAGE_SIZE - 1));
 		nsegs++;
+		rq->data_len += q->dma_drain_size;
 	}
 
 	if (sg)
@@ -437,14 +437,14 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 	}
 
 	if (rq->next_rq) {
-		hdr->dout_resid = rq->data_len;
-		hdr->din_resid = rq->next_rq->data_len;
+		hdr->dout_resid = rq->raw_data_len;
+		hdr->din_resid = rq->next_rq->raw_data_len;
 		blk_rq_unmap_user(bidi_bio);
 		blk_put_request(rq->next_rq);
 	} else if (rq_data_dir(rq) == READ)
-		hdr->din_resid = rq->data_len;
+		hdr->din_resid = rq->raw_data_len;
 	else
-		hdr->dout_resid = rq->data_len;
+		hdr->dout_resid = rq->raw_data_len;
 
 	/*
 	 * If the request generated a negative error number, return it
@@ -266,7 +266,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
 	hdr->info = 0;
 	if (hdr->masked_status || hdr->host_status || hdr->driver_status)
 		hdr->info |= SG_INFO_CHECK;
-	hdr->resid = rq->data_len;
+	hdr->resid = rq->raw_data_len;
 	hdr->sb_len_wr = 0;
 
 	if (rq->sense_len && hdr->sbp) {
@@ -528,6 +528,7 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
 	rq = blk_get_request(q, WRITE, __GFP_WAIT);
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	rq->data = NULL;
+	rq->raw_data_len = 0;
 	rq->data_len = 0;
 	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
 	memset(rq->cmd, 0, sizeof(rq->cmd));
@@ -1014,10 +1014,6 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
 	}
 
 	req->buffer = NULL;
-	if (blk_pc_request(req))
-		sdb->length = req->data_len;
-	else
-		sdb->length = req->nr_sectors << 9;
 
 	/*
 	 * Next, walk the list, and fill in the addresses and sizes of
@@ -1026,6 +1022,10 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
 	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
 	BUG_ON(count > sdb->table.nents);
 	sdb->table.nents = count;
+	if (blk_pc_request(req))
+		sdb->length = req->data_len;
+	else
+		sdb->length = req->nr_sectors << 9;
 	return BLKPREP_OK;
 }
 
@@ -216,6 +216,7 @@ struct request {
 	unsigned int cmd_len;
 	unsigned char cmd[BLK_MAX_CDB];
 
+	unsigned int raw_data_len;
 	unsigned int data_len;
 	unsigned int sense_len;
 	void *data;