block: remove the bi_phys_segments field in struct bio
We only need the number of segments in the blk-mq submission path. Remove the field from struct bio, and return it from a variant of blk_queue_split instead, so that it can be passed as an argument to those functions that need the value. This also means we stop recounting segments except for cloning and partial segments.

To keep the number of arguments in this hot path down, remove the pointless struct request_queue argument from any function that had one and grew a nr_segs argument.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent f924cddebc
commit 14ccb66b3f
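To make the new calling convention concrete, here is a minimal sketch (not part of the commit; the helper name attach_bio_to_request is hypothetical) of how a caller now derives the segment count once and passes it down, mirroring what blk_rq_append_bio() does in the diff below, instead of reading it back from the removed bio->bi_phys_segments field:

/*
 * Sketch only: count the physical segments of a bio with bio_for_each_bvec()
 * and hand the value to blk_rq_bio_prep(), which now stores it directly in
 * rq->nr_phys_segments.
 */
static void attach_bio_to_request(struct request *rq, struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	bio_for_each_bvec(bv, bio, iter)	/* one iteration per multi-page bvec */
		nr_segs++;

	blk_rq_bio_prep(rq, bio, nr_segs);	/* sets rq->nr_phys_segments = nr_segs */
}

Callers on the normal submission path do not need this extra walk: __blk_queue_split() computes the count while splitting and returns it, as the diff below shows.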
@@ -436,7 +436,6 @@ struct bio {
        struct bvec_iter bi_iter;        /* current index into bio_vec array */

        unsigned int bi_size;            /* total size in bytes */
-       unsigned short bi_phys_segments; /* segments after physaddr coalesce*/
        unsigned short bi_hw_segments;   /* segments after DMA remapping */
        unsigned int bi_max;             /* max bio_vecs we can hold
                                            used as index into pool */
@@ -2027,7 +2027,8 @@ static void bfq_remove_request(struct request_queue *q,
 }

-static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
+static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
+               unsigned int nr_segs)
 {
        struct request_queue *q = hctx->queue;
        struct bfq_data *bfqd = q->elevator->elevator_data;
@@ -2050,7 +2051,7 @@ static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
        bfqd->bio_bfqq = NULL;
        bfqd->bio_bic = bic;

-       ret = blk_mq_sched_try_merge(q, bio, &free);
+       ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);

        if (free)
                blk_mq_free_request(free);
block/bio.c (15 lines changed)

@@ -558,14 +558,6 @@ void bio_put(struct bio *bio)
 }
 EXPORT_SYMBOL(bio_put);

-int bio_phys_segments(struct request_queue *q, struct bio *bio)
-{
-       if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
-               blk_recount_segments(q, bio);
-
-       return bio->bi_phys_segments;
-}
-
 /**
  * __bio_clone_fast - clone a bio that shares the original bio's biovec
  * @bio: destination bio
@@ -739,7 +731,7 @@ static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
                if (bio_full(bio))
                        return 0;

-               if (bio->bi_phys_segments >= queue_max_segments(q))
+               if (bio->bi_vcnt >= queue_max_segments(q))
                        return 0;

                bvec = &bio->bi_io_vec[bio->bi_vcnt];
@@ -749,8 +741,6 @@ static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
        bio->bi_vcnt++;
 done:
        bio->bi_iter.bi_size += len;
-       bio->bi_phys_segments = bio->bi_vcnt;
-       bio_set_flag(bio, BIO_SEG_VALID);
        return len;
 }

@@ -1909,10 +1899,7 @@ void bio_trim(struct bio *bio, int offset, int size)
        if (offset == 0 && size == bio->bi_iter.bi_size)
                return;

-       bio_clear_flag(bio, BIO_SEG_VALID);
-
        bio_advance(bio, offset << 9);
-
        bio->bi_iter.bi_size = size;

        if (bio_integrity(bio))
@@ -550,15 +550,15 @@ void blk_put_request(struct request *req)
 }
 EXPORT_SYMBOL(blk_put_request);

-bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
-               struct bio *bio)
+bool bio_attempt_back_merge(struct request *req, struct bio *bio,
+               unsigned int nr_segs)
 {
        const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

-       if (!ll_back_merge_fn(q, req, bio))
+       if (!ll_back_merge_fn(req, bio, nr_segs))
                return false;

-       trace_block_bio_backmerge(q, req, bio);
+       trace_block_bio_backmerge(req->q, req, bio);

        if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
                blk_rq_set_mixed_merge(req);
@@ -571,15 +571,15 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
        return true;
 }

-bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
-               struct bio *bio)
+bool bio_attempt_front_merge(struct request *req, struct bio *bio,
+               unsigned int nr_segs)
 {
        const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

-       if (!ll_front_merge_fn(q, req, bio))
+       if (!ll_front_merge_fn(req, bio, nr_segs))
                return false;

-       trace_block_bio_frontmerge(q, req, bio);
+       trace_block_bio_frontmerge(req->q, req, bio);

        if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
                blk_rq_set_mixed_merge(req);
@@ -621,6 +621,7 @@ no_merge:
  * blk_attempt_plug_merge - try to merge with %current's plugged list
  * @q: request_queue new bio is being queued at
  * @bio: new bio being queued
+ * @nr_segs: number of segments in @bio
  * @same_queue_rq: pointer to &struct request that gets filled in when
  *                 another request associated with @q is found on the plug list
  *                 (optional, may be %NULL)
@@ -639,7 +640,7 @@ no_merge:
  * Caller must ensure !blk_queue_nomerges(q) beforehand.
  */
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-               struct request **same_queue_rq)
+               unsigned int nr_segs, struct request **same_queue_rq)
 {
        struct blk_plug *plug;
        struct request *rq;
@@ -668,10 +669,10 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,

                switch (blk_try_merge(rq, bio)) {
                case ELEVATOR_BACK_MERGE:
-                       merged = bio_attempt_back_merge(q, rq, bio);
+                       merged = bio_attempt_back_merge(rq, bio, nr_segs);
                        break;
                case ELEVATOR_FRONT_MERGE:
-                       merged = bio_attempt_front_merge(q, rq, bio);
+                       merged = bio_attempt_front_merge(rq, bio, nr_segs);
                        break;
                case ELEVATOR_DISCARD_MERGE:
                        merged = bio_attempt_discard_merge(q, rq, bio);
@@ -1427,14 +1428,9 @@ bool blk_update_request(struct request *req, blk_status_t error,
 }
 EXPORT_SYMBOL_GPL(blk_update_request);

-void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
-               struct bio *bio)
+void blk_rq_bio_prep(struct request *rq, struct bio *bio, unsigned int nr_segs)
 {
-       if (bio_has_data(bio))
-               rq->nr_phys_segments = bio_phys_segments(q, bio);
-       else if (bio_op(bio) == REQ_OP_DISCARD)
-               rq->nr_phys_segments = 1;
+       rq->nr_phys_segments = nr_segs;

        rq->__data_len = bio->bi_iter.bi_size;
        rq->bio = rq->biotail = bio;
        rq->ioprio = bio_prio(bio);
@@ -18,13 +18,19 @@
 int blk_rq_append_bio(struct request *rq, struct bio **bio)
 {
        struct bio *orig_bio = *bio;
+       struct bvec_iter iter;
+       struct bio_vec bv;
+       unsigned int nr_segs = 0;

        blk_queue_bounce(rq->q, bio);

+       bio_for_each_bvec(bv, *bio, iter)
+               nr_segs++;
+
        if (!rq->bio) {
-               blk_rq_bio_prep(rq->q, rq, *bio);
+               blk_rq_bio_prep(rq, *bio, nr_segs);
        } else {
-               if (!ll_back_merge_fn(rq->q, rq, *bio)) {
+               if (!ll_back_merge_fn(rq, *bio, nr_segs)) {
                        if (orig_bio != *bio) {
                                bio_put(*bio);
                                *bio = orig_bio;
@@ -258,32 +258,29 @@ split:
        return do_split ? new : NULL;
 }

-void blk_queue_split(struct request_queue *q, struct bio **bio)
+void __blk_queue_split(struct request_queue *q, struct bio **bio,
+               unsigned int *nr_segs)
 {
-       struct bio *split, *res;
-       unsigned nsegs;
+       struct bio *split;

        switch (bio_op(*bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
-               split = blk_bio_discard_split(q, *bio, &q->bio_split, &nsegs);
+               split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
                break;
        case REQ_OP_WRITE_ZEROES:
-               split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split, &nsegs);
+               split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
+                               nr_segs);
                break;
        case REQ_OP_WRITE_SAME:
-               split = blk_bio_write_same_split(q, *bio, &q->bio_split, &nsegs);
+               split = blk_bio_write_same_split(q, *bio, &q->bio_split,
+                               nr_segs);
                break;
        default:
-               split = blk_bio_segment_split(q, *bio, &q->bio_split, &nsegs);
+               split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
                break;
        }

-       /* physical segments can be figured out during splitting */
-       res = split ? split : *bio;
-       res->bi_phys_segments = nsegs;
-       bio_set_flag(res, BIO_SEG_VALID);
-
        if (split) {
                /* there isn't chance to merge the splitted bio */
                split->bi_opf |= REQ_NOMERGE;
@@ -304,6 +301,13 @@ void blk_queue_split(struct request_queue *q, struct bio **bio)
                *bio = split;
        }
 }
+
+void blk_queue_split(struct request_queue *q, struct bio **bio)
+{
+       unsigned int nr_segs;
+
+       __blk_queue_split(q, bio, &nr_segs);
+}
 EXPORT_SYMBOL(blk_queue_split);

 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
@@ -338,17 +342,6 @@ void blk_recalc_rq_segments(struct request *rq)
        rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
 }

-void blk_recount_segments(struct request_queue *q, struct bio *bio)
-{
-       struct bio *nxt = bio->bi_next;
-
-       bio->bi_next = NULL;
-       bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
-       bio->bi_next = nxt;
-
-       bio_set_flag(bio, BIO_SEG_VALID);
-}
-
 static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
                struct scatterlist *sglist)
 {
@@ -519,16 +512,13 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(blk_rq_map_sg);

-static inline int ll_new_hw_segment(struct request_queue *q,
-                                   struct request *req,
-                                   struct bio *bio)
+static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
+               unsigned int nr_phys_segs)
 {
-       int nr_phys_segs = bio_phys_segments(q, bio);
-
-       if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
+       if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(req->q))
                goto no_merge;

-       if (blk_integrity_merge_bio(q, req, bio) == false)
+       if (blk_integrity_merge_bio(req->q, req, bio) == false)
                goto no_merge;

        /*
@@ -539,12 +529,11 @@ static inline int ll_new_hw_segment(struct request_queue *q,
        return 1;

 no_merge:
-       req_set_nomerge(q, req);
+       req_set_nomerge(req->q, req);
        return 0;
 }

-int ll_back_merge_fn(struct request_queue *q, struct request *req,
-               struct bio *bio)
+int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
 {
        if (req_gap_back_merge(req, bio))
                return 0;
@@ -553,21 +542,15 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
-               req_set_nomerge(q, req);
+               req_set_nomerge(req->q, req);
                return 0;
        }
-       if (!bio_flagged(req->biotail, BIO_SEG_VALID))
-               blk_recount_segments(q, req->biotail);
-       if (!bio_flagged(bio, BIO_SEG_VALID))
-               blk_recount_segments(q, bio);

-       return ll_new_hw_segment(q, req, bio);
+       return ll_new_hw_segment(req, bio, nr_segs);
 }

-int ll_front_merge_fn(struct request_queue *q, struct request *req,
-               struct bio *bio)
+int ll_front_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
 {
        if (req_gap_front_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
@@ -575,15 +558,11 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
-               req_set_nomerge(q, req);
+               req_set_nomerge(req->q, req);
                return 0;
        }
-       if (!bio_flagged(bio, BIO_SEG_VALID))
-               blk_recount_segments(q, bio);
-       if (!bio_flagged(req->bio, BIO_SEG_VALID))
-               blk_recount_segments(q, req->bio);

-       return ll_new_hw_segment(q, req, bio);
+       return ll_new_hw_segment(req, bio, nr_segs);
 }

 static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
@@ -224,7 +224,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 }

 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
-               struct request **merged_request)
+               unsigned int nr_segs, struct request **merged_request)
 {
        struct request *rq;

@@ -232,7 +232,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
        case ELEVATOR_BACK_MERGE:
                if (!blk_mq_sched_allow_merge(q, rq, bio))
                        return false;
-               if (!bio_attempt_back_merge(q, rq, bio))
+               if (!bio_attempt_back_merge(rq, bio, nr_segs))
                        return false;
                *merged_request = attempt_back_merge(q, rq);
                if (!*merged_request)
@@ -241,7 +241,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
        case ELEVATOR_FRONT_MERGE:
                if (!blk_mq_sched_allow_merge(q, rq, bio))
                        return false;
-               if (!bio_attempt_front_merge(q, rq, bio))
+               if (!bio_attempt_front_merge(rq, bio, nr_segs))
                        return false;
                *merged_request = attempt_front_merge(q, rq);
                if (!*merged_request)
@@ -260,7 +260,7 @@ EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
  * of them.
  */
 bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
-               struct bio *bio)
+               struct bio *bio, unsigned int nr_segs)
 {
        struct request *rq;
        int checked = 8;
@@ -277,11 +277,13 @@ bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
                switch (blk_try_merge(rq, bio)) {
                case ELEVATOR_BACK_MERGE:
                        if (blk_mq_sched_allow_merge(q, rq, bio))
-                               merged = bio_attempt_back_merge(q, rq, bio);
+                               merged = bio_attempt_back_merge(rq, bio,
+                                               nr_segs);
                        break;
                case ELEVATOR_FRONT_MERGE:
                        if (blk_mq_sched_allow_merge(q, rq, bio))
-                               merged = bio_attempt_front_merge(q, rq, bio);
+                               merged = bio_attempt_front_merge(rq, bio,
+                                               nr_segs);
                        break;
                case ELEVATOR_DISCARD_MERGE:
                        merged = bio_attempt_discard_merge(q, rq, bio);
@@ -304,13 +306,14 @@ EXPORT_SYMBOL_GPL(blk_mq_bio_list_merge);
  */
 static bool blk_mq_attempt_merge(struct request_queue *q,
                struct blk_mq_hw_ctx *hctx,
-               struct blk_mq_ctx *ctx, struct bio *bio)
+               struct blk_mq_ctx *ctx, struct bio *bio,
+               unsigned int nr_segs)
 {
        enum hctx_type type = hctx->type;

        lockdep_assert_held(&ctx->lock);

-       if (blk_mq_bio_list_merge(q, &ctx->rq_lists[type], bio)) {
+       if (blk_mq_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) {
                ctx->rq_merged++;
                return true;
        }
@@ -318,7 +321,8 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
        return false;
 }

-bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
+bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
+               unsigned int nr_segs)
 {
        struct elevator_queue *e = q->elevator;
        struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
@@ -328,7 +332,7 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)

        if (e && e->type->ops.bio_merge) {
                blk_mq_put_ctx(ctx);
-               return e->type->ops.bio_merge(hctx, bio);
+               return e->type->ops.bio_merge(hctx, bio, nr_segs);
        }

        type = hctx->type;
@@ -336,7 +340,7 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
            !list_empty_careful(&ctx->rq_lists[type])) {
                /* default per sw-queue merge */
                spin_lock(&ctx->lock);
-               ret = blk_mq_attempt_merge(q, hctx, ctx, bio);
+               ret = blk_mq_attempt_merge(q, hctx, ctx, bio, nr_segs);
                spin_unlock(&ctx->lock);
        }

@@ -12,8 +12,9 @@ void blk_mq_sched_assign_ioc(struct request *rq);

 void blk_mq_sched_request_inserted(struct request *rq);
 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
-               struct request **merged_request);
-bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
+               unsigned int nr_segs, struct request **merged_request);
+bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
+               unsigned int nr_segs);
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
@@ -31,12 +32,13 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
 void blk_mq_sched_free_requests(struct request_queue *q);

 static inline bool
-blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
+blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
+               unsigned int nr_segs)
 {
        if (blk_queue_nomerges(q) || !bio_mergeable(bio))
                return false;

-       return __blk_mq_sched_bio_merge(q, bio);
+       return __blk_mq_sched_bio_merge(q, bio, nr_segs);
 }

 static inline bool
@@ -1764,14 +1764,15 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
        }
 }

-static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
+static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
+               unsigned int nr_segs)
 {
        if (bio->bi_opf & REQ_RAHEAD)
                rq->cmd_flags |= REQ_FAILFAST_MASK;

        rq->__sector = bio->bi_iter.bi_sector;
        rq->write_hint = bio->bi_write_hint;
-       blk_rq_bio_prep(rq->q, rq, bio);
+       blk_rq_bio_prep(rq, bio, nr_segs);

        blk_account_io_start(rq, true);
 }
@@ -1941,20 +1942,20 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
        struct request *rq;
        struct blk_plug *plug;
        struct request *same_queue_rq = NULL;
+       unsigned int nr_segs;
        blk_qc_t cookie;

        blk_queue_bounce(q, &bio);
-
-       blk_queue_split(q, &bio);
+       __blk_queue_split(q, &bio, &nr_segs);

        if (!bio_integrity_prep(bio))
                return BLK_QC_T_NONE;

        if (!is_flush_fua && !blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, &same_queue_rq))
+           blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
                return BLK_QC_T_NONE;

-       if (blk_mq_sched_bio_merge(q, bio))
+       if (blk_mq_sched_bio_merge(q, bio, nr_segs))
                return BLK_QC_T_NONE;

        rq_qos_throttle(q, bio);
@@ -1977,7 +1978,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
        plug = current->plug;
        if (unlikely(is_flush_fua)) {
                blk_mq_put_ctx(data.ctx);
-               blk_mq_bio_to_request(rq, bio);
+               blk_mq_bio_to_request(rq, bio, nr_segs);

                /* bypass scheduler for flush rq */
                blk_insert_flush(rq);
@@ -1991,7 +1992,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                struct request *last = NULL;

                blk_mq_put_ctx(data.ctx);
-               blk_mq_bio_to_request(rq, bio);
+               blk_mq_bio_to_request(rq, bio, nr_segs);

                if (!request_count)
                        trace_block_plug(q);
@@ -2006,7 +2007,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)

                blk_add_rq_to_plug(plug, rq);
        } else if (plug && !blk_queue_nomerges(q)) {
-               blk_mq_bio_to_request(rq, bio);
+               blk_mq_bio_to_request(rq, bio, nr_segs);

                /*
                 * We do limited plugging. If the bio can be merged, do that.
@@ -2035,11 +2036,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
        } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
                        !data.hctx->dispatch_busy)) {
                blk_mq_put_ctx(data.ctx);
-               blk_mq_bio_to_request(rq, bio);
+               blk_mq_bio_to_request(rq, bio, nr_segs);
                blk_mq_try_issue_directly(data.hctx, rq, &cookie);
        } else {
                blk_mq_put_ctx(data.ctx);
-               blk_mq_bio_to_request(rq, bio);
+               blk_mq_bio_to_request(rq, bio, nr_segs);
                blk_mq_sched_insert_request(rq, false, true, true);
        }

block/blk.h (23 lines changed)

@@ -51,8 +51,7 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
                int node, int cmd_size, gfp_t flags);
 void blk_free_flush_queue(struct blk_flush_queue *q);

-void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
-               struct bio *bio);
+void blk_rq_bio_prep(struct request *rq, struct bio *bio, unsigned int nr_segs);
 void blk_freeze_queue(struct request_queue *q);

 static inline void blk_queue_enter_live(struct request_queue *q)
@@ -154,14 +153,14 @@ static inline bool bio_integrity_endio(struct bio *bio)
 unsigned long blk_rq_timeout(unsigned long timeout);
 void blk_add_timer(struct request *req);

-bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
-               struct bio *bio);
-bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
-               struct bio *bio);
+bool bio_attempt_front_merge(struct request *req, struct bio *bio,
+               unsigned int nr_segs);
+bool bio_attempt_back_merge(struct request *req, struct bio *bio,
+               unsigned int nr_segs);
 bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
                struct bio *bio);
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-               struct request **same_queue_rq);
+               unsigned int nr_segs, struct request **same_queue_rq);

 void blk_account_io_start(struct request *req, bool new_io);
 void blk_account_io_completion(struct request *req, unsigned int bytes);
@@ -202,10 +201,12 @@ static inline int blk_should_fake_timeout(struct request_queue *q)
 }
 #endif

-int ll_back_merge_fn(struct request_queue *q, struct request *req,
-               struct bio *bio);
-int ll_front_merge_fn(struct request_queue *q, struct request *req,
-               struct bio *bio);
+void __blk_queue_split(struct request_queue *q, struct bio **bio,
+               unsigned int *nr_segs);
+int ll_back_merge_fn(struct request *req, struct bio *bio,
+               unsigned int nr_segs);
+int ll_front_merge_fn(struct request *req, struct bio *bio,
+               unsigned int nr_segs);
 struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
 struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
 int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
@@ -562,7 +562,8 @@ static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
        }
 }

-static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
+static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
+               unsigned int nr_segs)
 {
        struct kyber_hctx_data *khd = hctx->sched_data;
        struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
@@ -572,7 +573,7 @@ static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
        bool merged;

        spin_lock(&kcq->lock);
-       merged = blk_mq_bio_list_merge(hctx->queue, rq_list, bio);
+       merged = blk_mq_bio_list_merge(hctx->queue, rq_list, bio, nr_segs);
        spin_unlock(&kcq->lock);
        blk_mq_put_ctx(ctx);

@@ -469,7 +469,8 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
        return ELEVATOR_NO_MERGE;
 }

-static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
+static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
+               unsigned int nr_segs)
 {
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
@@ -477,7 +478,7 @@ static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
        bool ret;

        spin_lock(&dd->lock);
-       ret = blk_mq_sched_try_merge(q, bio, &free);
+       ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
        spin_unlock(&dd->lock);

        if (free)
@@ -5251,7 +5251,6 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
        rcu_read_unlock();
        raid_bio->bi_next = (void*)rdev;
        bio_set_dev(align_bi, rdev->bdev);
-       bio_clear_flag(align_bi, BIO_SEG_VALID);

        if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
                        bio_sectors(align_bi),
@@ -408,7 +408,6 @@ static inline void bio_wouldblock_error(struct bio *bio)
 }

 struct request_queue;
-extern int bio_phys_segments(struct request_queue *, struct bio *);

 extern int submit_bio_wait(struct bio *bio);
 extern void bio_advance(struct bio *, unsigned);
@@ -306,7 +306,7 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs
 bool blk_mq_complete_request(struct request *rq);
 void blk_mq_complete_request_sync(struct request *rq);
 bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
-               struct bio *bio);
+               struct bio *bio, unsigned int nr_segs);
 bool blk_mq_queue_stopped(struct request_queue *q);
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -154,11 +154,6 @@ struct bio {
        blk_status_t bi_status;
        u8 bi_partno;

-       /* Number of segments in this BIO after
-        * physical address coalescing is performed.
-        */
-       unsigned int bi_phys_segments;
-
        struct bvec_iter bi_iter;

        atomic_t __bi_remaining;
@@ -210,7 +205,6 @@
  */
 enum {
        BIO_NO_PAGE_REF,        /* don't put release vec pages */
-       BIO_SEG_VALID,          /* bi_phys_segments valid */
        BIO_CLONED,             /* doesn't own data */
        BIO_BOUNCED,            /* bio is a bounce bio */
        BIO_USER_MAPPED,        /* contains user pages */
@@ -841,7 +841,6 @@ extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
                struct request *rq);
 extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
 extern void blk_queue_split(struct request_queue *, struct bio **);
-extern void blk_recount_segments(struct request_queue *, struct bio *);
 extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
 extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
                unsigned int, void __user *);
@@ -34,7 +34,7 @@ struct elevator_mq_ops {
        void (*depth_updated)(struct blk_mq_hw_ctx *);

        bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
-       bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
+       bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int);
        int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
        void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
        void (*requests_merged)(struct request_queue *, struct request *, struct request *);