block: get rid of unnecessary forward declarations in blk-core.c
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 86db1e2977
commit 26b8256e2b
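The change is mechanical: in C, a static function needs a forward declaration only when it is called before its definition, so moving each definition above its first caller lets the declaration be deleted outright. A minimal sketch of the pattern (hypothetical helper()/caller() names, not kernel code):

/* A static function defined below its first caller forces a
 * forward declaration at the top of the file:
 *
 *	static void helper(void);
 *
 * Defining it above the caller instead makes that line redundant,
 * which is what this commit achieves by moving drive_stat_acct()
 * and blk_recount_segments() below the code they depend on.
 */
static void helper(void)
{
	/* do the work */
}

void caller(void)
{
	helper();
}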
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -33,9 +33,7 @@
 
 #include "blk.h"
 
-static void drive_stat_acct(struct request *rq, int new_io);
 static int __make_request(struct request_queue *q, struct bio *bio);
-static void blk_recalc_rq_segments(struct request *rq);
 
 /*
  * For the allocated request tables
@@ -54,6 +52,21 @@ static struct workqueue_struct *kblockd_workqueue;
 
 static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
 
+static void drive_stat_acct(struct request *rq, int new_io)
+{
+	int rw = rq_data_dir(rq);
+
+	if (!blk_fs_request(rq) || !rq->rq_disk)
+		return;
+
+	if (!new_io) {
+		__disk_stat_inc(rq->rq_disk, merges[rw]);
+	} else {
+		disk_round_stats(rq->rq_disk);
+		rq->rq_disk->in_flight++;
+	}
+}
+
 void blk_queue_congestion_threshold(struct request_queue *q)
 {
 	int nr;
@@ -168,21 +181,6 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
-void blk_recount_segments(struct request_queue *q, struct bio *bio)
-{
-	struct request rq;
-	struct bio *nxt = bio->bi_next;
-	rq.q = q;
-	rq.bio = rq.biotail = bio;
-	bio->bi_next = NULL;
-	blk_recalc_rq_segments(&rq);
-	bio->bi_next = nxt;
-	bio->bi_phys_segments = rq.nr_phys_segments;
-	bio->bi_hw_segments = rq.nr_hw_segments;
-	bio->bi_flags |= (1 << BIO_SEG_VALID);
-}
-EXPORT_SYMBOL(blk_recount_segments);
-
 static void blk_recalc_rq_segments(struct request *rq)
 {
 	int nr_phys_segs;
@@ -255,6 +253,21 @@ new_hw_segment:
 	rq->nr_hw_segments = nr_hw_segs;
 }
 
+void blk_recount_segments(struct request_queue *q, struct bio *bio)
+{
+	struct request rq;
+	struct bio *nxt = bio->bi_next;
+	rq.q = q;
+	rq.bio = rq.biotail = bio;
+	bio->bi_next = NULL;
+	blk_recalc_rq_segments(&rq);
+	bio->bi_next = nxt;
+	bio->bi_phys_segments = rq.nr_phys_segments;
+	bio->bi_hw_segments = rq.nr_hw_segments;
+	bio->bi_flags |= (1 << BIO_SEG_VALID);
+}
+EXPORT_SYMBOL(blk_recount_segments);
+
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 				   struct bio *nxt)
 {
@@ -1305,21 +1318,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 
 EXPORT_SYMBOL(blk_insert_request);
 
-static void drive_stat_acct(struct request *rq, int new_io)
-{
-	int rw = rq_data_dir(rq);
-
-	if (!blk_fs_request(rq) || !rq->rq_disk)
-		return;
-
-	if (!new_io) {
-		__disk_stat_inc(rq->rq_disk, merges[rw]);
-	} else {
-		disk_round_stats(rq->rq_disk);
-		rq->rq_disk->in_flight++;
-	}
-}
-
 /*
  * add-request adds a request to the linked list.
  * queue lock is held and interrupts disabled, as we muck with the