blk-mq: move blk_mq_get_ctx/blk_mq_put_ctx to mq private header
The blk-mq tag code needs these helpers.

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 3de0ef8d0d
commit 1aecfe4887
block/blk-mq.c
@@ -33,28 +33,6 @@ static LIST_HEAD(all_q_list);
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
 
-static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
-					   unsigned int cpu)
-{
-	return per_cpu_ptr(q->queue_ctx, cpu);
-}
-
-/*
- * This assumes per-cpu software queueing queues. They could be per-node
- * as well, for instance. For now this is hardcoded as-is. Note that we don't
- * care about preemption, since we know the ctx's are persistent. This does
- * mean that we can't rely on ctx always matching the currently running CPU.
- */
-static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
-{
-	return __blk_mq_get_ctx(q, get_cpu());
-}
-
-static void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
-{
-	put_cpu();
-}
-
 /*
  * Check if any of the ctx's have pending work in this hardware queue
  */
block/blk-mq.h
@@ -69,4 +69,26 @@ struct blk_align_bitmap {
 	unsigned long depth;
 } ____cacheline_aligned_in_smp;
 
+static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
+					   unsigned int cpu)
+{
+	return per_cpu_ptr(q->queue_ctx, cpu);
+}
+
+/*
+ * This assumes per-cpu software queueing queues. They could be per-node
+ * as well, for instance. For now this is hardcoded as-is. Note that we don't
+ * care about preemption, since we know the ctx's are persistent. This does
+ * mean that we can't rely on ctx always matching the currently running CPU.
+ */
+static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
+{
+	return __blk_mq_get_ctx(q, get_cpu());
+}
+
+static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
+{
+	put_cpu();
+}
+
 #endif
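For context, the pairing these helpers enforce looks roughly like the sketch below. The caller shown is hypothetical (not part of this patch); the get/put discipline itself comes from the helpers being moved: blk_mq_get_ctx() calls get_cpu(), which disables preemption and pins the ctx lookup to the current CPU, and blk_mq_put_ctx() re-enables preemption via put_cpu(). Moving them into the private header is what lets the tag code follow this same pattern.

/* Hypothetical caller, sketching how the moved helpers are paired.
 * Assumes blk-mq.h (the mq private header) is included.
 */
static void example_touch_sw_queue(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;

	ctx = blk_mq_get_ctx(q);	/* get_cpu(): preemption off, ctx of this CPU */

	/* ... operate on the per-cpu software queue via ctx ... */

	blk_mq_put_ctx(ctx);		/* put_cpu(): preemption back on */
}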