cfq-iosched: add message logging through blktrace
Now that blktrace has the ability to carry arbitrary messages in its stream, use that for some CFQ logging.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
This commit is contained in:
parent
863fddcb4b
commit
7b679138b3
|
@ -11,6 +11,7 @@
|
|||
#include <linux/elevator.h>
|
||||
#include <linux/rbtree.h>
|
||||
#include <linux/ioprio.h>
|
||||
#include <linux/blktrace_api.h>
|
||||
|
||||
/*
|
||||
* tunables
|
||||
|
@ -41,7 +42,7 @@ static int cfq_slice_idle = HZ / 125;
|
|||
|
||||
#define RQ_CIC(rq) \
|
||||
((struct cfq_io_context *) (rq)->elevator_private)
|
||||
#define RQ_CFQQ(rq) ((rq)->elevator_private2)
|
||||
#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private2)
|
||||
|
||||
static struct kmem_cache *cfq_pool;
|
||||
static struct kmem_cache *cfq_ioc_pool;
|
||||
|
@ -156,6 +157,7 @@ struct cfq_queue {
|
|||
unsigned short ioprio, org_ioprio;
|
||||
unsigned short ioprio_class, org_ioprio_class;
|
||||
|
||||
pid_t pid;
|
||||
};
|
||||
|
||||
enum cfqq_state_flags {
|
||||
|
@ -199,6 +201,11 @@ CFQ_CFQQ_FNS(slice_new);
|
|||
CFQ_CFQQ_FNS(sync);
|
||||
#undef CFQ_CFQQ_FNS
|
||||
|
||||
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
|
||||
blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
|
||||
#define cfq_log(cfqd, fmt, args...) \
|
||||
blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
|
||||
|
||||
static void cfq_dispatch_insert(struct request_queue *, struct request *);
|
||||
static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
|
||||
struct io_context *, gfp_t);
|
||||
|
@ -235,9 +242,11 @@ static inline int cfq_bio_sync(struct bio *bio)
|
|||
*/
|
||||
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
|
||||
{
|
||||
if (cfqd->busy_queues)
|
||||
if (cfqd->busy_queues) {
|
||||
cfq_log(cfqd, "schedule dispatch");
|
||||
kblockd_schedule_work(&cfqd->unplug_work);
|
||||
}
|
||||
}
|
||||
|
||||
static int cfq_queue_empty(struct request_queue *q)
|
||||
{
|
||||
|
@ -271,6 +280,7 @@ static inline void
|
|||
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
|
||||
{
|
||||
cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
|
||||
cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -540,6 +550,7 @@ static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
|
|||
*/
|
||||
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
|
||||
{
|
||||
cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
|
||||
BUG_ON(cfq_cfqq_on_rr(cfqq));
|
||||
cfq_mark_cfqq_on_rr(cfqq);
|
||||
cfqd->busy_queues++;
|
||||
|
@ -553,6 +564,7 @@ static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
|
|||
*/
|
||||
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
|
||||
{
|
||||
cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
|
||||
BUG_ON(!cfq_cfqq_on_rr(cfqq));
|
||||
cfq_clear_cfqq_on_rr(cfqq);
|
||||
|
||||
|
@ -639,6 +651,8 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
|
|||
struct cfq_data *cfqd = q->elevator->elevator_data;
|
||||
|
||||
cfqd->rq_in_driver++;
|
||||
cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
|
||||
cfqd->rq_in_driver);
|
||||
|
||||
/*
|
||||
* If the depth is larger 1, it really could be queueing. But lets
|
||||
|
@ -658,6 +672,8 @@ static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
|
|||
|
||||
WARN_ON(!cfqd->rq_in_driver);
|
||||
cfqd->rq_in_driver--;
|
||||
cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
|
||||
cfqd->rq_in_driver);
|
||||
}
|
||||
|
||||
static void cfq_remove_request(struct request *rq)
|
||||
|
@ -747,6 +763,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
|
|||
struct cfq_queue *cfqq)
|
||||
{
|
||||
if (cfqq) {
|
||||
cfq_log_cfqq(cfqd, cfqq, "set_active");
|
||||
cfqq->slice_end = 0;
|
||||
cfq_clear_cfqq_must_alloc_slice(cfqq);
|
||||
cfq_clear_cfqq_fifo_expire(cfqq);
|
||||
|
@ -764,6 +781,8 @@ static void
|
|||
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
|
||||
int timed_out)
|
||||
{
|
||||
cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
|
||||
|
||||
if (cfq_cfqq_wait_request(cfqq))
|
||||
del_timer(&cfqd->idle_slice_timer);
|
||||
|
||||
|
@ -773,8 +792,10 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
|
|||
/*
|
||||
* store what was left of this slice, if the queue idled/timed out
|
||||
*/
|
||||
if (timed_out && !cfq_cfqq_slice_new(cfqq))
|
||||
if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
|
||||
cfqq->slice_resid = cfqq->slice_end - jiffies;
|
||||
cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
|
||||
}
|
||||
|
||||
cfq_resort_rr_list(cfqd, cfqq);
|
||||
|
||||
|
@ -866,6 +887,12 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
|
|||
if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
|
||||
return;
|
||||
|
||||
/*
|
||||
* still requests with the driver, don't idle
|
||||
*/
|
||||
if (cfqd->rq_in_driver)
|
||||
return;
|
||||
|
||||
/*
|
||||
* task has exited, don't wait
|
||||
*/
|
||||
|
@ -893,6 +920,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
|
|||
sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
|
||||
|
||||
mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
|
||||
cfq_log(cfqd, "arm_idle: %lu", sl);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -903,6 +931,8 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
|
|||
struct cfq_data *cfqd = q->elevator->elevator_data;
|
||||
struct cfq_queue *cfqq = RQ_CFQQ(rq);
|
||||
|
||||
cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
|
||||
|
||||
cfq_remove_request(rq);
|
||||
cfqq->dispatched++;
|
||||
elv_dispatch_sort(q, rq);
|
||||
|
@ -932,8 +962,9 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
|
|||
rq = rq_entry_fifo(cfqq->fifo.next);
|
||||
|
||||
if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
|
||||
return NULL;
|
||||
rq = NULL;
|
||||
|
||||
cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
|
||||
return rq;
|
||||
}
|
||||
|
||||
|
@ -1073,6 +1104,7 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
|
|||
|
||||
BUG_ON(cfqd->busy_queues);
|
||||
|
||||
cfq_log(cfqd, "forced_dispatch=%d\n", dispatched);
|
||||
return dispatched;
|
||||
}
|
||||
|
||||
|
@ -1113,6 +1145,7 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
|
|||
dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
|
||||
}
|
||||
|
||||
cfq_log(cfqd, "dispatched=%d", dispatched);
|
||||
return dispatched;
|
||||
}
|
||||
|
||||
|
@ -1131,6 +1164,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
|
|||
if (!atomic_dec_and_test(&cfqq->ref))
|
||||
return;
|
||||
|
||||
cfq_log_cfqq(cfqd, cfqq, "put_queue");
|
||||
BUG_ON(rb_first(&cfqq->sort_list));
|
||||
BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
|
||||
BUG_ON(cfq_cfqq_on_rr(cfqq));
|
||||
|
@ -1439,6 +1473,8 @@ retry:
|
|||
cfq_mark_cfqq_idle_window(cfqq);
|
||||
cfq_mark_cfqq_sync(cfqq);
|
||||
}
|
||||
cfqq->pid = current->pid;
|
||||
cfq_log_cfqq(cfqd, cfqq, "alloced");
|
||||
}
|
||||
|
||||
if (new_cfqq)
|
||||
|
@ -1687,7 +1723,7 @@ static void
|
|||
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
|
||||
struct cfq_io_context *cic)
|
||||
{
|
||||
int enable_idle;
|
||||
int old_idle, enable_idle;
|
||||
|
||||
/*
|
||||
* Don't idle for async or idle io prio class
|
||||
|
@ -1695,7 +1731,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
|
|||
if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
|
||||
return;
|
||||
|
||||
enable_idle = cfq_cfqq_idle_window(cfqq);
|
||||
old_idle = cfq_cfqq_idle_window(cfqq);
|
||||
|
||||
if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
|
||||
(cfqd->hw_tag && CIC_SEEKY(cic)))
|
||||
|
@ -1707,11 +1743,14 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
|
|||
enable_idle = 1;
|
||||
}
|
||||
|
||||
if (old_idle != enable_idle) {
|
||||
cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
|
||||
if (enable_idle)
|
||||
cfq_mark_cfqq_idle_window(cfqq);
|
||||
else
|
||||
cfq_clear_cfqq_idle_window(cfqq);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if new_cfqq should preempt the currently active queue. Return 0 for
|
||||
|
@ -1769,6 +1808,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
|
|||
*/
|
||||
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
|
||||
{
|
||||
cfq_log_cfqq(cfqd, cfqq, "preempt");
|
||||
cfq_slice_expired(cfqd, 1);
|
||||
|
||||
/*
|
||||
|
@ -1830,6 +1870,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
|
|||
struct cfq_data *cfqd = q->elevator->elevator_data;
|
||||
struct cfq_queue *cfqq = RQ_CFQQ(rq);
|
||||
|
||||
cfq_log_cfqq(cfqd, cfqq, "insert_request");
|
||||
cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
|
||||
|
||||
cfq_add_rq_rb(rq);
|
||||
|
@ -1847,6 +1888,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
|
|||
unsigned long now;
|
||||
|
||||
now = jiffies;
|
||||
cfq_log_cfqq(cfqd, cfqq, "complete");
|
||||
|
||||
WARN_ON(!cfqd->rq_in_driver);
|
||||
WARN_ON(!cfqq->dispatched);
|
||||
|
@ -2016,6 +2058,7 @@ queue_fail:
|
|||
|
||||
cfq_schedule_dispatch(cfqd);
|
||||
spin_unlock_irqrestore(q->queue_lock, flags);
|
||||
cfq_log(cfqd, "set_request fail");
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
@ -2041,6 +2084,8 @@ static void cfq_idle_slice_timer(unsigned long data)
|
|||
unsigned long flags;
|
||||
int timed_out = 1;
|
||||
|
||||
cfq_log(cfqd, "idle timer fired");
|
||||
|
||||
spin_lock_irqsave(cfqd->queue->queue_lock, flags);
|
||||
|
||||
cfqq = cfqd->active_queue;
|
||||
|
|
Loading…
Reference in New Issue