/*
 * Copyright (C) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

struct blk_mq_debugfs_attr {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
};
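
/*
 * Each debugfs file below is described by one of these entries. The
 * attribute tables near the bottom of this file end with an empty {} entry,
 * i.e. a NULL ->name sentinel that stops the creation loop. A minimal
 * sketch of a table (the names here are illustrative only):
 *
 *	static const struct blk_mq_debugfs_attr example_attrs[] = {
 *		{"some_file", 0400, &some_fops},
 *		{},
 *	};
 */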

static int blk_mq_debugfs_seq_open(struct inode *inode, struct file *file,
				   const struct seq_operations *ops)
{
	struct seq_file *m;
	int ret;

	ret = seq_open(file, ops);
	if (!ret) {
		m = file->private_data;
		m->private = inode->i_private;
	}
	return ret;
}

static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}
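
/*
 * Example: with flags == 0x5 and flag_name == {"A", "B", "C"}, the above
 * prints "A|C". Bits without a name fall back to the raw bit number, so
 * flags == 0x1 with an empty name table would print "0".
 */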

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(QUEUED),
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(SYNCFULL),
	QUEUE_FLAG_NAME(ASYNCFULL),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(BYPASS),
	QUEUE_FLAG_NAME(BIDI),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(STACKABLE),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(NO_SG_MERGE),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(FLUSH_NQ),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
};
#undef QUEUE_FLAG_NAME

static int blk_queue_flags_show(struct seq_file *m, void *v)
{
	struct request_queue *q = m->private;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

static ssize_t blk_queue_flags_store(struct file *file, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct request_queue *q = file_inode(file)->i_private;
	char op[16] = { }, *s;

	if (count >= sizeof(op)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(op, buf, count))
		return -EFAULT;
	s = op;
	strsep(&s, " \t\n"); /* strip trailing whitespace */
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use either 'run' or 'start'\n", __func__);
		return -EINVAL;
	}
	return count;
}

static int blk_queue_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, blk_queue_flags_show, inode->i_private);
}

static const struct file_operations blk_queue_flags_fops = {
	.open = blk_queue_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = blk_queue_flags_store,
};
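
/*
 * Usage sketch for the state file (paths assume debugfs is mounted at
 * /sys/kernel/debug and a hypothetical disk named sda):
 *
 *	cat /sys/kernel/debug/block/sda/mq/state	  # dump queue flags
 *	echo run > /sys/kernel/debug/block/sda/mq/state	  # kick the hw queues
 *	echo start > /sys/kernel/debug/block/sda/mq/state # restart stopped queues
 */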

static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}

static int queue_poll_stat_show(struct seq_file *m, void *v)
{
	struct request_queue *q = m->private;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) {
		seq_printf(m, "read (%d Bytes): ", 1 << (9+bucket));
		print_stat(m, &q->poll_stat[2*bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9+bucket));
		print_stat(m, &q->poll_stat[2*bucket+1]);
		seq_puts(m, "\n");
	}
	return 0;
}

static int queue_poll_stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, queue_poll_stat_show, inode->i_private);
}

static const struct file_operations queue_poll_stat_fops = {
	.open = queue_poll_stat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
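
/*
 * Note on the poll_stat layout above: buckets come in read/write pairs, so
 * q->poll_stat[2*bucket] holds the read stats and q->poll_stat[2*bucket+1]
 * the write stats for the same size class, whose label 1 << (9+bucket)
 * starts at 512 bytes and doubles per bucket.
 */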

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(TAG_WAITING),
	HCTX_STATE_NAME(START_ON_RUN),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(struct seq_file *m, void *v)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

static int hctx_state_open(struct inode *inode, struct file *file)
{
	return single_open(file, hctx_state_show, inode->i_private);
}

static const struct file_operations hctx_state_fops = {
	.open = hctx_state_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_SHARED),
	HCTX_FLAG_NAME(SG_MERGE),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(struct seq_file *m, void *v)
{
	struct blk_mq_hw_ctx *hctx = m->private;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

static int hctx_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, hctx_flags_show, inode->i_private);
}

static const struct file_operations hctx_flags_fops = {
	.open = hctx_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
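
/*
 * The flags file might print, e.g., "alloc_policy=FIFO SHOULD_MERGE"
 * (illustrative). The alloc policy is XORed out of hctx->flags before
 * blk_flags_show() so that its bits are not misreported as flag names.
 */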

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(ZONE_REPORT),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(SCSI_IN),
	REQ_OP_NAME(SCSI_OUT),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOUNMAP),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(SORTED),
	RQF_NAME(STARTED),
	RQF_NAME(QUEUED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(PREEMPT),
	RQF_NAME(COPY_USER),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(ALLOCED),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
};
#undef RQF_NAME

static int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	struct request *rq = list_entry_rq(v);
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = rq->cmd_flags & REQ_OP_MASK;

	seq_printf(m, "%p {.op=", rq);
	if (op < ARRAY_SIZE(op_name) && op_name[op])
		seq_printf(m, "%s", op_name[op]);
	else
		seq_printf(m, "%d", op);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
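
/*
 * A request line printed by the above might look like (illustrative):
 *
 *	ffff880123456789 {.op=READ, .cmd_flags=SYNC, .rq_flags=STARTED|IO_STAT, .tag=12, .internal_tag=-1}
 *
 * with a driver-specific suffix appended before the closing brace when the
 * driver implements ->show_rq().
 */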

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start = hctx_dispatch_start,
	.next = hctx_dispatch_next,
	.stop = hctx_dispatch_stop,
	.show = blk_mq_debugfs_rq_show,
};

static int hctx_dispatch_open(struct inode *inode, struct file *file)
{
	return blk_mq_debugfs_seq_open(inode, file, &hctx_dispatch_seq_ops);
}

static const struct file_operations hctx_dispatch_fops = {
	.open = hctx_dispatch_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
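
/*
 * The dispatch file walks hctx->dispatch under hctx->lock via the seq_file
 * start/stop hooks above, so the list cannot change while it is being
 * printed; each entry is rendered by blk_mq_debugfs_rq_show().
 */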

static int hctx_ctx_map_show(struct seq_file *m, void *v)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static int hctx_ctx_map_open(struct inode *inode, struct file *file)
{
	return single_open(file, hctx_ctx_map_show, inode->i_private);
}

static const struct file_operations hctx_ctx_map_fops = {
	.open = hctx_ctx_map_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}
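
/*
 * Sketch of what a tags read produces (values are illustrative):
 *
 *	nr_tags=31
 *	nr_reserved_tags=0
 *	active_queues=0
 *
 *	bitmap_tags:
 *	<sbitmap_queue_show() output>
 *
 * followed by a breserved_tags section only when reserved tags exist.
 */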

static int hctx_tags_show(struct seq_file *m, void *v)
{
	struct blk_mq_hw_ctx *hctx = m->private;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_open(struct inode *inode, struct file *file)
{
	return single_open(file, hctx_tags_show, inode->i_private);
}

static const struct file_operations hctx_tags_fops = {
	.open = hctx_tags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int hctx_tags_bitmap_show(struct seq_file *m, void *v)
{
	struct blk_mq_hw_ctx *hctx = m->private;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_open(struct inode *inode, struct file *file)
{
	return single_open(file, hctx_tags_bitmap_show, inode->i_private);
}

static const struct file_operations hctx_tags_bitmap_fops = {
	.open = hctx_tags_bitmap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int hctx_sched_tags_show(struct seq_file *m, void *v)
{
	struct blk_mq_hw_ctx *hctx = m->private;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_open(struct inode *inode, struct file *file)
{
	return single_open(file, hctx_sched_tags_show, inode->i_private);
}

static const struct file_operations hctx_sched_tags_fops = {
	.open = hctx_sched_tags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int hctx_sched_tags_bitmap_show(struct seq_file *m, void *v)
{
	struct blk_mq_hw_ctx *hctx = m->private;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_open(struct inode *inode, struct file *file)
{
	return single_open(file, hctx_sched_tags_bitmap_show, inode->i_private);
}

static const struct file_operations hctx_sched_tags_bitmap_fops = {
	.open = hctx_sched_tags_bitmap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int hctx_io_poll_show(struct seq_file *m, void *v)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	seq_printf(m, "considered=%lu\n", hctx->poll_considered);
	seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
	seq_printf(m, "success=%lu\n", hctx->poll_success);
	return 0;
}

static int hctx_io_poll_open(struct inode *inode, struct file *file)
{
	return single_open(file, hctx_io_poll_show, inode->i_private);
}

static ssize_t hctx_io_poll_write(struct file *file, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	struct blk_mq_hw_ctx *hctx = m->private;

	hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
	return count;
}

static const struct file_operations hctx_io_poll_fops = {
	.open = hctx_io_poll_open,
	.read = seq_read,
	.write = hctx_io_poll_write,
	.llseek = seq_lseek,
	.release = single_release,
};
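
/*
 * io_poll is read/write: reading dumps the three poll counters, and writing
 * anything resets them to zero; hctx_io_poll_write() never looks at the
 * buffer contents. The same write-to-reset pattern is used by the
 * dispatched, queued, and run files below.
 */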

static int hctx_dispatched_show(struct seq_file *m, void *v)
{
	struct blk_mq_hw_ctx *hctx = m->private;
	int i;

	seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
		unsigned int d = 1U << (i - 1);

		seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
	}

	seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
	return 0;
}
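
/*
 * The output above is a histogram of dispatch batch sizes: row 0 counts
 * empty runs, each middle row is labeled with the power of two 1 << (i - 1),
 * and the final "N+" row is a catch-all for larger batches (the counting
 * side lives in blk-mq proper, not in this file).
 */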

static int hctx_dispatched_open(struct inode *inode, struct file *file)
{
	return single_open(file, hctx_dispatched_show, inode->i_private);
}

static ssize_t hctx_dispatched_write(struct file *file, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	struct blk_mq_hw_ctx *hctx = m->private;
	int i;

	for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
		hctx->dispatched[i] = 0;
	return count;
}

static const struct file_operations hctx_dispatched_fops = {
	.open = hctx_dispatched_open,
	.read = seq_read,
	.write = hctx_dispatched_write,
	.llseek = seq_lseek,
	.release = single_release,
};

static int hctx_queued_show(struct seq_file *m, void *v)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	seq_printf(m, "%lu\n", hctx->queued);
	return 0;
}

static int hctx_queued_open(struct inode *inode, struct file *file)
{
	return single_open(file, hctx_queued_show, inode->i_private);
}

static ssize_t hctx_queued_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	struct blk_mq_hw_ctx *hctx = m->private;

	hctx->queued = 0;
	return count;
}

static const struct file_operations hctx_queued_fops = {
	.open = hctx_queued_open,
	.read = seq_read,
	.write = hctx_queued_write,
	.llseek = seq_lseek,
	.release = single_release,
};

static int hctx_run_show(struct seq_file *m, void *v)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static int hctx_run_open(struct inode *inode, struct file *file)
{
	return single_open(file, hctx_run_show, inode->i_private);
}

static ssize_t hctx_run_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	struct blk_mq_hw_ctx *hctx = m->private;

	hctx->run = 0;
	return count;
}

static const struct file_operations hctx_run_fops = {
	.open = hctx_run_open,
	.read = seq_read,
	.write = hctx_run_write,
	.llseek = seq_lseek,
	.release = single_release,
};

static int hctx_active_show(struct seq_file *m, void *v)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
	return 0;
}

static int hctx_active_open(struct inode *inode, struct file *file)
{
	return single_open(file, hctx_active_show, inode->i_private);
}

static const struct file_operations hctx_active_fops = {
	.open = hctx_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&ctx->lock)
{
	struct blk_mq_ctx *ctx = m->private;

	spin_lock(&ctx->lock);
	return seq_list_start(&ctx->rq_list, *pos);
}

static void *ctx_rq_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_ctx *ctx = m->private;

	return seq_list_next(v, &ctx->rq_list, pos);
}

static void ctx_rq_list_stop(struct seq_file *m, void *v)
	__releases(&ctx->lock)
{
	struct blk_mq_ctx *ctx = m->private;

	spin_unlock(&ctx->lock);
}

static const struct seq_operations ctx_rq_list_seq_ops = {
	.start = ctx_rq_list_start,
	.next = ctx_rq_list_next,
	.stop = ctx_rq_list_stop,
	.show = blk_mq_debugfs_rq_show,
};

static int ctx_rq_list_open(struct inode *inode, struct file *file)
{
	return blk_mq_debugfs_seq_open(inode, file, &ctx_rq_list_seq_ops);
}

static const struct file_operations ctx_rq_list_fops = {
	.open = ctx_rq_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int ctx_dispatched_show(struct seq_file *m, void *v)
{
	struct blk_mq_ctx *ctx = m->private;

	seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
	return 0;
}

static int ctx_dispatched_open(struct inode *inode, struct file *file)
{
	return single_open(file, ctx_dispatched_show, inode->i_private);
}

static ssize_t ctx_dispatched_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	struct blk_mq_ctx *ctx = m->private;

	ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
	return count;
}

static const struct file_operations ctx_dispatched_fops = {
	.open = ctx_dispatched_open,
	.read = seq_read,
	.write = ctx_dispatched_write,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ctx_merged_show(struct seq_file *m, void *v)
{
	struct blk_mq_ctx *ctx = m->private;

	seq_printf(m, "%lu\n", ctx->rq_merged);
	return 0;
}

static int ctx_merged_open(struct inode *inode, struct file *file)
{
	return single_open(file, ctx_merged_show, inode->i_private);
}

static ssize_t ctx_merged_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	struct blk_mq_ctx *ctx = m->private;

	ctx->rq_merged = 0;
	return count;
}

static const struct file_operations ctx_merged_fops = {
	.open = ctx_merged_open,
	.read = seq_read,
	.write = ctx_merged_write,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ctx_completed_show(struct seq_file *m, void *v)
{
	struct blk_mq_ctx *ctx = m->private;

	seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
	return 0;
}

static int ctx_completed_open(struct inode *inode, struct file *file)
{
	return single_open(file, ctx_completed_show, inode->i_private);
}

static ssize_t ctx_completed_write(struct file *file, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	struct blk_mq_ctx *ctx = m->private;

	ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
	return count;
}

static const struct file_operations ctx_completed_fops = {
	.open = ctx_completed_open,
	.read = seq_read,
	.write = ctx_completed_write,
	.llseek = seq_lseek,
	.release = single_release,
};
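
/*
 * For the per-ctx dispatched and completed files above, the two columns are
 * printed as rq_*[1] then rq_*[0]; index 1 appears to be the is_sync
 * counter in blk-mq, so the sync count comes first. Writing to either file
 * clears both counters.
 */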

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{"poll_stat", 0400, &queue_poll_stat_fops},
	{"state", 0600, &blk_queue_flags_fops},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, &hctx_state_fops},
	{"flags", 0400, &hctx_flags_fops},
	{"dispatch", 0400, &hctx_dispatch_fops},
	{"ctx_map", 0400, &hctx_ctx_map_fops},
	{"tags", 0400, &hctx_tags_fops},
	{"tags_bitmap", 0400, &hctx_tags_bitmap_fops},
	{"sched_tags", 0400, &hctx_sched_tags_fops},
	{"sched_tags_bitmap", 0400, &hctx_sched_tags_bitmap_fops},
	{"io_poll", 0600, &hctx_io_poll_fops},
	{"dispatched", 0600, &hctx_dispatched_fops},
	{"queued", 0600, &hctx_queued_fops},
	{"run", 0600, &hctx_run_fops},
	{"active", 0400, &hctx_active_fops},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"rq_list", 0400, &ctx_rq_list_fops},
	{"dispatched", 0600, &ctx_dispatched_fops},
	{"merged", 0600, &ctx_merged_fops},
	{"completed", 0600, &ctx_completed_fops},
	{},
};

int blk_mq_debugfs_register(struct request_queue *q)
{
	if (!blk_debugfs_root)
		return -ENOENT;

	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	if (!q->debugfs_dir)
		goto err;

	if (blk_mq_debugfs_register_mq(q))
		goto err;

	return 0;

err:
	blk_mq_debugfs_unregister(q);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
	debugfs_remove_recursive(q->debugfs_dir);
	q->mq_debugfs_dir = NULL;
	q->debugfs_dir = NULL;
}

static bool debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	for (; attr->name; attr++) {
		if (!debugfs_create_file(attr->name, attr->mode, parent,
					 data, attr->fops))
			return false;
	}
	return true;
}
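
/*
 * debugfs_create_files() is why every attribute table ends with an empty
 * {} entry: the loop runs until it hits the NULL ->name sentinel. On any
 * creation failure it bails out and lets the caller tear everything down
 * through the unregister path.
 */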

static int blk_mq_debugfs_register_ctx(struct request_queue *q,
				       struct blk_mq_ctx *ctx,
				       struct dentry *hctx_dir)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx_dir);
	if (!ctx_dir)
		return -ENOMEM;

	if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
		return -ENOMEM;

	return 0;
}

static int blk_mq_debugfs_register_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	struct dentry *hctx_dir;
	char name[20];
	int i;

	snprintf(name, sizeof(name), "%u", hctx->queue_num);
	hctx_dir = debugfs_create_dir(name, q->mq_debugfs_dir);
	if (!hctx_dir)
		return -ENOMEM;

	if (!debugfs_create_files(hctx_dir, hctx, blk_mq_debugfs_hctx_attrs))
		return -ENOMEM;

	hctx_for_each_ctx(hctx, ctx, i) {
		if (blk_mq_debugfs_register_ctx(q, ctx, hctx_dir))
			return -ENOMEM;
	}

	return 0;
}

int blk_mq_debugfs_register_mq(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	if (!q->debugfs_dir)
		return -ENOENT;

	q->mq_debugfs_dir = debugfs_create_dir("mq", q->debugfs_dir);
	if (!q->mq_debugfs_dir)
		goto err;

	if (!debugfs_create_files(q->mq_debugfs_dir, q, blk_mq_debugfs_queue_attrs))
		goto err;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_debugfs_register_hctx(q, hctx))
			goto err;
	}

	return 0;

err:
	blk_mq_debugfs_unregister_mq(q);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister_mq(struct request_queue *q)
{
	debugfs_remove_recursive(q->mq_debugfs_dir);
	q->mq_debugfs_dir = NULL;
}
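
/*
 * Resulting hierarchy, sketched for a hypothetical single-hctx disk sda
 * with one CPU (per the register functions above):
 *
 *	/sys/kernel/debug/block/sda/
 *		mq/
 *			poll_stat, state
 *			0/
 *				state, flags, dispatch, ctx_map, tags, ...
 *				cpu0/
 *					rq_list, dispatched, merged, completed
 */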