nvmet: use xarray for ctrl ns storing
This patch replaces the linked list used for ctrl->namespaces tracking
with an XArray, which improves performance when looking up a single
namespace.

XArray vs Default (three runs each):

IOPS and bandwidth (higher is better), ~1.8% increase:
-------------------------------------------------------
XArray:
  read: IOPS=160k, BW=626MiB/s (656MB/s)(18.3GiB/30001msec)
  read: IOPS=160k, BW=626MiB/s (656MB/s)(18.3GiB/30001msec)
  read: IOPS=162k, BW=631MiB/s (662MB/s)(18.5GiB/30001msec)

Default:
  read: IOPS=156k, BW=609MiB/s (639MB/s)(17.8GiB/30001msec)
  read: IOPS=157k, BW=613MiB/s (643MB/s)(17.0GiB/30001msec)
  read: IOPS=160k, BW=626MiB/s (656MB/s)(18.3GiB/30001msec)

Submission latency (lower is better), ~8.3% decrease:
------------------------------------------------------
XArray:
  slat (usec): min=7, max=8386, avg=11.19, stdev=5.96
  slat (usec): min=7, max=441,  avg=11.09, stdev=4.48
  slat (usec): min=7, max=1088, avg=11.21, stdev=4.54

Default:
  slat (usec): min=8, max=2826.5k, avg=23.96, stdev=3911.50
  slat (usec): min=8, max=503,     avg=12.52, stdev=5.07
  slat (usec): min=8, max=2384,    avg=12.50, stdev=5.28

CPU usage (lower is better), ~5.2% decrease:
---------------------------------------------
XArray:
  cpu : usr=1.84%, sys=18.61%, ctx=949471, majf=0, minf=250
  cpu : usr=1.83%, sys=18.41%, ctx=950262, majf=0, minf=237
  cpu : usr=1.82%, sys=18.82%, ctx=957224, majf=0, minf=234

Default:
  cpu : usr=1.70%, sys=19.21%, ctx=858196, majf=0, minf=251
  cpu : usr=1.82%, sys=19.98%, ctx=929720, majf=0, minf=227
  cpu : usr=1.83%, sys=20.33%, ctx=947208, majf=0, minf=235

Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent ca0f1a8055
commit 7774e77ebe
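For readers new to the XArray API, the sketch below shows the storage pattern this patch adopts, reduced to a minimal, self-contained form. The demo_ns type, the demo_* helpers, and the demo_namespaces array are illustrative names invented for this sketch, not nvmet code; only the xa_*() calls themselves are the real kernel API.

/*
 * Minimal sketch of the pattern: store namespace objects in an XArray
 * keyed by nsid instead of keeping them on a sorted linked list.
 */
#include <linux/xarray.h>
#include <linux/types.h>

struct demo_ns {
        u32 nsid;
};

/* replaces a sorted linked list keyed by nsid */
static DEFINE_XARRAY(demo_namespaces);

static int demo_ns_store(struct demo_ns *ns)
{
        /* fails with -EBUSY if an entry already exists at index ns->nsid */
        return xa_insert(&demo_namespaces, ns->nsid, ns, GFP_KERNEL);
}

static struct demo_ns *demo_ns_lookup(u32 nsid)
{
        /* direct index lookup instead of walking a list */
        return xa_load(&demo_namespaces, nsid);
}

static void demo_ns_remove(struct demo_ns *ns)
{
        xa_erase(&demo_namespaces, ns->nsid);
}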
@@ -113,11 +113,10 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
 	u64 data_units_read = 0, data_units_written = 0;
 	struct nvmet_ns *ns;
 	struct nvmet_ctrl *ctrl;
+	unsigned long idx;
 
 	ctrl = req->sq->ctrl;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
+	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
 		/* we don't have the right data for file backed ns */
 		if (!ns->bdev)
 			continue;
@@ -127,9 +126,7 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
 		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
 		data_units_written += DIV_ROUND_UP(
 			part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
-
 	}
-	rcu_read_unlock();
 
 	put_unaligned_le64(host_reads, &slog->host_reads[0]);
 	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
@@ -230,14 +227,13 @@ static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	struct nvmet_ns *ns;
+	unsigned long idx;
 	u32 count = 0;
 
 	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
-		rcu_read_lock();
-		list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
+		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
 			if (ns->anagrpid == grpid)
 				desc->nsids[count++] = cpu_to_le32(ns->nsid);
-		rcu_read_unlock();
 	}
 
 	desc->grpid = cpu_to_le32(grpid);
@@ -556,6 +552,7 @@ static void nvmet_execute_identify_nslist(struct nvmet_req *req)
 	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	struct nvmet_ns *ns;
+	unsigned long idx;
 	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
 	__le32 *list;
 	u16 status = 0;
@@ -567,15 +564,13 @@ static void nvmet_execute_identify_nslist(struct nvmet_req *req)
 		goto out;
 	}
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
+	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
 		if (ns->nsid <= min_nsid)
 			continue;
 		list[i++] = cpu_to_le32(ns->nsid);
 		if (i == buf_size / sizeof(__le32))
 			break;
 	}
-	rcu_read_unlock();
 
 	status = nvmet_copy_to_sgl(req, 0, list, buf_size);
 
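A note on why the explicit RCU critical sections disappear above (my reading of the change, not wording from the patch): xa_for_each() takes the RCU read lock internally for each lookup step, so the call sites that iterate the namespaces no longer wrap the walk in rcu_read_lock()/rcu_read_unlock() themselves. Annotated fragment of the new loop:

	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		/* file-backed namespaces have no block-device statistics */
		if (!ns->bdev)
			continue;
		/* ... accumulate the SMART counters as before ... */
	}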
@@ -115,13 +115,14 @@ u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
 
 static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
 {
-	struct nvmet_ns *ns;
+	unsigned long nsid = 0;
+	struct nvmet_ns *cur;
+	unsigned long idx;
 
-	if (list_empty(&subsys->namespaces))
-		return 0;
+	xa_for_each(&subsys->namespaces, idx, cur)
+		nsid = cur->nsid;
 
-	ns = list_last_entry(&subsys->namespaces, struct nvmet_ns, dev_link);
-	return ns->nsid;
+	return nsid;
 }
 
 static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
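The rewritten nvmet_max_nsid() relies on xa_for_each() visiting entries in increasing index order, so the last nsid assigned in the loop is the largest one present (and 0 if the array is empty, matching the old list_empty() case). A minimal illustration, using hypothetical names rather than nvmet code:

static unsigned long max_index(struct xarray *xa)
{
	unsigned long idx, max = 0;
	void *entry;

	/* entries are visited in ascending index order */
	xa_for_each(xa, idx, entry)
		max = idx;
	return max;
}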
@@ -410,28 +411,13 @@ static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
 	cancel_delayed_work_sync(&ctrl->ka_work);
 }
 
-static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
-		__le32 nsid)
-{
-	struct nvmet_ns *ns;
-
-	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
-		if (ns->nsid == le32_to_cpu(nsid))
-			return ns;
-	}
-
-	return NULL;
-}
-
 struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
 {
 	struct nvmet_ns *ns;
 
-	rcu_read_lock();
-	ns = __nvmet_find_namespace(ctrl, nsid);
+	ns = xa_load(&ctrl->subsys->namespaces, le32_to_cpu(nsid));
 	if (ns)
 		percpu_ref_get(&ns->ref);
-	rcu_read_unlock();
 
 	return ns;
 }
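The lookup path is the most plausible source of the submission-latency win (my interpretation, not a claim from the commit message): xa_load() is a direct index lookup that returns NULL when no entry exists, replacing a linear walk over every namespace for each command. The caller still pins the result before use:

	ns = xa_load(&ctrl->subsys->namespaces, le32_to_cpu(nsid));
	if (ns)
		percpu_ref_get(&ns->ref);	/* hold the namespace across the request */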
@@ -586,24 +572,10 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 	if (ns->nsid > subsys->max_nsid)
 		subsys->max_nsid = ns->nsid;
 
-	/*
-	 * The namespaces list needs to be sorted to simplify the implementation
-	 * of the Identify Namepace List subcommand.
-	 */
-	if (list_empty(&subsys->namespaces)) {
-		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
-	} else {
-		struct nvmet_ns *old;
+	ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
+	if (ret)
+		goto out_restore_subsys_maxnsid;
 
-		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link,
-				lockdep_is_held(&subsys->lock)) {
-			BUG_ON(ns->nsid == old->nsid);
-			if (ns->nsid < old->nsid)
-				break;
-		}
-
-		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
-	}
 	subsys->nr_namespaces++;
 
 	nvmet_ns_changed(subsys, ns->nsid);
@@ -612,6 +584,10 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 out_unlock:
 	mutex_unlock(&subsys->lock);
 	return ret;
+
+out_restore_subsys_maxnsid:
+	subsys->max_nsid = nvmet_max_nsid(subsys);
+	percpu_ref_exit(&ns->ref);
 out_dev_put:
 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
 		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
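On the insertion side, the sorted-insert walk and its BUG_ON() duplicate check can go away because an XArray is ordered by index by construction, and xa_insert() refuses to overwrite an existing entry: it returns -EBUSY if something is already stored at ns->nsid and -ENOMEM if node allocation fails. That is also my reading of why the new out_restore_subsys_maxnsid label exists: it unwinds the max_nsid bump made just before the failed insert. Annotated fragment:

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	/* -EBUSY on duplicate nsid, -ENOMEM on allocation failure */
	ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
	if (ret)
		goto out_restore_subsys_maxnsid;	/* recompute max_nsid */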
@@ -630,7 +606,7 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
 		goto out_unlock;
 
 	ns->enabled = false;
-	list_del_rcu(&ns->dev_link);
+	xa_erase(&ns->subsys->namespaces, ns->nsid);
 	if (ns->nsid == subsys->max_nsid)
 		subsys->max_nsid = nvmet_max_nsid(subsys);
 
@@ -681,7 +657,6 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
 	if (!ns)
 		return NULL;
 
-	INIT_LIST_HEAD(&ns->dev_link);
 	init_completion(&ns->disable_done);
 
 	ns->nsid = nsid;
@@ -1263,14 +1238,14 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
 		struct nvmet_req *req)
 {
 	struct nvmet_ns *ns;
+	unsigned long idx;
 
 	if (!req->p2p_client)
 		return;
 
 	ctrl->p2p_client = get_device(req->p2p_client);
 
-	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link,
-			lockdep_is_held(&ctrl->subsys->lock))
+	xa_for_each(&ctrl->subsys->namespaces, idx, ns)
 		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
 }
 
@@ -1523,7 +1498,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
 	kref_init(&subsys->ref);
 
 	mutex_init(&subsys->lock);
-	INIT_LIST_HEAD(&subsys->namespaces);
+	xa_init(&subsys->namespaces);
 	INIT_LIST_HEAD(&subsys->ctrls);
 	INIT_LIST_HEAD(&subsys->hosts);
 
@@ -1535,8 +1510,9 @@ static void nvmet_subsys_free(struct kref *ref)
 	struct nvmet_subsys *subsys =
 		container_of(ref, struct nvmet_subsys, ref);
 
-	WARN_ON_ONCE(!list_empty(&subsys->namespaces));
+	WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
 
+	xa_destroy(&subsys->namespaces);
 	kfree(subsys->subsysnqn);
 	kfree_rcu(subsys->model, rcuhead);
 	kfree(subsys);
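Lifecycle pairing, as I read the last two hunks: xa_init() sets up an empty array at subsystem allocation, and xa_destroy() at teardown frees the array's internal nodes but not the stored objects themselves, which is why the WARN_ON_ONCE(!xa_empty(...)) sanity check stays in front of it. In compressed form:

	xa_init(&subsys->namespaces);			/* nvmet_subsys_alloc() */

	WARN_ON_ONCE(!xa_empty(&subsys->namespaces));	/* nvmet_subsys_free() */
	xa_destroy(&subsys->namespaces);		/* frees internal nodes only */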
@@ -52,7 +52,6 @@
 	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
 
 struct nvmet_ns {
-	struct list_head	dev_link;
 	struct percpu_ref	ref;
 	struct block_device	*bdev;
 	struct file		*file;
@@ -219,7 +218,7 @@ struct nvmet_subsys {
 	struct mutex		lock;
 	struct kref		ref;
 
-	struct list_head	namespaces;
+	struct xarray		namespaces;
 	unsigned int		nr_namespaces;
 	unsigned int		max_nsid;
 	u16			cntlid_min;