/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/t10-pi.h>
#include <linux/pm_qos.h>
#include <scsi/sg.h>
#include <asm/unaligned.h>

#include "nvme.h"
#include "fabrics.h"

#define NVME_MINORS		(1U << MINORBITS)

unsigned char admin_timeout = 60;
module_param(admin_timeout, byte, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned char nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static int nvme_char_major;
module_param(nvme_char_major, int, 0);

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

static LIST_HEAD(nvme_ctrl_list);
static DEFINE_SPINLOCK(dev_list_lock);

static struct class *nvme_class;

int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret)
		flush_work(&ctrl->reset_work);
	return ret;
}
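
/*
 * Translate the NVMe status code of a completed request into the generic
 * blk_status_t that is reported back to the block layer.
 */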
static blk_status_t nvme_error_status(struct request *req)
{
	switch (nvme_req(req)->status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_ONCS_NOT_SUPPORTED:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
		return BLK_STS_MEDIUM;
	default:
		return BLK_STS_IOERR;
	}
}

static inline bool nvme_req_needs_retry(struct request *req)
{
	if (blk_noretry_request(req))
		return false;
	if (nvme_req(req)->status & NVME_SC_DNR)
		return false;
	if (jiffies - req->start_time >= req->timeout)
		return false;
	if (nvme_req(req)->retries >= nvme_max_retries)
		return false;
	return true;
}

void nvme_complete_rq(struct request *req)
{
	if (unlikely(nvme_req(req)->status && nvme_req_needs_retry(req))) {
		nvme_req(req)->retries++;
		blk_mq_requeue_request(req, !blk_mq_queue_stopped(req->q));
		return;
	}

	blk_mq_end_request(req, nvme_error_status(req));
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);

void nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	int status;

	if (!blk_mq_request_started(req))
		return;

	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	status = NVME_SC_ABORT_REQ;
	if (blk_queue_dying(req->q))
		status |= NVME_SC_DNR;
	nvme_req(req)->status = status;
	blk_mq_complete_request(req);
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);
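
/*
 * Controller state machine: a transition to new_state is accepted only from
 * the old states listed under it below; any other combination leaves
 * ctrl->state unchanged and returns false.
 */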
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	bool changed = false;

	spin_lock_irq(&ctrl->lock);

	old_state = ctrl->state;
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_RECONNECTING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_RECONNECTING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			/* FALLTHRU */
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed)
		ctrl->state = new_state;

	spin_unlock_irq(&ctrl->lock);

	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->ndev)
		nvme_nvm_unregister(ns);

	if (ns->disk) {
		spin_lock(&dev_list_lock);
		ns->disk->private_data = NULL;
		spin_unlock(&dev_list_lock);
	}

	put_disk(ns->disk);
	ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

static void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}

static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
{
	struct nvme_ns *ns;

	spin_lock(&dev_list_lock);
	ns = disk->private_data;
	if (ns) {
		if (!kref_get_unless_zero(&ns->kref))
			goto fail;
		if (!try_module_get(ns->ctrl->ops->module))
			goto fail_put_ns;
	}
	spin_unlock(&dev_list_lock);

	return ns;

fail_put_ns:
	kref_put(&ns->kref, nvme_free_ns);
fail:
	spin_unlock(&dev_list_lock);
	return NULL;
}

struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags, int qid)
{
	unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
	struct request *req;

	if (qid == NVME_QID_ANY) {
		req = blk_mq_alloc_request(q, op, flags);
	} else {
		req = blk_mq_alloc_request_hctx(q, op, flags,
				qid ? qid - 1 : 0);
	}
	if (IS_ERR(req))
		return req;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	nvme_req(req)->cmd = cmd;

	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
}
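
/*
 * Build a Dataset Management (deallocate) command: every bio in the request
 * becomes one nvme_dsm_range entry, and the range array is attached to the
 * request as a special payload so that it is mapped for DMA in place of
 * bio data.
 */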
static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return BLK_STS_RESOURCE;

	__rq_for_each_bio(bio, req) {
		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

		range[n].cattr = cpu_to_le32(0);
		range[n].nlb = cpu_to_le32(nlb);
		range[n].slba = cpu_to_le64(slba);
		n++;
	}

	if (WARN_ON_ONCE(n != segments)) {
		kfree(range);
		return BLK_STS_IOERR;
	}

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = sizeof(*range) * segments;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_STS_OK;
}

static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	u16 control = 0;
	u32 dsmgmt = 0;

	/*
	 * If formatted with metadata, require the block layer provide a
	 * buffer unless this namespace is formatted such that the metadata
	 * can be stripped/generated by the controller with PRACT=1.
	 */
	if (ns && ns->ms &&
	    (!ns->pi_type || ns->ms != sizeof(struct t10_pi_tuple)) &&
	    !blk_integrity_rq(req) && !blk_rq_is_passthrough(req))
		return BLK_STS_NOTSUPP;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (ns->ms) {
		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			cmnd->rw.reftag = cpu_to_le32(
					nvme_block_nr(ns, blk_rq_pos(req)));
			break;
		}
		if (!blk_integrity_rq(req))
			control |= NVME_RW_PRINFO_PRACT;
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
	return 0;
}
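
/*
 * Translate a block layer request into the NVMe command that goes into the
 * submission queue entry.  Passthrough requests carry a pre-built command
 * which is copied verbatim; everything else is constructed from the request
 * here.
 */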
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd)
{
	blk_status_t ret = BLK_STS_OK;

	if (!(req->rq_flags & RQF_DONTPREP)) {
		nvme_req(req)->retries = 0;
		nvme_req(req)->flags = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
		break;
	case REQ_OP_FLUSH:
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_WRITE_ZEROES:
		/* currently only aliased to deallocate for a few ctrls: */
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		ret = nvme_setup_rw(ns, req, cmd);
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	cmd->common.command_id = req->tag;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

/*
 * Returns 0 on success.  If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code.
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head, int flags)
{
	struct request *req;
	int ret;

	req = nvme_alloc_request(q, cmd, flags, qid);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	blk_execute_rq(req->q, NULL, req, at_head);
	if (result)
		*result = nvme_req(req)->result;
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
			NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
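
/*
 * Like __nvme_submit_sync_cmd(), but maps a user space data buffer (and
 * optionally a separate user space metadata buffer, attached as a
 * bio_integrity payload) into the request before executing it.
 */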
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout)
{
	bool write = nvme_is_write(cmd);
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;

		if (!disk)
			goto submit;
		bio->bi_bdev = bdget_disk(disk, 0);
		if (!bio->bi_bdev) {
			ret = -ENODEV;
			goto out_unmap;
		}

		if (meta_buffer && meta_len) {
			struct bio_integrity_payload *bip;

			meta = kmalloc(meta_len, GFP_KERNEL);
			if (!meta) {
				ret = -ENOMEM;
				goto out_unmap;
			}

			if (write) {
				if (copy_from_user(meta, meta_buffer,
						meta_len)) {
					ret = -EFAULT;
					goto out_free_meta;
				}
			}

			bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
			if (IS_ERR(bip)) {
				ret = PTR_ERR(bip);
				goto out_free_meta;
			}

			bip->bip_iter.bi_size = meta_len;
			bip->bip_iter.bi_sector = meta_seed;

			ret = bio_integrity_add_page(bio, virt_to_page(meta),
					meta_len, offset_in_page(meta));
			if (ret != meta_len) {
				ret = -ENOMEM;
				goto out_free_meta;
			}
		}
	}
submit:
	blk_execute_rq(req->q, disk, req, 0);
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
	if (result)
		*result = le32_to_cpu(nvme_req(req)->result.u32);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
out_free_meta:
	kfree(meta);
out_unmap:
	if (bio) {
		if (disk && bio->bi_bdev)
			bdput(bio->bi_bdev);
		blk_rq_unmap_user(bio);
	}
out:
	blk_mq_free_request(req);
	return ret;
}

int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout)
{
	return __nvme_submit_user_cmd(q, cmd, ubuffer, bufflen, NULL, 0, 0,
			result, timeout);
}
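
/*
 * Keep Alive support: the delayed work below is re-armed each time a
 * keep-alive command completes successfully, so that one command is issued
 * roughly every kato seconds for as long as the timer runs.
 */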
static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;

	blk_mq_free_request(rq);

	if (status) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n",
			status);
		return;
	}

	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
	struct nvme_command c;
	struct request *rq;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_keep_alive;

	rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
			NVME_QID_ANY);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;

	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);

	return 0;
}

static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);

	if (nvme_keep_alive(ctrl)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed\n");
		nvme_reset_ctrl(ctrl);
		return;
	}
}

void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
EXPORT_SYMBOL_GPL(nvme_start_keep_alive);

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}
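
/*
 * Parse the Namespace Identification Descriptor list (CNS 03h): each entry
 * is an nvme_ns_id_desc header giving the descriptor type and length,
 * followed by the EUI-64, NGUID or UUID payload; an entry with a zero
 * length terminates the list.
 */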
static int nvme_identify_ns_descs(struct nvme_ns *ns, unsigned nsid)
{
	struct nvme_command c = { };
	int status;
	void *data;
	int pos;
	int len;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	status = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, data,
			NVME_IDENTIFY_DATA_SIZE);
	if (status)
		goto free_data;

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

		switch (cur->nidt) {
		case NVME_NIDT_EUI64:
			if (cur->nidl != NVME_NIDT_EUI64_LEN) {
				dev_warn(ns->ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_EUI64_LEN;
			memcpy(ns->eui, data + pos + sizeof(*cur), len);
			break;
		case NVME_NIDT_NGUID:
			if (cur->nidl != NVME_NIDT_NGUID_LEN) {
				dev_warn(ns->ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_NGUID_LEN;
			memcpy(ns->nguid, data + pos + sizeof(*cur), len);
			break;
		case NVME_NIDT_UUID:
			if (cur->nidl != NVME_NIDT_UUID_LEN) {
				dev_warn(ns->ctrl->device,
					 "ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
					 cur->nidl);
				goto free_data;
			}
			len = NVME_NIDT_UUID_LEN;
			uuid_copy(&ns->uuid, data + pos + sizeof(*cur));
			break;
		default:
			/* Skip unknown types */
			len = cur->nidl;
			break;
		}

		len += sizeof(*cur);
	}
free_data:
	kfree(data);
	return status;
}

static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
{
	struct nvme_command c = { };

	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST;
	c.identify.nsid = cpu_to_le32(nsid);
	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
}

int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS;

	*id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ns));
	if (error)
		kfree(*id);
	return error;
}

int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
		      void *buffer, size_t buflen, u32 *result)
{
	struct nvme_command c;
	union nvme_result res;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.fid = cpu_to_le32(fid);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, buffer, buflen, 0,
			NVME_QID_ANY, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		      void *buffer, size_t buflen, u32 *result)
{
	struct nvme_command c;
	union nvme_result res;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}
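
/*
 * Read the controller-wide SMART / Health Information log page.  For Get Log
 * Page, the low byte of cdw10 selects the log identifier and bits 27:16
 * carry the 0's based transfer length in dwords, which is what the shifted
 * size expression below encodes.
 */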
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
{
	struct nvme_command c = { };
	int error;

	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
	c.common.cdw10[0] = cpu_to_le32(
			(((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
			NVME_LOG_SMART);

	*log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
	if (!*log)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *log,
			sizeof(struct nvme_smart_log));
	if (error)
		kfree(*log);
	return error;
}
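
/*
 * The Number of Queues feature uses 0's based values: the requested
 * submission queue count goes in the low 16 bits and the completion queue
 * count in the high 16 bits, which is what the q_count encoding below
 * builds from *count.
 */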
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count.  We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix
	 * them up.
	 */
	if (status > 0) {
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);

static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;
	meta_len = (io.nblocks + 1) * ns->ms;
	metadata = (void __user *)(uintptr_t)io.metadata;

	if (ns->ext) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return __nvme_submit_user_cmd(ns->queue, &c,
			(void __user *)(uintptr_t)io.addr, length,
			metadata, meta_len, io.slba, NULL, 0);
}

static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
	c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
	c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
	c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
	c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
	c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
			&cmd.result, timeout);
	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->ns_id;
	case NVME_IOCTL_ADMIN_CMD:
		return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, (void __user *)arg);
	case NVME_IOCTL_SUBMIT_IO:
		return nvme_submit_io(ns, (void __user *)arg);
#ifdef CONFIG_BLK_DEV_NVME_SCSI
	case SG_GET_VERSION_NUM:
		return nvme_sg_get_version_num((void __user *)arg);
	case SG_IO:
		return nvme_sg_io(ns, (void __user *)arg);
#endif
	default:
#ifdef CONFIG_NVM
		if (ns->ndev)
			return nvme_nvm_ioctl(ns, cmd, arg);
#endif
		if (is_sed_ioctl(cmd))
			return sed_ioctl(ns->ctrl->opal_dev, cmd,
					(void __user *) arg);
		return -ENOTTY;
	}
}

#ifdef CONFIG_COMPAT
static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case SG_IO:
		return -ENOIOCTLCMD;
	}
	return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl	NULL
#endif

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	return nvme_get_ns_from_disk(bdev->bd_disk) ? 0 : -ENXIO;
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
		u16 bs)
{
	struct nvme_ns *ns = disk->private_data;
	u16 old_ms = ns->ms;
	u8 pi_type = 0;

	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);

	/* PI implementation requires metadata equal to the t10 pi tuple size */
	if (ns->ms == sizeof(struct t10_pi_tuple))
		pi_type = id->dps & NVME_NS_DPS_PI_MASK;

	if (blk_get_integrity(disk) &&
	    (ns->pi_type != pi_type || ns->ms != old_ms ||
	     bs != queue_logical_block_size(disk->queue) ||
	     (ns->ms && ns->ext)))
		blk_integrity_unregister(disk);

	ns->pi_type = pi_type;
}

static void nvme_init_integrity(struct nvme_ns *ns)
{
	struct blk_integrity integrity;

	memset(&integrity, 0, sizeof(integrity));
	switch (ns->pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ns->ms;
	blk_integrity_register(ns->disk, &integrity);
	blk_queue_max_integrity_segments(ns->queue, 1);
}
#else
static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
		u16 bs)
{
}
static void nvme_init_integrity(struct nvme_ns *ns)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
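
/*
 * noiob is the namespace's optimal I/O boundary in logical blocks; convert
 * it to 512-byte sectors before handing it to the block layer, which will
 * then split I/Os so they do not straddle that boundary.
 */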
static void nvme_set_chunk_size(struct nvme_ns *ns)
{
	u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9));

	blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
}

static void nvme_config_discard(struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u32 logical_block_size = queue_logical_block_size(ns->queue);

	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

	ns->queue->limits.discard_alignment = logical_block_size;
	ns->queue->limits.discard_granularity = logical_block_size;
	blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
	blk_queue_max_discard_segments(ns->queue, NVME_DSM_MAX_RANGES);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		blk_queue_max_write_zeroes_sectors(ns->queue, UINT_MAX);
}

static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
{
	if (nvme_identify_ns(ns->ctrl, ns->ns_id, id)) {
		dev_warn(ns->ctrl->dev, "%s: Identify failure\n", __func__);
		return -ENODEV;
	}

	if ((*id)->ncap == 0) {
		kfree(*id);
		return -ENODEV;
	}

	if (ns->ctrl->vs >= NVME_VS(1, 1, 0))
		memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
	if (ns->ctrl->vs >= NVME_VS(1, 2, 0))
		memcpy(ns->nguid, (*id)->nguid, sizeof(ns->nguid));
	if (ns->ctrl->vs >= NVME_VS(1, 3, 0)) {
		/*
		 * Don't treat an error as fatal, as we potentially already
		 * have a NGUID or EUI-64.
		 */
		if (nvme_identify_ns_descs(ns, ns->ns_id))
			dev_warn(ns->ctrl->device,
				 "%s: Identify Descriptors failed\n", __func__);
	}

	return 0;
}

static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
{
	struct nvme_ns *ns = disk->private_data;
	u16 bs;

	/*
	 * If identify namespace failed, use default 512 byte block size so
	 * block layer can use before failing read/write for 0 capacity.
	 */
	ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
	if (ns->lba_shift == 0)
		ns->lba_shift = 9;
	bs = 1 << ns->lba_shift;
	ns->noiob = le16_to_cpu(id->noiob);

	blk_mq_freeze_queue(disk->queue);

	if (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
		nvme_prep_integrity(disk, id, bs);
	blk_queue_logical_block_size(ns->queue, bs);
	if (ns->noiob)
		nvme_set_chunk_size(ns);
	if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
		nvme_init_integrity(ns);
	if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
		set_capacity(disk, 0);
	else
		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));

	if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
		nvme_config_discard(ns);
	blk_mq_unfreeze_queue(disk->queue);
}

static int nvme_revalidate_disk(struct gendisk *disk)
{
	struct nvme_ns *ns = disk->private_data;
	struct nvme_id_ns *id = NULL;
	int ret;

	if (test_bit(NVME_NS_DEAD, &ns->flags)) {
		set_capacity(disk, 0);
		return -ENODEV;
	}

	ret = nvme_revalidate_ns(ns, &id);
	if (ret)
		return ret;

	__nvme_revalidate_disk(disk, id);
	kfree(id);

	return 0;
}

static char nvme_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 1;
	case PR_EXCLUSIVE_ACCESS:
		return 2;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 3;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 4;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 5;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 6;
	default:
		return 0;
	}
}
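
/*
 * All persistent reservation commands built here share one 16-byte payload:
 * bytes 0-7 hold the current reservation key and bytes 8-15 the new (or
 * preempted) key, both little endian.
 */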
static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
				u64 key, u64 sa_key, u8 op)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;
	struct nvme_command c;
	u8 data[16] = { 0, };

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	memset(&c, 0, sizeof(c));
	c.common.opcode = op;
	c.common.nsid = cpu_to_le32(ns->ns_id);
	c.common.cdw10[0] = cpu_to_le32(cdw10);

	return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
}

static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);

	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 1 << 3 : 0);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

static const struct pr_ops nvme_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};
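
/*
 * Security Send/Receive payloads for Opal: cdw10 carries the security
 * protocol (SECP) in bits 31:24 and the protocol specific field (SPSP) in
 * bits 23:8; cdw11 is the transfer length in bytes.
 */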
#ifdef CONFIG_BLK_SED_OPAL
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send)
{
	struct nvme_ctrl *ctrl = data;
	struct nvme_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	if (send)
		cmd.common.opcode = nvme_admin_security_send;
	else
		cmd.common.opcode = nvme_admin_security_recv;
	cmd.common.nsid = 0;
	cmd.common.cdw10[0] = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
	cmd.common.cdw10[1] = cpu_to_le32(len);

	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
				      ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */

static const struct block_device_operations nvme_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= nvme_ioctl,
	.compat_ioctl	= nvme_compat_ioctl,
	.open		= nvme_open,
	.release	= nvme_release,
	.getgeo		= nvme_getgeo,
	.revalidate_disk= nvme_revalidate_disk,
	.pr_ops		= &nvme_pr_ops,
};
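
/*
 * CAP.TO gives the worst-case time to wait for CSTS.RDY to change, in units
 * of 500 milliseconds, hence the (TO + 1) * HZ / 2 timeout below.
 */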
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
	unsigned long timeout =
		((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
	u32 csts, bit = enabled ? NVME_CSTS_RDY : 0;
	int ret;

	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if (csts == ~0)
			return -ENODEV;
		if ((csts & NVME_CSTS_RDY) == bit)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device not ready; aborting %s\n", enabled ?
						"initialisation" : "reset");
			return -ENODEV;
		}
	}

	return ret;
}

/*
 * If the device has been passed off to us in an enabled state, just clear
 * the enabled bit.  The spec says we should set the 'shutdown notification
 * bits', but doing so may cause the device to complete commands to the
 * admin queue ... and we don't know what memory that might be pointing at!
 */
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config &= ~NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
|
2016-06-15 05:22:41 +08:00
|
|
|
|
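	/*
	 * Some controllers cannot tolerate CSTS being read immediately
	 * after CC.EN is cleared and need time to settle first; this is
	 * required even at probe time, e.g. when re-probing after a quick
	 * kexec (see the DELAY_BEFORE_CHK_RDY quirk).
	 */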
	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
		msleep(NVME_QUIRK_DELAY_AMOUNT);

	return nvme_wait_ready(ctrl, cap, false);
}
EXPORT_SYMBOL_GPL(nvme_disable_ctrl);

int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
	/*
	 * Default to a 4K page size, with the intention to update this
	 * path in the future to accommodate architectures with differing
	 * kernel and IO page sizes.
	 */
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12, page_shift = 12;
	int ret;

	if (page_shift < dev_page_min) {
		dev_err(ctrl->device,
			"Minimum device page size %u too large for host (%u)\n",
			1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	ctrl->page_size = 1 << page_shift;

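	/*
	 * CC.MPS holds log2(page size) - 12; IOSQES/IOCQES are log2 of
	 * the 64-byte SQ and 16-byte CQ entry sizes.
	 */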
	ctrl->ctrl_config = NVME_CC_CSS_NVM;
	ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	ctrl->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
	ctrl->ctrl_config |= NVME_CC_ENABLE;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;
	return nvme_wait_ready(ctrl, cap, true);
}
EXPORT_SYMBOL_GPL(nvme_enable_ctrl);

int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
	unsigned long timeout = jiffies + (shutdown_timeout * HZ);
	u32 csts;
	int ret;

	ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
	ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
	if (ret)
		return ret;

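	/* Poll CSTS.SHST until the controller reports shutdown complete. */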
	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
		if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT)
			break;

		msleep(100);
		if (fatal_signal_pending(current))
			return -EINTR;
		if (time_after(jiffies, timeout)) {
			dev_err(ctrl->device,
				"Device shutdown incomplete; abort shutdown\n");
			return -ENODEV;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);

static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
		struct request_queue *q)
{
	bool vwc = false;

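	/*
	 * Worst case is one segment per memory page, plus one extra
	 * segment for a transfer that is not page aligned (page_size >> 9
	 * is the number of 512-byte sectors per page).
	 */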
	if (ctrl->max_hw_sectors) {
		u32 max_segments =
			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;

		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
	}
	if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
		blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
	blk_queue_virt_boundary(q, ctrl->page_size - 1);
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);
}

static void nvme_configure_apst(struct nvme_ctrl *ctrl)
{
	/*
	 * APST (Autonomous Power State Transition) lets us program a
	 * table of power state transitions that the controller will
	 * perform automatically.  We configure it with a simple
	 * heuristic: we are willing to spend at most 2% of the time
	 * transitioning between power states.  Therefore, when running
	 * in any given state, we will enter the next lower-power
	 * non-operational state after waiting 50 * (enlat + exlat)
	 * microseconds, as long as that state's exit latency is under
	 * the requested maximum latency.
	 *
	 * We will not autonomously enter any non-operational state for
	 * which the total latency exceeds ps_max_latency_us.  Users
	 * can set ps_max_latency_us to zero to turn off APST.
	 */

	unsigned apste;
	struct nvme_feat_auto_pst *table;
	u64 max_lat_us = 0;
	int max_ps = -1;
	int ret;

	/*
	 * If APST isn't supported or if we haven't been initialized yet,
	 * then don't do anything.
	 */
	if (!ctrl->apsta)
		return;

	if (ctrl->npss > 31) {
		dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
		return;
	}

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return;

	if (ctrl->ps_max_latency_us == 0) {
		/* Turn off APST. */
		apste = 0;
		dev_dbg(ctrl->device, "APST disabled\n");
	} else {
		__le64 target = cpu_to_le64(0);
		int state;

		/*
		 * Walk through all states from lowest- to highest-power.
		 * According to the spec, lower-numbered states use more
		 * power.  NPSS, despite the name, is the index of the
		 * lowest-power state, not the number of states.
		 */
		for (state = (int)ctrl->npss; state >= 0; state--) {
			u64 total_latency_us, exit_latency_us, transition_ms;

			if (target)
				table->entries[state] = target;

			/*
			 * Don't allow transitions to the deepest state
			 * if it's quirked off.
			 */
			if (state == ctrl->npss &&
			    (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS))
				continue;

			/*
			 * Is this state a useful non-operational state for
			 * higher-power states to autonomously transition to?
			 */
			if (!(ctrl->psd[state].flags &
			      NVME_PS_FLAGS_NON_OP_STATE))
				continue;

			exit_latency_us =
				(u64)le32_to_cpu(ctrl->psd[state].exit_lat);
			if (exit_latency_us > ctrl->ps_max_latency_us)
				continue;

			total_latency_us =
				exit_latency_us +
				le32_to_cpu(ctrl->psd[state].entry_lat);

			/*
			 * This state is good.  Use it as the APST idle
			 * target for higher power states.
			 */
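			/*
			 * 50 * total_latency_us equals total_latency_us / 20
			 * in milliseconds (rounded up): e.g. enlat + exlat =
			 * 2000 us yields a 100 ms idle timeout.  The table
			 * entry encodes the target state in bits 07:03 and
			 * the idle time (in ms) in bits 31:08.
			 */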
			transition_ms = total_latency_us + 19;
			do_div(transition_ms, 20);
			if (transition_ms > (1 << 24) - 1)
				transition_ms = (1 << 24) - 1;

			target = cpu_to_le64((state << 3) |
					     (transition_ms << 8));

			if (max_ps == -1)
				max_ps = state;

			if (total_latency_us > max_lat_us)
				max_lat_us = total_latency_us;
		}

		apste = 1;

		if (max_ps == -1) {
			dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n");
		} else {
			dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
				max_ps, max_lat_us, (int)sizeof(*table), table);
		}
	}

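	/*
	 * cdw11 bit 0 of the Autonomous Power State Transition feature
	 * enables APST; the 256-byte transition table is carried in the
	 * data buffer.
	 */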
	ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste,
				table, sizeof(*table), NULL);
	if (ret)
		dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);

	kfree(table);
}

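/*
 * dev_pm_qos callback, invoked when userspace changes the latency
 * tolerance (power/pm_qos_latency_tolerance_us in sysfs).
 */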
static void nvme_set_latency_tolerance(struct device *dev, s32 val)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
	u64 latency;

	switch (val) {
	case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT:
	case PM_QOS_LATENCY_ANY:
		latency = U64_MAX;
		break;

	default:
		latency = val;
	}

	if (ctrl->ps_max_latency_us != latency) {
		ctrl->ps_max_latency_us = latency;
		nvme_configure_apst(ctrl);
	}
}

struct nvme_core_quirk_entry {
	/*
	 * NVMe model and firmware strings are padded with spaces.  For
	 * simplicity, strings in the quirk table are padded with NULLs
	 * instead.
	 */
	u16 vid;
	const char *mn;
	const char *fr;
	unsigned long quirks;
};

static const struct nvme_core_quirk_entry core_quirks[] = {
	{
		/*
		 * This Toshiba device seems to die using any APST states.  See:
		 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
		 */
		.vid = 0x1179,
		.mn = "THNSF5256GPUK TOSHIBA",
		.quirks = NVME_QUIRK_NO_APST,
	}
};

/* match is null-terminated but idstr is space-padded. */
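/* e.g. string_matches("FOO     ", "FOO", 8) is true: trailing bytes must all be spaces. */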
static bool string_matches(const char *idstr, const char *match, size_t len)
{
	size_t matchlen;

	if (!match)
		return true;

	matchlen = strlen(match);
	WARN_ON_ONCE(matchlen > len);

	if (memcmp(idstr, match, matchlen))
		return false;

	for (; matchlen < len; matchlen++)
		if (idstr[matchlen] != ' ')
			return false;

	return true;
}

static bool quirk_matches(const struct nvme_id_ctrl *id,
			  const struct nvme_core_quirk_entry *q)
{
	return q->vid == le16_to_cpu(id->vid) &&
		string_matches(id->mn, q->mn, sizeof(id->mn)) &&
		string_matches(id->fr, q->fr, sizeof(id->fr));
}

/*
 * Initialize the cached copies of the Identify data and various controller
 * registers in our nvme_ctrl structure.  This should be called as soon as
 * the admin queue is fully up and running.
 */
int nvme_init_identify(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	u64 cap;
	int ret, page_shift;
	u32 max_hw_sectors;
	u8 prev_apsta;

	ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
	if (ret) {
		dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
		return ret;
	}

	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
	if (ret) {
		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
		return ret;
	}
	page_shift = NVME_CAP_MPSMIN(cap) + 12;

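	/* CAP.NSSRS (subsystem reset support) first appeared in NVMe 1.1. */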
	if (ctrl->vs >= NVME_VS(1, 1, 0))
		ctrl->subsystem = NVME_CAP_NSSRC(cap);

	ret = nvme_identify_ctrl(ctrl, &id);
	if (ret) {
		dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
		return -EIO;
	}

	if (!ctrl->identified) {
		/*
		 * Check for quirks.  Quirks can depend on firmware version,
		 * so, in principle, the set of quirks present can change
		 * across a reset.  As a possible future enhancement, we
		 * could re-scan for quirks every time we reinitialize
		 * the device, but we'd have to make sure that the driver
		 * behaves intelligently if the quirks change.
		 */
		int i;

		for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
			if (quirk_matches(id, &core_quirks[i]))
				ctrl->quirks |= core_quirks[i].quirks;
		}
	}

	if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
		dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
		ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
	}

	ctrl->oacs = le16_to_cpu(id->oacs);
	ctrl->vid = le16_to_cpu(id->vid);
	ctrl->oncs = le16_to_cpup(&id->oncs);
	atomic_set(&ctrl->abort_limit, id->acl + 1);
	ctrl->vwc = id->vwc;
	ctrl->cntlid = le16_to_cpup(&id->cntlid);
	memcpy(ctrl->serial, id->sn, sizeof(id->sn));
	memcpy(ctrl->model, id->mn, sizeof(id->mn));
	memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
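	/*
	 * MDTS is a power of two in units of the minimum page size;
	 * subtracting 9 converts it to 512-byte sectors.  E.g. mdts = 5
	 * with a 4 KiB page gives 1 << (5 + 12 - 9) = 256 sectors (128 KiB).
	 */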
	if (id->mdts)
		max_hw_sectors = 1 << (id->mdts + page_shift - 9);
	else
		max_hw_sectors = UINT_MAX;
	ctrl->max_hw_sectors =
		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);

	nvme_set_queue_limits(ctrl, ctrl->admin_q);
	ctrl->sgls = le32_to_cpu(id->sgls);
	ctrl->kas = le16_to_cpu(id->kas);

	ctrl->npss = id->npss;
	prev_apsta = ctrl->apsta;
	if (ctrl->quirks & NVME_QUIRK_NO_APST) {
		if (force_apst && id->apsta) {
			dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
			ctrl->apsta = 1;
		} else {
			ctrl->apsta = 0;
		}
	} else {
		ctrl->apsta = id->apsta;
	}
	memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd));

	if (ctrl->ops->flags & NVME_F_FABRICS) {
		ctrl->icdoff = le16_to_cpu(id->icdoff);
		ctrl->ioccsz = le32_to_cpu(id->ioccsz);
		ctrl->iorcsz = le32_to_cpu(id->iorcsz);
		ctrl->maxcmd = le16_to_cpu(id->maxcmd);

		/*
		 * In fabrics we need to verify that the cntlid matches the
		 * one returned by the admin connect.
		 */
		if (ctrl->cntlid != le16_to_cpu(id->cntlid))
			ret = -EINVAL;

		if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
			dev_err(ctrl->device,
				"keep-alive support is mandatory for fabrics\n");
			ret = -EINVAL;
		}
	} else {
		ctrl->cntlid = le16_to_cpu(id->cntlid);
		ctrl->hmpre = le32_to_cpu(id->hmpre);
		ctrl->hmmin = le32_to_cpu(id->hmmin);
	}

	kfree(id);

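	/*
	 * The latency tolerance file is only exposed in sysfs while the
	 * controller reports APST support.
	 */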
	if (ctrl->apsta && !prev_apsta)
		dev_pm_qos_expose_latency_tolerance(ctrl->device);
	else if (!ctrl->apsta && prev_apsta)
		dev_pm_qos_hide_latency_tolerance(ctrl->device);

	nvme_configure_apst(ctrl);

	ctrl->identified = true;

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_identify);

static int nvme_dev_open(struct inode *inode, struct file *file)
{
	struct nvme_ctrl *ctrl;
	int instance = iminor(inode);
	int ret = -ENODEV;

	spin_lock(&dev_list_lock);
	list_for_each_entry(ctrl, &nvme_ctrl_list, node) {
		if (ctrl->instance != instance)
			continue;

		if (!ctrl->admin_q) {
			ret = -EWOULDBLOCK;
			break;
		}
		if (!kref_get_unless_zero(&ctrl->kref))
			break;
		file->private_data = ctrl;
		ret = 0;
		break;
	}
	spin_unlock(&dev_list_lock);

	return ret;
}

static int nvme_dev_release(struct inode *inode, struct file *file)
{
	nvme_put_ctrl(file->private_data);
	return 0;
}

static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
	struct nvme_ns *ns;
	int ret;

	mutex_lock(&ctrl->namespaces_mutex);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
	mutex_unlock(&ctrl->namespaces_mutex);

	ret = nvme_user_cmd(ctrl, ns, argp);
	nvme_put_ns(ns);
	return ret;

out_unlock:
	mutex_unlock(&ctrl->namespaces_mutex);
	return ret;
}
|
|
|
|
|
2015-11-28 22:40:19 +08:00
|
|
|
static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
|
|
|
|
unsigned long arg)
|
|
|
|
{
|
|
|
|
struct nvme_ctrl *ctrl = file->private_data;
|
|
|
|
void __user *argp = (void __user *)arg;
|
|
|
|
|
|
|
|
switch (cmd) {
|
|
|
|
case NVME_IOCTL_ADMIN_CMD:
|
|
|
|
return nvme_user_cmd(ctrl, NULL, argp);
|
|
|
|
case NVME_IOCTL_IO_CMD:
|
2015-12-24 22:27:01 +08:00
|
|
|
return nvme_dev_user_cmd(ctrl, argp);
|
2015-11-28 22:40:19 +08:00
|
|
|
case NVME_IOCTL_RESET:
|
2016-02-10 23:51:15 +08:00
|
|
|
dev_warn(ctrl->device, "resetting controller\n");
|
2017-06-15 21:41:08 +08:00
|
|
|
return nvme_reset_ctrl_sync(ctrl);
|
2015-11-28 22:40:19 +08:00
|
|
|
case NVME_IOCTL_SUBSYS_RESET:
|
|
|
|
return nvme_reset_subsystem(ctrl);
|
2016-04-30 05:45:18 +08:00
|
|
|
case NVME_IOCTL_RESCAN:
|
|
|
|
nvme_queue_scan(ctrl);
|
|
|
|
return 0;
|
2015-11-28 22:40:19 +08:00
|
|
|
default:
|
|
|
|
return -ENOTTY;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct file_operations nvme_dev_fops = {
|
|
|
|
.owner = THIS_MODULE,
|
|
|
|
.open = nvme_dev_open,
|
|
|
|
.release = nvme_dev_release,
|
|
|
|
.unlocked_ioctl = nvme_dev_ioctl,
|
|
|
|
.compat_ioctl = nvme_dev_ioctl,
|
|
|
|
};
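/*
 * Illustrative userspace usage (not part of this file): the ioctls
 * dispatched above are reached through the per-controller character
 * device, e.g.:
 *
 *	int fd = open("/dev/nvme0", O_RDWR);
 *	if (fd >= 0)
 *		ioctl(fd, NVME_IOCTL_RESET);
 *
 * NVME_IOCTL_RESET takes no argument, while NVME_IOCTL_ADMIN_CMD takes a
 * struct nvme_admin_cmd as defined in <linux/nvme_ioctl.h>.
 */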
|
|
|
|
|
|
|
|
static ssize_t nvme_sysfs_reset(struct device *dev,
|
|
|
|
struct device_attribute *attr, const char *buf,
|
|
|
|
size_t count)
|
|
|
|
{
|
|
|
|
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
|
|
|
|
int ret;
|
|
|
|
|
2017-06-15 21:41:08 +08:00
|
|
|
ret = nvme_reset_ctrl_sync(ctrl);
|
2015-11-28 22:40:19 +08:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
return count;
|
2015-11-26 17:54:19 +08:00
|
|
|
}
|
2015-11-28 22:40:19 +08:00
|
|
|
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
|
2015-11-26 17:54:19 +08:00
|
|
|
|
2016-04-30 05:45:18 +08:00
|
|
|
static ssize_t nvme_sysfs_rescan(struct device *dev,
|
|
|
|
struct device_attribute *attr, const char *buf,
|
|
|
|
size_t count)
|
|
|
|
{
|
|
|
|
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
|
|
|
|
|
|
|
|
nvme_queue_scan(ctrl);
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
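/*
 * Illustrative sysfs usage (assuming controller instance 0): any write to
 * these attributes triggers the store methods above, e.g. from a shell:
 *
 *	echo 1 > /sys/class/nvme/nvme0/reset_controller
 *	echo 1 > /sys/class/nvme/nvme0/rescan_controller
 */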
|
|
|
|
|
2016-02-19 00:57:48 +08:00
|
|
|
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
|
|
|
|
char *buf)
|
|
|
|
{
|
2016-09-16 20:25:08 +08:00
|
|
|
struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
|
2016-02-19 00:57:48 +08:00
|
|
|
struct nvme_ctrl *ctrl = ns->ctrl;
|
|
|
|
int serial_len = sizeof(ctrl->serial);
|
|
|
|
int model_len = sizeof(ctrl->model);
|
|
|
|
|
2017-06-07 17:45:31 +08:00
|
|
|
if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
|
|
|
|
return sprintf(buf, "eui.%16phN\n", ns->nguid);
|
2016-02-19 00:57:48 +08:00
|
|
|
|
|
|
|
if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
|
|
|
|
return sprintf(buf, "eui.%8phN\n", ns->eui);
|
|
|
|
|
|
|
|
while (ctrl->serial[serial_len - 1] == ' ')
|
|
|
|
serial_len--;
|
|
|
|
while (ctrl->model[model_len - 1] == ' ')
|
|
|
|
model_len--;
|
|
|
|
|
|
|
|
return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
|
|
|
|
serial_len, ctrl->serial, model_len, ctrl->model, ns->ns_id);
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);
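/*
 * wwid_show() above therefore emits one of three formats, in order of
 * preference: "eui." plus 32 hex digits from the namespace NGUID, "eui."
 * plus 16 hex digits from the EUI-64, or a fallback identifier built from
 * the vendor ID, trimmed serial, trimmed model, and namespace ID.
 */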
|
|
|
|
|
2017-06-07 17:45:35 +08:00
|
|
|
static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
|
|
|
|
char *buf)
|
|
|
|
{
|
|
|
|
struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
|
|
|
|
return sprintf(buf, "%pU\n", ns->nguid);
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR(nguid, S_IRUGO, nguid_show, NULL);
|
|
|
|
|
2015-12-23 01:10:45 +08:00
|
|
|
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
|
|
|
|
char *buf)
|
|
|
|
{
|
2016-09-16 20:25:08 +08:00
|
|
|
struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
|
2017-06-07 17:45:35 +08:00
|
|
|
|
|
|
|
/*
 * For backward compatibility expose the NGUID to userspace if
|
|
|
|
* we have no UUID set
|
|
|
|
*/
|
|
|
|
if (uuid_is_null(&ns->uuid)) {
|
|
|
|
printk_ratelimited(KERN_WARNING
|
|
|
|
"No UUID available providing old NGUID\n");
|
|
|
|
return sprintf(buf, "%pU\n", ns->nguid);
|
|
|
|
}
|
|
|
|
return sprintf(buf, "%pU\n", &ns->uuid);
|
2015-12-23 01:10:45 +08:00
|
|
|
}
|
|
|
|
static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);
|
|
|
|
|
|
|
|
static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
|
|
|
|
char *buf)
|
|
|
|
{
|
2016-09-16 20:25:08 +08:00
|
|
|
struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
|
2015-12-23 01:10:45 +08:00
|
|
|
return sprintf(buf, "%8phd\n", ns->eui);
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);
|
|
|
|
|
|
|
|
static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
|
|
|
|
char *buf)
|
|
|
|
{
|
2016-09-16 20:25:08 +08:00
|
|
|
struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
|
2015-12-23 01:10:45 +08:00
|
|
|
return sprintf(buf, "%d\n", ns->ns_id);
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
|
|
|
|
|
|
|
|
static struct attribute *nvme_ns_attrs[] = {
|
2016-02-19 00:57:48 +08:00
|
|
|
&dev_attr_wwid.attr,
|
2015-12-23 01:10:45 +08:00
|
|
|
&dev_attr_uuid.attr,
|
2017-06-07 17:45:35 +08:00
|
|
|
&dev_attr_nguid.attr,
|
2015-12-23 01:10:45 +08:00
|
|
|
&dev_attr_eui.attr,
|
|
|
|
&dev_attr_nsid.attr,
|
|
|
|
NULL,
|
|
|
|
};
|
|
|
|
|
2016-06-13 22:45:24 +08:00
|
|
|
static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
|
2015-12-23 01:10:45 +08:00
|
|
|
struct attribute *a, int n)
|
|
|
|
{
|
|
|
|
struct device *dev = container_of(kobj, struct device, kobj);
|
2016-09-16 20:25:08 +08:00
|
|
|
struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
|
2015-12-23 01:10:45 +08:00
|
|
|
|
|
|
|
if (a == &dev_attr_uuid.attr) {
|
2017-06-07 17:45:35 +08:00
|
|
|
if (uuid_is_null(&ns->uuid) ||
|
|
|
|
!memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
if (a == &dev_attr_nguid.attr) {
|
2017-06-07 17:45:31 +08:00
|
|
|
if (!memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
|
2015-12-23 01:10:45 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
if (a == &dev_attr_eui.attr) {
|
|
|
|
if (!memchr_inv(ns->eui, 0, sizeof(ns->eui)))
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
return a->mode;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct attribute_group nvme_ns_attr_group = {
|
|
|
|
.attrs = nvme_ns_attrs,
|
2016-06-13 22:45:24 +08:00
|
|
|
.is_visible = nvme_ns_attrs_are_visible,
|
2015-12-23 01:10:45 +08:00
|
|
|
};
|
|
|
|
|
2016-02-27 05:24:19 +08:00
|
|
|
#define nvme_show_str_function(field) \
|
2016-01-13 06:09:31 +08:00
|
|
|
static ssize_t field##_show(struct device *dev, \
|
|
|
|
struct device_attribute *attr, char *buf) \
|
|
|
|
{ \
|
|
|
|
struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
|
|
|
|
return sprintf(buf, "%.*s\n", (int)sizeof(ctrl->field), ctrl->field); \
|
|
|
|
} \
|
|
|
|
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
|
|
|
|
|
2016-02-27 05:24:19 +08:00
|
|
|
#define nvme_show_int_function(field) \
|
|
|
|
static ssize_t field##_show(struct device *dev, \
|
|
|
|
struct device_attribute *attr, char *buf) \
|
|
|
|
{ \
|
|
|
|
struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
|
|
|
|
return sprintf(buf, "%d\n", ctrl->field); \
|
|
|
|
} \
|
|
|
|
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
|
|
|
|
|
|
|
|
nvme_show_str_function(model);
|
|
|
|
nvme_show_str_function(serial);
|
|
|
|
nvme_show_str_function(firmware_rev);
|
|
|
|
nvme_show_int_function(cntlid);
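/*
 * For illustration, nvme_show_str_function(model) above expands to
 * approximately:
 *
 *	static ssize_t model_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
 *		return sprintf(buf, "%.*s\n",
 *				(int)sizeof(ctrl->model), ctrl->model);
 *	}
 *	static DEVICE_ATTR(model, S_IRUGO, model_show, NULL);
 */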
|
2016-01-13 06:09:31 +08:00
|
|
|
|
2016-06-13 22:45:24 +08:00
|
|
|
static ssize_t nvme_sysfs_delete(struct device *dev,
|
|
|
|
struct device_attribute *attr, const char *buf,
|
|
|
|
size_t count)
|
|
|
|
{
|
|
|
|
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
|
|
|
|
|
|
|
|
if (device_remove_file_self(dev, attr))
|
|
|
|
ctrl->ops->delete_ctrl(ctrl);
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
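/*
 * Note: device_remove_file_self() lets the delete_controller attribute
 * remove itself before ->delete_ctrl() runs, avoiding a deadlock between
 * controller teardown and the sysfs file currently being written.
 */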
|
|
|
|
|
|
|
|
static ssize_t nvme_sysfs_show_transport(struct device *dev,
|
|
|
|
struct device_attribute *attr,
|
|
|
|
char *buf)
|
|
|
|
{
|
|
|
|
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
|
|
|
|
|
|
|
|
return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
|
|
|
|
|
2016-11-28 07:47:40 +08:00
|
|
|
static ssize_t nvme_sysfs_show_state(struct device *dev,
|
|
|
|
struct device_attribute *attr,
|
|
|
|
char *buf)
|
|
|
|
{
|
|
|
|
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
|
|
|
|
static const char *const state_name[] = {
|
|
|
|
[NVME_CTRL_NEW] = "new",
|
|
|
|
[NVME_CTRL_LIVE] = "live",
|
|
|
|
[NVME_CTRL_RESETTING] = "resetting",
|
|
|
|
[NVME_CTRL_RECONNECTING] = "reconnecting",
|
|
|
|
[NVME_CTRL_DELETING] = "deleting",
|
|
|
|
[NVME_CTRL_DEAD] = "dead",
|
|
|
|
};
|
|
|
|
|
|
|
|
if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) &&
|
|
|
|
state_name[ctrl->state])
|
|
|
|
return sprintf(buf, "%s\n", state_name[ctrl->state]);
|
|
|
|
|
|
|
|
return sprintf(buf, "unknown state\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL);
|
|
|
|
|
2016-06-13 22:45:24 +08:00
|
|
|
static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
|
|
|
|
struct device_attribute *attr,
|
|
|
|
char *buf)
|
|
|
|
{
|
|
|
|
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
|
|
|
|
|
|
|
|
return snprintf(buf, PAGE_SIZE, "%s\n",
|
|
|
|
ctrl->ops->get_subsysnqn(ctrl));
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
|
|
|
|
|
|
|
|
static ssize_t nvme_sysfs_show_address(struct device *dev,
|
|
|
|
struct device_attribute *attr,
|
|
|
|
char *buf)
|
|
|
|
{
|
|
|
|
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
|
|
|
|
|
|
|
|
return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE);
|
|
|
|
}
|
|
|
|
static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL);
|
|
|
|
|
2016-01-13 06:09:31 +08:00
|
|
|
static struct attribute *nvme_dev_attrs[] = {
|
|
|
|
&dev_attr_reset_controller.attr,
|
2016-04-30 05:45:18 +08:00
|
|
|
&dev_attr_rescan_controller.attr,
|
2016-01-13 06:09:31 +08:00
|
|
|
&dev_attr_model.attr,
|
|
|
|
&dev_attr_serial.attr,
|
|
|
|
&dev_attr_firmware_rev.attr,
|
2016-02-27 05:24:19 +08:00
|
|
|
&dev_attr_cntlid.attr,
|
2016-06-13 22:45:24 +08:00
|
|
|
&dev_attr_delete_controller.attr,
|
|
|
|
&dev_attr_transport.attr,
|
|
|
|
&dev_attr_subsysnqn.attr,
|
|
|
|
&dev_attr_address.attr,
|
2016-11-28 07:47:40 +08:00
|
|
|
&dev_attr_state.attr,
|
2016-01-13 06:09:31 +08:00
|
|
|
NULL
|
|
|
|
};
|
|
|
|
|
2016-06-13 22:45:24 +08:00
|
|
|
#define CHECK_ATTR(ctrl, a, name) \
|
|
|
|
if ((a) == &dev_attr_##name.attr && \
|
|
|
|
!(ctrl)->ops->get_##name) \
|
|
|
|
return 0
|
|
|
|
|
|
|
|
static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
|
|
|
|
struct attribute *a, int n)
|
|
|
|
{
|
|
|
|
struct device *dev = container_of(kobj, struct device, kobj);
|
|
|
|
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
|
|
|
|
|
|
|
|
if (a == &dev_attr_delete_controller.attr) {
|
|
|
|
if (!ctrl->ops->delete_ctrl)
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
CHECK_ATTR(ctrl, a, subsysnqn);
|
|
|
|
CHECK_ATTR(ctrl, a, address);
|
|
|
|
|
|
|
|
return a->mode;
|
|
|
|
}
|
|
|
|
|
2016-01-13 06:09:31 +08:00
|
|
|
static struct attribute_group nvme_dev_attrs_group = {
|
2016-06-13 22:45:24 +08:00
|
|
|
.attrs = nvme_dev_attrs,
|
|
|
|
.is_visible = nvme_dev_attrs_are_visible,
|
2016-01-13 06:09:31 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
static const struct attribute_group *nvme_dev_attr_groups[] = {
|
|
|
|
&nvme_dev_attrs_group,
|
|
|
|
NULL,
|
|
|
|
};
|
|
|
|
|
2015-11-28 22:39:07 +08:00
|
|
|
static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
|
|
|
|
{
|
|
|
|
struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
|
|
|
|
struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
|
|
|
|
|
|
|
|
return nsa->ns_id - nsb->ns_id;
|
|
|
|
}
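/*
 * ns_cmp() keeps ctrl->namespaces sorted by ns_id via the list_sort()
 * call in nvme_scan_work(); nvme_find_get_ns() below relies on that
 * ordering to stop scanning once it passes the requested nsid.
 */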
|
|
|
|
|
2016-07-14 01:45:02 +08:00
|
|
|
static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
|
2015-11-28 22:39:07 +08:00
|
|
|
{
|
2016-07-14 01:45:02 +08:00
|
|
|
struct nvme_ns *ns, *ret = NULL;
|
2015-12-24 22:27:00 +08:00
|
|
|
|
2016-07-14 01:45:02 +08:00
|
|
|
mutex_lock(&ctrl->namespaces_mutex);
|
2015-11-28 22:39:07 +08:00
|
|
|
list_for_each_entry(ns, &ctrl->namespaces, list) {
|
2016-07-14 01:45:02 +08:00
|
|
|
if (ns->ns_id == nsid) {
|
|
|
|
kref_get(&ns->kref);
|
|
|
|
ret = ns;
|
|
|
|
break;
|
|
|
|
}
|
2015-11-28 22:39:07 +08:00
|
|
|
if (ns->ns_id > nsid)
|
|
|
|
break;
|
|
|
|
}
|
2016-07-14 01:45:02 +08:00
|
|
|
mutex_unlock(&ctrl->namespaces_mutex);
|
|
|
|
return ret;
|
2015-11-28 22:39:07 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
|
|
|
|
{
|
|
|
|
struct nvme_ns *ns;
|
|
|
|
struct gendisk *disk;
|
2016-09-16 20:25:04 +08:00
|
|
|
struct nvme_id_ns *id;
|
|
|
|
char disk_name[DISK_NAME_LEN];
|
2015-11-28 22:39:07 +08:00
|
|
|
int node = dev_to_node(ctrl->dev);
|
|
|
|
|
|
|
|
ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
|
|
|
|
if (!ns)
|
|
|
|
return;
|
|
|
|
|
2016-02-25 00:15:53 +08:00
|
|
|
ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
|
|
|
|
if (ns->instance < 0)
|
|
|
|
goto out_free_ns;
|
|
|
|
|
2015-11-28 22:39:07 +08:00
|
|
|
ns->queue = blk_mq_init_queue(ctrl->tagset);
|
|
|
|
if (IS_ERR(ns->queue))
|
2016-02-25 00:15:53 +08:00
|
|
|
goto out_release_instance;
|
2015-11-28 22:39:07 +08:00
|
|
|
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
|
|
|
|
ns->queue->queuedata = ns;
|
|
|
|
ns->ctrl = ctrl;
|
|
|
|
|
|
|
|
kref_init(&ns->kref);
|
|
|
|
ns->ns_id = nsid;
|
|
|
|
ns->lba_shift = 9; /* default to 512-byte blocks until the disk is validated */
|
|
|
|
|
|
|
|
blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
|
2016-03-03 01:07:11 +08:00
|
|
|
nvme_set_queue_limits(ctrl, ns->queue);
|
2015-11-28 22:39:07 +08:00
|
|
|
|
2016-09-16 20:25:04 +08:00
|
|
|
sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
|
2015-11-28 22:39:07 +08:00
|
|
|
|
2016-09-16 20:25:04 +08:00
|
|
|
if (nvme_revalidate_ns(ns, &id))
|
|
|
|
goto out_free_queue;
|
|
|
|
|
2016-11-29 05:38:53 +08:00
|
|
|
if (nvme_nvm_ns_supported(ns, id) &&
|
|
|
|
nvme_nvm_register(ns, disk_name, node)) {
|
2017-06-09 22:17:21 +08:00
|
|
|
dev_warn(ctrl->device, "%s: LightNVM init failure\n", __func__);
|
2016-11-29 05:38:53 +08:00
|
|
|
goto out_free_id;
|
|
|
|
}
|
2016-09-16 20:25:04 +08:00
|
|
|
|
2016-11-29 05:38:53 +08:00
|
|
|
disk = alloc_disk_node(0, node);
|
|
|
|
if (!disk)
|
|
|
|
goto out_free_id;
|
2016-09-16 20:25:04 +08:00
|
|
|
|
2016-11-29 05:38:53 +08:00
|
|
|
disk->fops = &nvme_fops;
|
|
|
|
disk->private_data = ns;
|
|
|
|
disk->queue = ns->queue;
|
|
|
|
disk->flags = GENHD_FL_EXT_DEVT;
|
|
|
|
memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
|
|
|
|
ns->disk = disk;
|
|
|
|
|
|
|
|
__nvme_revalidate_disk(disk, id);
|
2015-11-28 22:39:07 +08:00
|
|
|
|
2016-07-14 01:45:02 +08:00
|
|
|
mutex_lock(&ctrl->namespaces_mutex);
|
|
|
|
list_add_tail(&ns->list, &ctrl->namespaces);
|
|
|
|
mutex_unlock(&ctrl->namespaces_mutex);
|
|
|
|
|
2015-11-28 22:39:07 +08:00
|
|
|
kref_get(&ctrl->kref);
|
2016-09-16 20:25:04 +08:00
|
|
|
|
|
|
|
kfree(id);
|
|
|
|
|
2016-06-16 10:44:20 +08:00
|
|
|
device_add_disk(ctrl->device, ns->disk);
|
2015-12-23 01:10:45 +08:00
|
|
|
if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
|
|
|
|
&nvme_ns_attr_group))
|
|
|
|
pr_warn("%s: failed to create sysfs group for identification\n",
|
|
|
|
ns->disk->disk_name);
|
2016-11-29 05:38:53 +08:00
|
|
|
if (ns->ndev && nvme_nvm_register_sysfs(ns))
|
|
|
|
pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
|
|
|
|
ns->disk->disk_name);
|
2015-11-28 22:39:07 +08:00
|
|
|
return;
|
2016-09-16 20:25:04 +08:00
|
|
|
out_free_id:
|
|
|
|
kfree(id);
|
2015-11-28 22:39:07 +08:00
|
|
|
out_free_queue:
|
|
|
|
blk_cleanup_queue(ns->queue);
|
2016-02-25 00:15:53 +08:00
|
|
|
out_release_instance:
|
|
|
|
ida_simple_remove(&ctrl->ns_ida, ns->instance);
|
2015-11-28 22:39:07 +08:00
|
|
|
out_free_ns:
|
|
|
|
kfree(ns);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void nvme_ns_remove(struct nvme_ns *ns)
|
|
|
|
{
|
2016-02-25 00:15:54 +08:00
|
|
|
if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
|
|
|
|
return;
|
2015-12-24 22:27:00 +08:00
|
|
|
|
2016-09-16 20:25:07 +08:00
|
|
|
if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
|
2015-11-28 22:39:07 +08:00
|
|
|
if (blk_get_integrity(ns->disk))
|
|
|
|
blk_integrity_unregister(ns->disk);
|
2015-12-23 01:10:45 +08:00
|
|
|
sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
|
|
|
|
&nvme_ns_attr_group);
|
2016-11-29 05:38:53 +08:00
|
|
|
if (ns->ndev)
|
|
|
|
nvme_nvm_unregister_sysfs(ns);
|
2015-11-28 22:39:07 +08:00
|
|
|
del_gendisk(ns->disk);
|
|
|
|
blk_cleanup_queue(ns->queue);
|
|
|
|
}
|
2016-07-14 01:45:02 +08:00
|
|
|
|
|
|
|
mutex_lock(&ns->ctrl->namespaces_mutex);
|
2015-11-28 22:39:07 +08:00
|
|
|
list_del_init(&ns->list);
|
2016-07-14 01:45:02 +08:00
|
|
|
mutex_unlock(&ns->ctrl->namespaces_mutex);
|
|
|
|
|
2015-11-28 22:39:07 +08:00
|
|
|
nvme_put_ns(ns);
|
|
|
|
}
|
|
|
|
|
2015-10-23 05:45:06 +08:00
|
|
|
static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
|
|
|
|
{
|
|
|
|
struct nvme_ns *ns;
|
|
|
|
|
2016-07-14 01:45:02 +08:00
|
|
|
ns = nvme_find_get_ns(ctrl, nsid);
|
2015-10-23 05:45:06 +08:00
|
|
|
if (ns) {
|
2016-09-16 20:25:07 +08:00
|
|
|
if (ns->disk && revalidate_disk(ns->disk))
|
2015-10-23 05:45:06 +08:00
|
|
|
nvme_ns_remove(ns);
|
2016-07-14 01:45:02 +08:00
|
|
|
nvme_put_ns(ns);
|
2015-10-23 05:45:06 +08:00
|
|
|
} else
|
|
|
|
nvme_alloc_ns(ctrl, nsid);
|
|
|
|
}
|
|
|
|
|
2016-05-27 18:29:43 +08:00
|
|
|
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
|
|
|
|
unsigned nsid)
|
|
|
|
{
|
|
|
|
struct nvme_ns *ns, *next;
|
|
|
|
|
|
|
|
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
|
|
|
|
if (ns->ns_id > nsid)
|
|
|
|
nvme_ns_remove(ns);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-10-23 05:45:06 +08:00
|
|
|
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
|
|
|
|
{
|
|
|
|
struct nvme_ns *ns;
|
|
|
|
__le32 *ns_list;
|
|
|
|
unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
ns_list = kzalloc(0x1000, GFP_KERNEL);
|
|
|
|
if (!ns_list)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
for (i = 0; i < num_lists; i++) {
|
|
|
|
ret = nvme_identify_ns_list(ctrl, prev, ns_list);
|
|
|
|
if (ret)
|
2016-05-27 18:29:43 +08:00
|
|
|
goto free;
|
2015-10-23 05:45:06 +08:00
|
|
|
|
|
|
|
for (j = 0; j < min(nn, 1024U); j++) {
|
|
|
|
nsid = le32_to_cpu(ns_list[j]);
|
|
|
|
if (!nsid)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
nvme_validate_ns(ctrl, nsid);
|
|
|
|
|
|
|
|
while (++prev < nsid) {
|
2016-07-14 01:45:02 +08:00
|
|
|
ns = nvme_find_get_ns(ctrl, prev);
|
|
|
|
if (ns) {
|
2015-10-23 05:45:06 +08:00
|
|
|
nvme_ns_remove(ns);
|
2016-07-14 01:45:02 +08:00
|
|
|
nvme_put_ns(ns);
|
|
|
|
}
|
2015-10-23 05:45:06 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
nn -= j;
|
|
|
|
}
|
|
|
|
out:
|
2016-05-27 18:29:43 +08:00
|
|
|
nvme_remove_invalid_namespaces(ctrl, prev);
|
|
|
|
free:
|
2015-10-23 05:45:06 +08:00
|
|
|
kfree(ns_list);
|
|
|
|
return ret;
|
|
|
|
}
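/*
 * Note on nvme_scan_ns_list() above: each Identify Namespace List page
 * holds up to 1024 active NSIDs (0x1000 bytes of __le32), returned in
 * ascending order starting after 'prev'.  A zero entry terminates the
 * list, and any NSIDs skipped between consecutive entries are treated as
 * removed namespaces.
 */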
|
|
|
|
|
2016-04-26 19:51:59 +08:00
|
|
|
static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
|
2015-11-28 22:39:07 +08:00
|
|
|
{
|
|
|
|
unsigned i;
|
|
|
|
|
2015-10-23 05:45:06 +08:00
|
|
|
for (i = 1; i <= nn; i++)
|
|
|
|
nvme_validate_ns(ctrl, i);
|
|
|
|
|
2016-05-27 18:29:43 +08:00
|
|
|
nvme_remove_invalid_namespaces(ctrl, nn);
|
2015-11-28 22:39:07 +08:00
|
|
|
}
|
|
|
|
|
2016-04-26 19:51:59 +08:00
|
|
|
static void nvme_scan_work(struct work_struct *work)
|
2015-11-28 22:39:07 +08:00
|
|
|
{
|
2016-04-26 19:51:59 +08:00
|
|
|
struct nvme_ctrl *ctrl =
|
|
|
|
container_of(work, struct nvme_ctrl, scan_work);
|
2015-11-28 22:39:07 +08:00
|
|
|
struct nvme_id_ctrl *id;
|
2015-10-23 05:45:06 +08:00
|
|
|
unsigned nn;
|
2015-11-28 22:39:07 +08:00
|
|
|
|
2016-04-26 19:51:59 +08:00
|
|
|
if (ctrl->state != NVME_CTRL_LIVE)
|
|
|
|
return;
|
|
|
|
|
2015-11-28 22:39:07 +08:00
|
|
|
if (nvme_identify_ctrl(ctrl, &id))
|
|
|
|
return;
|
2015-10-23 05:45:06 +08:00
|
|
|
|
|
|
|
nn = le32_to_cpu(id->nn);
|
2016-10-19 23:51:05 +08:00
|
|
|
if (ctrl->vs >= NVME_VS(1, 1, 0) &&
|
2015-10-23 05:45:06 +08:00
|
|
|
!(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
|
|
|
|
if (!nvme_scan_ns_list(ctrl, nn))
|
|
|
|
goto done;
|
|
|
|
}
|
2016-04-26 19:51:59 +08:00
|
|
|
nvme_scan_ns_sequential(ctrl, nn);
|
2015-10-23 05:45:06 +08:00
|
|
|
done:
|
2016-07-14 01:45:02 +08:00
|
|
|
mutex_lock(&ctrl->namespaces_mutex);
|
2015-10-23 05:45:06 +08:00
|
|
|
list_sort(NULL, &ctrl->namespaces, ns_cmp);
|
2015-12-24 22:27:00 +08:00
|
|
|
mutex_unlock(&ctrl->namespaces_mutex);
|
2015-11-28 22:39:07 +08:00
|
|
|
kfree(id);
|
|
|
|
}
|
2016-04-26 19:51:59 +08:00
|
|
|
|
|
|
|
void nvme_queue_scan(struct nvme_ctrl *ctrl)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Do not queue new scan work when a controller is reset during
|
|
|
|
* removal.
|
|
|
|
*/
|
|
|
|
if (ctrl->state == NVME_CTRL_LIVE)
|
2017-05-04 18:33:14 +08:00
|
|
|
queue_work(nvme_wq, &ctrl->scan_work);
|
2016-04-26 19:51:59 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(nvme_queue_scan);
|
2015-11-28 22:39:07 +08:00
|
|
|
|
2016-07-14 01:45:02 +08:00
|
|
|
/*
|
|
|
|
* This function iterates the namespace list unlocked to allow recovery from
|
|
|
|
* controller failure. It is up to the caller to ensure the namespace list is
|
|
|
|
* not modified by scan work while this function is executing.
|
|
|
|
*/
|
2015-11-28 22:39:07 +08:00
|
|
|
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
|
|
|
|
{
|
|
|
|
struct nvme_ns *ns, *next;
|
|
|
|
|
2016-05-12 22:37:14 +08:00
|
|
|
/*
|
|
|
|
* The dead state indicates the controller was not gracefully
|
|
|
|
* disconnected. In that case, we won't be able to flush any data while
|
|
|
|
* removing the namespaces' disks; fail all the queues now to avoid
|
|
|
|
* potentially having to clean up the failed sync later.
|
|
|
|
*/
|
|
|
|
if (ctrl->state == NVME_CTRL_DEAD)
|
|
|
|
nvme_kill_queues(ctrl);
|
|
|
|
|
2015-11-28 22:39:07 +08:00
|
|
|
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
|
|
|
|
nvme_ns_remove(ns);
|
|
|
|
}
|
2016-02-11 02:03:32 +08:00
|
|
|
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
|
2015-11-28 22:39:07 +08:00
|
|
|
|
2016-04-26 19:52:00 +08:00
|
|
|
static void nvme_async_event_work(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct nvme_ctrl *ctrl =
|
|
|
|
container_of(work, struct nvme_ctrl, async_event_work);
|
|
|
|
|
|
|
|
spin_lock_irq(&ctrl->lock);
|
|
|
|
while (ctrl->event_limit > 0) {
|
|
|
|
int aer_idx = --ctrl->event_limit;
|
|
|
|
|
|
|
|
spin_unlock_irq(&ctrl->lock);
|
|
|
|
ctrl->ops->submit_async_event(ctrl, aer_idx);
|
|
|
|
spin_lock_irq(&ctrl->lock);
|
|
|
|
}
|
|
|
|
spin_unlock_irq(&ctrl->lock);
|
|
|
|
}
|
|
|
|
|
2016-11-10 23:32:34 +08:00
|
|
|
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
|
|
|
|
union nvme_result *res)
|
2016-04-26 19:52:00 +08:00
|
|
|
{
|
2016-11-10 23:32:34 +08:00
|
|
|
u32 result = le32_to_cpu(res->u32);
|
|
|
|
bool done = true;
|
2016-04-26 19:52:00 +08:00
|
|
|
|
2016-11-10 23:32:34 +08:00
|
|
|
switch (le16_to_cpu(status) >> 1) {
|
|
|
|
case NVME_SC_SUCCESS:
|
|
|
|
done = false;
|
|
|
|
/*FALLTHRU*/
|
|
|
|
case NVME_SC_ABORT_REQ:
|
2016-04-26 19:52:00 +08:00
|
|
|
++ctrl->event_limit;
|
2017-05-04 18:33:14 +08:00
|
|
|
queue_work(nvme_wq, &ctrl->async_event_work);
|
2016-11-10 23:32:34 +08:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
2016-04-26 19:52:00 +08:00
|
|
|
}
|
|
|
|
|
2016-11-10 23:32:34 +08:00
|
|
|
if (done)
|
2016-04-26 19:52:00 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
switch (result & 0xff07) {
|
|
|
|
case NVME_AER_NOTICE_NS_CHANGED:
|
|
|
|
dev_info(ctrl->device, "rescanning\n");
|
|
|
|
nvme_queue_scan(ctrl);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
dev_warn(ctrl->device, "async event result %08x\n", result);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(nvme_complete_async_event);
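/*
 * Note on the (result & 0xff07) mask above: per the NVMe spec, the AER
 * completion dword carries the event type in bits 2:0 and the event
 * information in bits 15:8, with the associated log page identifier in
 * bits 23:16.  The mask keeps type and information, which together match
 * values such as NVME_AER_NOTICE_NS_CHANGED.
 */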
|
|
|
|
|
|
|
|
void nvme_queue_async_events(struct nvme_ctrl *ctrl)
|
|
|
|
{
|
|
|
|
ctrl->event_limit = NVME_NR_AERS;
|
2017-05-04 18:33:14 +08:00
|
|
|
queue_work(nvme_wq, &ctrl->async_event_work);
|
2016-04-26 19:52:00 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(nvme_queue_async_events);
|
|
|
|
|
2015-11-28 22:40:19 +08:00
|
|
|
static DEFINE_IDA(nvme_instance_ida);
|
|
|
|
|
|
|
|
static int nvme_set_instance(struct nvme_ctrl *ctrl)
|
|
|
|
{
|
|
|
|
int instance, error;
|
|
|
|
|
|
|
|
do {
|
|
|
|
if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
spin_lock(&dev_list_lock);
|
|
|
|
error = ida_get_new(&nvme_instance_ida, &instance);
|
|
|
|
spin_unlock(&dev_list_lock);
|
|
|
|
} while (error == -EAGAIN);
|
|
|
|
|
|
|
|
if (error)
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
ctrl->instance = instance;
|
|
|
|
return 0;
|
|
|
|
}
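/*
 * The ida_pre_get()/ida_get_new() pair above is the two-step IDA
 * interface: memory is preallocated outside dev_list_lock, and the whole
 * sequence is retried whenever ida_get_new() returns -EAGAIN because the
 * preallocation was consumed.
 */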
|
|
|
|
|
|
|
|
static void nvme_release_instance(struct nvme_ctrl *ctrl)
|
|
|
|
{
|
|
|
|
spin_lock(&dev_list_lock);
|
|
|
|
ida_remove(&nvme_instance_ida, ctrl->instance);
|
|
|
|
spin_unlock(&dev_list_lock);
|
|
|
|
}
|
|
|
|
|
2015-11-28 22:41:02 +08:00
|
|
|
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
|
2016-02-11 02:03:32 +08:00
|
|
|
{
|
2016-04-26 19:52:00 +08:00
|
|
|
flush_work(&ctrl->async_event_work);
|
2016-04-26 19:51:59 +08:00
|
|
|
flush_work(&ctrl->scan_work);
|
|
|
|
nvme_remove_namespaces(ctrl);
|
|
|
|
|
2015-11-28 22:41:02 +08:00
|
|
|
device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));
|
2015-11-28 22:40:19 +08:00
|
|
|
|
|
|
|
spin_lock(&dev_list_lock);
|
|
|
|
list_del(&ctrl->node);
|
|
|
|
spin_unlock(&dev_list_lock);
|
2015-11-28 22:41:02 +08:00
|
|
|
}
|
2016-02-11 02:03:32 +08:00
|
|
|
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
|
2015-11-28 22:41:02 +08:00
|
|
|
|
|
|
|
static void nvme_free_ctrl(struct kref *kref)
|
|
|
|
{
|
|
|
|
struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref);
|
2015-11-28 22:40:19 +08:00
|
|
|
|
|
|
|
put_device(ctrl->device);
|
|
|
|
nvme_release_instance(ctrl);
|
2016-02-25 00:15:53 +08:00
|
|
|
ida_destroy(&ctrl->ns_ida);
|
2015-11-28 22:40:19 +08:00
|
|
|
|
|
|
|
ctrl->ops->free_ctrl(ctrl);
|
|
|
|
}
|
|
|
|
|
|
|
|
void nvme_put_ctrl(struct nvme_ctrl *ctrl)
|
|
|
|
{
|
|
|
|
kref_put(&ctrl->kref, nvme_free_ctrl);
|
|
|
|
}
|
2016-02-11 02:03:32 +08:00
|
|
|
EXPORT_SYMBOL_GPL(nvme_put_ctrl);
|
2015-11-28 22:40:19 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize an NVMe controller structure. This needs to be called during
|
|
|
|
* earliest initialization so that we have the initialized structure around
|
|
|
|
* during probing.
|
|
|
|
*/
|
|
|
|
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
|
|
|
|
const struct nvme_ctrl_ops *ops, unsigned long quirks)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2016-04-26 19:51:57 +08:00
|
|
|
ctrl->state = NVME_CTRL_NEW;
|
|
|
|
spin_lock_init(&ctrl->lock);
|
2015-11-28 22:40:19 +08:00
|
|
|
INIT_LIST_HEAD(&ctrl->namespaces);
|
2015-12-24 22:27:00 +08:00
|
|
|
mutex_init(&ctrl->namespaces_mutex);
|
2015-11-28 22:40:19 +08:00
|
|
|
kref_init(&ctrl->kref);
|
|
|
|
ctrl->dev = dev;
|
|
|
|
ctrl->ops = ops;
|
|
|
|
ctrl->quirks = quirks;
|
2016-04-26 19:51:59 +08:00
|
|
|
INIT_WORK(&ctrl->scan_work, nvme_scan_work);
|
2016-04-26 19:52:00 +08:00
|
|
|
INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
|
2015-11-28 22:40:19 +08:00
|
|
|
|
|
|
|
ret = nvme_set_instance(ctrl);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
2016-01-13 06:09:31 +08:00
|
|
|
ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
|
2015-11-28 22:40:19 +08:00
|
|
|
MKDEV(nvme_char_major, ctrl->instance),
|
2016-02-10 03:44:03 +08:00
|
|
|
ctrl, nvme_dev_attr_groups,
|
2016-01-13 06:09:31 +08:00
|
|
|
"nvme%d", ctrl->instance);
|
2015-11-28 22:40:19 +08:00
|
|
|
if (IS_ERR(ctrl->device)) {
|
|
|
|
ret = PTR_ERR(ctrl->device);
|
|
|
|
goto out_release_instance;
|
|
|
|
}
|
|
|
|
get_device(ctrl->device);
|
2016-02-25 00:15:53 +08:00
|
|
|
ida_init(&ctrl->ns_ida);
|
2015-11-28 22:40:19 +08:00
|
|
|
|
|
|
|
spin_lock(&dev_list_lock);
|
|
|
|
list_add_tail(&ctrl->node, &nvme_ctrl_list);
|
|
|
|
spin_unlock(&dev_list_lock);
|
|
|
|
|
2017-02-08 02:08:45 +08:00
|
|
|
/*
|
|
|
|
* Initialize latency tolerance controls. The sysfs files won't
|
|
|
|
* be visible to userspace unless the device actually supports APST.
|
|
|
|
*/
|
|
|
|
ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
|
|
|
|
dev_pm_qos_update_user_latency_tolerance(ctrl->device,
|
|
|
|
min(default_ps_max_latency_us, (unsigned long)S32_MAX));
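/*
 * The clamp to S32_MAX matters because dev_pm_qos stores the latency
 * tolerance as a signed 32-bit microsecond value, with negative values
 * reserved for special meanings such as "no constraint".
 */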
|
|
|
|
|
2015-11-28 22:40:19 +08:00
|
|
|
return 0;
|
|
|
|
out_release_instance:
|
|
|
|
nvme_release_instance(ctrl);
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
2016-02-11 02:03:32 +08:00
|
|
|
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
|
2015-11-28 22:40:19 +08:00
|
|
|
|
2016-02-25 00:15:56 +08:00
|
|
|
/**
|
|
|
|
* nvme_kill_queues() - Ends all namespace queues
|
|
|
|
* @ctrl: the dead controller whose namespace queues need to be ended
|
|
|
|
*
|
|
|
|
* Call this function when the driver determines it is unable to get the
|
|
|
|
* controller in a state capable of servicing IO.
|
|
|
|
*/
|
|
|
|
void nvme_kill_queues(struct nvme_ctrl *ctrl)
|
|
|
|
{
|
|
|
|
struct nvme_ns *ns;
|
|
|
|
|
2016-07-14 01:45:02 +08:00
|
|
|
mutex_lock(&ctrl->namespaces_mutex);
|
2017-06-02 16:32:08 +08:00
|
|
|
|
2017-06-19 10:21:08 +08:00
|
|
|
/* Forcibly unquiesce queues to avoid blocking dispatch */
|
|
|
|
blk_mq_unquiesce_queue(ctrl->admin_q);
|
|
|
|
|
2017-06-02 16:32:08 +08:00
|
|
|
/* Forcibly start all queues to avoid having stuck requests */
|
|
|
|
blk_mq_start_hw_queues(ctrl->admin_q);
|
|
|
|
|
2016-07-14 01:45:02 +08:00
|
|
|
list_for_each_entry(ns, &ctrl->namespaces, list) {
|
2016-02-25 00:15:56 +08:00
|
|
|
/*
|
|
|
|
* Revalidating a dead namespace sets capacity to 0. This will
|
|
|
|
* end buffered writers dirtying pages that can't be synced.
|
|
|
|
*/
|
2017-02-11 07:15:51 +08:00
|
|
|
if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
|
|
|
|
continue;
|
|
|
|
revalidate_disk(ns->disk);
|
2016-02-25 00:15:56 +08:00
|
|
|
blk_set_queue_dying(ns->queue);
|
2017-05-22 23:05:03 +08:00
|
|
|
|
2017-06-19 10:21:08 +08:00
|
|
|
/* Forcibly unquiesce queues to avoid blocking dispatch */
|
|
|
|
blk_mq_unquiesce_queue(ns->queue);
|
|
|
|
|
2017-05-22 23:05:03 +08:00
|
|
|
/*
|
|
|
|
* Forcibly start all queues to avoid having stuck requests.
|
|
|
|
* Note that we must ensure the queues are not stopped
|
|
|
|
* when the final removal happens.
|
|
|
|
*/
|
|
|
|
blk_mq_start_hw_queues(ns->queue);
|
2017-05-22 23:05:04 +08:00
|
|
|
|
|
|
|
/* drain any requests waiting on the requeue list */
|
|
|
|
blk_mq_kick_requeue_list(ns->queue);
|
2016-02-25 00:15:56 +08:00
|
|
|
}
|
2016-07-14 01:45:02 +08:00
|
|
|
mutex_unlock(&ctrl->namespaces_mutex);
|
2016-02-25 00:15:56 +08:00
|
|
|
}
|
2016-03-19 08:13:31 +08:00
|
|
|
EXPORT_SYMBOL_GPL(nvme_kill_queues);
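/*
 * The freeze helpers below are meant to be used in pairs by transport
 * drivers around a controller reset: nvme_start_freeze() begins blocking
 * new I/O, nvme_wait_freeze()/nvme_wait_freeze_timeout() wait for
 * in-flight requests to drain, and nvme_unfreeze() releases the queues
 * once the controller is serving I/O again.
 */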
|
2016-02-25 00:15:56 +08:00
|
|
|
|
2017-03-02 03:22:12 +08:00
|
|
|
void nvme_unfreeze(struct nvme_ctrl *ctrl)
|
|
|
|
{
|
|
|
|
struct nvme_ns *ns;
|
|
|
|
|
|
|
|
mutex_lock(&ctrl->namespaces_mutex);
|
|
|
|
list_for_each_entry(ns, &ctrl->namespaces, list)
|
|
|
|
blk_mq_unfreeze_queue(ns->queue);
|
|
|
|
mutex_unlock(&ctrl->namespaces_mutex);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(nvme_unfreeze);
|
|
|
|
|
|
|
|
void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
|
|
|
|
{
|
|
|
|
struct nvme_ns *ns;
|
|
|
|
|
|
|
|
mutex_lock(&ctrl->namespaces_mutex);
|
|
|
|
list_for_each_entry(ns, &ctrl->namespaces, list) {
|
|
|
|
timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
|
|
|
|
if (timeout <= 0)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
mutex_unlock(&ctrl->namespaces_mutex);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
|
|
|
|
|
|
|
|
void nvme_wait_freeze(struct nvme_ctrl *ctrl)
|
|
|
|
{
|
|
|
|
struct nvme_ns *ns;
|
|
|
|
|
|
|
|
mutex_lock(&ctrl->namespaces_mutex);
|
|
|
|
list_for_each_entry(ns, &ctrl->namespaces, list)
|
|
|
|
blk_mq_freeze_queue_wait(ns->queue);
|
|
|
|
mutex_unlock(&ctrl->namespaces_mutex);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(nvme_wait_freeze);
|
|
|
|
|
|
|
|
void nvme_start_freeze(struct nvme_ctrl *ctrl)
|
|
|
|
{
|
|
|
|
struct nvme_ns *ns;
|
|
|
|
|
|
|
|
mutex_lock(&ctrl->namespaces_mutex);
|
|
|
|
list_for_each_entry(ns, &ctrl->namespaces, list)
|
2017-03-27 20:06:57 +08:00
|
|
|
blk_freeze_queue_start(ns->queue);
|
2017-03-02 03:22:12 +08:00
|
|
|
mutex_unlock(&ctrl->namespaces_mutex);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(nvme_start_freeze);
|
|
|
|
|
2016-01-05 00:10:57 +08:00
|
|
|
void nvme_stop_queues(struct nvme_ctrl *ctrl)
|
2015-12-24 22:26:59 +08:00
|
|
|
{
|
|
|
|
struct nvme_ns *ns;
|
|
|
|
|
2016-07-14 01:45:02 +08:00
|
|
|
mutex_lock(&ctrl->namespaces_mutex);
|
2016-10-29 08:23:40 +08:00
|
|
|
list_for_each_entry(ns, &ctrl->namespaces, list)
|
2016-10-29 08:23:19 +08:00
|
|
|
blk_mq_quiesce_queue(ns->queue);
|
2016-07-14 01:45:02 +08:00
|
|
|
mutex_unlock(&ctrl->namespaces_mutex);
|
2015-12-24 22:26:59 +08:00
|
|
|
}
|
2016-02-11 02:03:32 +08:00
|
|
|
EXPORT_SYMBOL_GPL(nvme_stop_queues);
|
2015-12-24 22:26:59 +08:00
|
|
|
|
2016-01-05 00:10:57 +08:00
|
|
|
void nvme_start_queues(struct nvme_ctrl *ctrl)
|
2015-12-24 22:26:59 +08:00
|
|
|
{
|
|
|
|
struct nvme_ns *ns;
|
|
|
|
|
2016-07-14 01:45:02 +08:00
|
|
|
mutex_lock(&ctrl->namespaces_mutex);
|
|
|
|
list_for_each_entry(ns, &ctrl->namespaces, list) {
|
2017-06-06 23:22:04 +08:00
|
|
|
blk_mq_unquiesce_queue(ns->queue);
|
2015-12-24 22:26:59 +08:00
|
|
|
blk_mq_kick_requeue_list(ns->queue);
|
|
|
|
}
|
2016-07-14 01:45:02 +08:00
|
|
|
mutex_unlock(&ctrl->namespaces_mutex);
|
2015-12-24 22:26:59 +08:00
|
|
|
}
|
2016-02-11 02:03:32 +08:00
|
|
|
EXPORT_SYMBOL_GPL(nvme_start_queues);
|
2015-12-24 22:26:59 +08:00
|
|
|
|
2015-11-28 22:39:07 +08:00
|
|
|
int __init nvme_core_init(void)
|
|
|
|
{
|
|
|
|
int result;
|
|
|
|
|
2017-06-08 02:31:55 +08:00
|
|
|
nvme_wq = alloc_workqueue("nvme-wq",
|
|
|
|
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
|
|
|
|
if (!nvme_wq)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2015-11-28 22:40:19 +08:00
|
|
|
result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
|
|
|
|
&nvme_dev_fops);
|
|
|
|
if (result < 0)
|
2017-06-08 02:31:55 +08:00
|
|
|
goto destroy_wq;
|
2015-11-28 22:40:19 +08:00
|
|
|
else if (result > 0)
|
|
|
|
nvme_char_major = result;
|
|
|
|
|
|
|
|
nvme_class = class_create(THIS_MODULE, "nvme");
|
|
|
|
if (IS_ERR(nvme_class)) {
|
|
|
|
result = PTR_ERR(nvme_class);
|
|
|
|
goto unregister_chrdev;
|
|
|
|
}
|
|
|
|
|
2015-11-28 22:39:07 +08:00
|
|
|
return 0;
|
2015-11-28 22:40:19 +08:00
|
|
|
|
2017-06-08 02:31:55 +08:00
|
|
|
unregister_chrdev:
|
2015-11-28 22:40:19 +08:00
|
|
|
__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
|
2017-06-08 02:31:55 +08:00
|
|
|
destroy_wq:
|
|
|
|
destroy_workqueue(nvme_wq);
|
2015-11-28 22:40:19 +08:00
|
|
|
return result;
|
2015-11-28 22:39:07 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void nvme_core_exit(void)
|
|
|
|
{
|
2015-11-28 22:40:19 +08:00
|
|
|
class_destroy(nvme_class);
|
|
|
|
__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
|
2017-06-08 02:31:55 +08:00
|
|
|
destroy_workqueue(nvme_wq);
|
2015-11-28 22:39:07 +08:00
|
|
|
}
|
2016-02-11 02:03:32 +08:00
|
|
|
|
|
|
|
MODULE_LICENSE("GPL");
|
|
|
|
MODULE_VERSION("1.0");
|
|
|
|
module_init(nvme_core_init);
|
|
|
|
module_exit(nvme_core_exit);
|