Merge branch 'for-4.5/nvme' of git://git.kernel.dk/linux-block

Pull NVMe updates from Jens Axboe:
 "Last branch for this series is the nvme changes.  It's in a separate
  branch to avoid splitting too much between core and NVMe changes,
  since NVMe is still helping drive some blk-mq changes.  That said, not
  a huge amount of core changes in here.  The grunt of the work is the
  continued split of the code"

* 'for-4.5/nvme' of git://git.kernel.dk/linux-block: (67 commits)
  uapi: update install list after nvme.h rename
  NVMe: Export NVMe attributes to sysfs group
  NVMe: Shutdown controller only for power-off
  NVMe: IO queue deletion re-write
  NVMe: Remove queue freezing on resets
  NVMe: Use a retryable error code on reset
  NVMe: Fix admin queue ring wrap
  nvme: make SG_IO support optional
  nvme: fixes for NVME_IOCTL_IO_CMD on the char device
  nvme: synchronize access to ctrl->namespaces
  nvme: Move nvme_freeze/unfreeze_queues to nvme core
  PCI/AER: include header file
  NVMe: Export namespace attributes to sysfs
  NVMe: Add pci error handlers
  block: remove REQ_NO_TIMEOUT flag
  nvme: merge iod and cmd_info
  nvme: meta_sg doesn't have to be an array
  nvme: properly free resources for cancelled command
  nvme: simplify completion handling
  nvme: special case AEN requests
  ...
commit 3e1e21c7bf
Committed by Linus Torvalds on 2016-01-21 19:58:02 -08:00
19 changed files with 2620 additions and 2221 deletions

@ -66,7 +66,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
}
if (unlikely(!bip))
return NULL;
return ERR_PTR(-ENOMEM);
memset(bip, 0, sizeof(*bip));
@ -89,7 +89,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
return bip;
err:
mempool_free(bip, bs->bio_integrity_pool);
return NULL;
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(bio_integrity_alloc);
@ -298,10 +298,10 @@ int bio_integrity_prep(struct bio *bio)
/* Allocate bio integrity payload and integrity vectors */
bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages);
if (unlikely(bip == NULL)) {
if (IS_ERR(bip)) {
printk(KERN_ERR "could not allocate data integrity bioset\n");
kfree(buf);
return -EIO;
return PTR_ERR(bip);
}
bip->bip_flags |= BIP_BLOCK_INTEGRITY;
@ -465,9 +465,8 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
BUG_ON(bip_src == NULL);
bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
if (bip == NULL)
return -EIO;
if (IS_ERR(bip))
return PTR_ERR(bip);
memcpy(bip->bip_vec, bip_src->bip_vec,
bip_src->bip_vcnt * sizeof(struct bio_vec));
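
For reference, this is the calling convention the series moves bio integrity users to: bio_integrity_alloc() now reports failure through ERR_PTR() rather than NULL, so callers test with IS_ERR() and propagate PTR_ERR(). A minimal illustrative fragment (the function and nr_vecs names are invented for the example, not part of the patch):

    /* Hypothetical caller, showing only the new error-handling pattern. */
    static int example_attach_integrity(struct bio *bio, unsigned int nr_vecs)
    {
            struct bio_integrity_payload *bip;

            bip = bio_integrity_alloc(bio, GFP_NOIO, nr_vecs);
            if (IS_ERR(bip))
                    return PTR_ERR(bip);  /* -ENOMEM, or -EINVAL from the !CONFIG_BLK_DEV_INTEGRITY stub */

            /* ... fill bip and submit as before ... */
            return 0;
    }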

@ -680,6 +680,13 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
wake_up_all(&q->mq_freeze_wq);
}
static void blk_rq_timed_out_timer(unsigned long data)
{
struct request_queue *q = (struct request_queue *)data;
kblockd_schedule_work(&q->timeout_work);
}
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
struct request_queue *q;
@ -841,6 +848,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
goto fail;
INIT_WORK(&q->timeout_work, blk_timeout_work);
q->request_fn = rfn;
q->prep_rq_fn = NULL;
q->unprep_rq_fn = NULL;
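
Taken together with the blk-mq and blk-timeout hunks that follow, the shape of this change is: the queue timeout timer no longer walks requests from timer (softirq) context, it only schedules q->timeout_work, and the actual scan runs from kblockd in process context, bracketed by blk_queue_enter()/blk_queue_exit(). A rough sketch of the wiring, assuming the timer is still armed via setup_timer() in blk_alloc_queue_node(), which sits just outside the visible hunk:

    /* timer -> kblockd work item -> per-request timeout scan */
    setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long)q);
    INIT_WORK(&q->timeout_work, blk_timeout_work);   /* legacy request_fn queues */
    /* blk-mq queues instead do: INIT_WORK(&q->timeout_work, blk_mq_timeout_work); */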

@ -603,8 +603,6 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
blk_mq_complete_request(rq, -EIO);
return;
}
if (rq->cmd_flags & REQ_NO_TIMEOUT)
return;
if (time_after_eq(jiffies, rq->deadline)) {
if (!blk_mark_rq_complete(rq))
@ -615,15 +613,19 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
}
}
static void blk_mq_rq_timer(unsigned long priv)
static void blk_mq_timeout_work(struct work_struct *work)
{
struct request_queue *q = (struct request_queue *)priv;
struct request_queue *q =
container_of(work, struct request_queue, timeout_work);
struct blk_mq_timeout_data data = {
.next = 0,
.next_set = 0,
};
int i;
if (blk_queue_enter(q, true))
return;
blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
if (data.next_set) {
@ -638,6 +640,7 @@ static void blk_mq_rq_timer(unsigned long priv)
blk_mq_tag_idle(hctx);
}
}
blk_queue_exit(q);
}
/*
@ -2008,7 +2011,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
hctxs[i]->queue_num = i;
}
setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
q->nr_queues = nr_cpu_ids;

@ -127,13 +127,16 @@ static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout
}
}
void blk_rq_timed_out_timer(unsigned long data)
void blk_timeout_work(struct work_struct *work)
{
struct request_queue *q = (struct request_queue *) data;
struct request_queue *q =
container_of(work, struct request_queue, timeout_work);
unsigned long flags, next = 0;
struct request *rq, *tmp;
int next_set = 0;
if (blk_queue_enter(q, true))
return;
spin_lock_irqsave(q->queue_lock, flags);
list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
@ -143,6 +146,7 @@ void blk_rq_timed_out_timer(unsigned long data)
mod_timer(&q->timeout, round_jiffies_up(next));
spin_unlock_irqrestore(q->queue_lock, flags);
blk_queue_exit(q);
}
/**
@ -193,9 +197,6 @@ void blk_add_timer(struct request *req)
struct request_queue *q = req->q;
unsigned long expiry;
if (req->cmd_flags & REQ_NO_TIMEOUT)
return;
/* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
if (!q->mq_ops && !q->rq_timed_out_fn)
return;

@ -93,7 +93,7 @@ static inline void blk_flush_integrity(void)
}
#endif
void blk_rq_timed_out_timer(unsigned long data);
void blk_timeout_work(struct work_struct *work);
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
void blk_delete_timer(struct request *);

@ -8,3 +8,14 @@ config BLK_DEV_NVME
To compile this driver as a module, choose M here: the
module will be called nvme.
config BLK_DEV_NVME_SCSI
bool "SCSI emulation for NVMe device nodes"
depends on BLK_DEV_NVME
---help---
This adds support for the SG_IO ioctl on the NVMe character
and block device nodes, as well as a translation of a small
number of selected SCSI commands to NVMe commands in the NVMe
driver. If you don't know what this means you probably want
to say N here, and if you know what it means you probably
want to say N as well.

@ -2,4 +2,5 @@
obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
lightnvm-$(CONFIG_NVM) := lightnvm.o
nvme-y += pci.o scsi.o $(lightnvm-y)
nvme-y += core.o pci.o $(lightnvm-y)
nvme-$(CONFIG_BLK_DEV_NVME_SCSI) += scsi.o

drivers/nvme/host/core.c (new file, 1472 lines): diff suppressed because it is too large.

@ -294,7 +294,6 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
{
struct nvme_ns *ns = nvmdev->q->queuedata;
struct nvme_dev *dev = ns->dev;
struct nvme_nvm_id *nvme_nvm_id;
struct nvme_nvm_command c = {};
int ret;
@ -307,7 +306,7 @@ static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
if (!nvme_nvm_id)
return -ENOMEM;
ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
nvme_nvm_id, sizeof(struct nvme_nvm_id));
if (ret) {
ret = -EIO;
@ -332,9 +331,8 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
nvm_l2p_update_fn *update_l2p, void *priv)
{
struct nvme_ns *ns = nvmdev->q->queuedata;
struct nvme_dev *dev = ns->dev;
struct nvme_nvm_command c = {};
u32 len = queue_max_hw_sectors(dev->admin_q) << 9;
u32 len = queue_max_hw_sectors(ns->ctrl->admin_q) << 9;
u32 nlb_pr_rq = len / sizeof(u64);
u64 cmd_slba = slba;
void *entries;
@ -352,10 +350,10 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
c.l2p.slba = cpu_to_le64(cmd_slba);
c.l2p.nlb = cpu_to_le32(cmd_nlb);
ret = nvme_submit_sync_cmd(dev->admin_q,
ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
(struct nvme_command *)&c, entries, len);
if (ret) {
dev_err(dev->dev, "L2P table transfer failed (%d)\n",
dev_err(ns->ctrl->dev, "L2P table transfer failed (%d)\n",
ret);
ret = -EIO;
goto out;
@ -381,7 +379,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
{
struct request_queue *q = nvmdev->q;
struct nvme_ns *ns = q->queuedata;
struct nvme_dev *dev = ns->dev;
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_nvm_command c = {};
struct nvme_nvm_bb_tbl *bb_tbl;
int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
@ -395,30 +393,30 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
if (!bb_tbl)
return -ENOMEM;
ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
bb_tbl, tblsz);
if (ret) {
dev_err(dev->dev, "get bad block table failed (%d)\n", ret);
dev_err(ctrl->dev, "get bad block table failed (%d)\n", ret);
ret = -EIO;
goto out;
}
if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
dev_err(dev->dev, "bbt format mismatch\n");
dev_err(ctrl->dev, "bbt format mismatch\n");
ret = -EINVAL;
goto out;
}
if (le16_to_cpu(bb_tbl->verid) != 1) {
ret = -EINVAL;
dev_err(dev->dev, "bbt version not supported\n");
dev_err(ctrl->dev, "bbt version not supported\n");
goto out;
}
if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
ret = -EINVAL;
dev_err(dev->dev, "bbt unsuspected blocks returned (%u!=%u)",
dev_err(ctrl->dev, "bbt unsuspected blocks returned (%u!=%u)",
le32_to_cpu(bb_tbl->tblks), nr_blocks);
goto out;
}
@ -434,7 +432,6 @@ static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
int type)
{
struct nvme_ns *ns = nvmdev->q->queuedata;
struct nvme_dev *dev = ns->dev;
struct nvme_nvm_command c = {};
int ret = 0;
@ -444,10 +441,10 @@ static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
c.set_bb.value = type;
ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
NULL, 0);
if (ret)
dev_err(dev->dev, "set bad block table failed (%d)\n", ret);
dev_err(ns->ctrl->dev, "set bad block table failed (%d)\n", ret);
return ret;
}
@ -532,9 +529,8 @@ static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
struct nvme_ns *ns = nvmdev->q->queuedata;
struct nvme_dev *dev = ns->dev;
return dma_pool_create(name, dev->dev, PAGE_SIZE, PAGE_SIZE, 0);
return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
}
static void nvme_nvm_destroy_dma_pool(void *pool)
@ -592,8 +588,9 @@ void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
struct nvme_dev *dev = ns->dev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct nvme_ctrl *ctrl = ns->ctrl;
/* XXX: this is poking into PCI structures from generic code! */
struct pci_dev *pdev = to_pci_dev(ctrl->dev);
/* QEMU NVMe simulator - PCI ID + Vendor specific bit */
if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
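
Every hunk in this file follows the same mechanical pattern: LightNVM no longer dereferences the PCI-specific struct nvme_dev through the namespace, but the new transport-neutral struct nvme_ctrl. In condensed form (illustrative fragment, not taken verbatim from the patch):

    struct nvme_ns *ns = nvmdev->q->queuedata;
    struct nvme_ctrl *ctrl = ns->ctrl;      /* was: struct nvme_dev *dev = ns->dev */

    ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c, buf, len);
    if (ret)
            dev_err(ctrl->dev, "command failed (%d)\n", ret);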

@ -19,58 +19,77 @@
#include <linux/kref.h>
#include <linux/blk-mq.h>
enum {
/*
* Driver internal status code for commands that were cancelled due
* to timeouts or controller shutdown. The value is negative so
* that it a) doesn't overlap with the unsigned hardware error codes,
* and b) can easily be tested for.
*/
NVME_SC_CANCELLED = -EINTR,
};
extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ)
extern unsigned char admin_timeout;
#define ADMIN_TIMEOUT (admin_timeout * HZ)
extern unsigned char shutdown_timeout;
#define SHUTDOWN_TIMEOUT (shutdown_timeout * HZ)
enum {
NVME_NS_LBA = 0,
NVME_NS_LIGHTNVM = 1,
};
/*
* Represents an NVM Express device. Each nvme_dev is a PCI function.
* List of workarounds for devices that required behavior not specified in
* the standard.
*/
struct nvme_dev {
struct list_head node;
struct nvme_queue **queues;
enum nvme_quirks {
/*
* Prefers I/O aligned to a stripe size specified in a vendor
* specific Identify field.
*/
NVME_QUIRK_STRIPE_SIZE = (1 << 0),
/*
* The controller doesn't handle Identify values other than 0 or 1
* correctly.
*/
NVME_QUIRK_IDENTIFY_CNS = (1 << 1),
};
struct nvme_ctrl {
const struct nvme_ctrl_ops *ops;
struct request_queue *admin_q;
struct blk_mq_tag_set tagset;
struct blk_mq_tag_set admin_tagset;
u32 __iomem *dbs;
struct device *dev;
struct dma_pool *prp_page_pool;
struct dma_pool *prp_small_pool;
int instance;
unsigned queue_count;
unsigned online_queues;
unsigned max_qid;
int q_depth;
u32 db_stride;
u32 ctrl_config;
struct msix_entry *entry;
struct nvme_bar __iomem *bar;
struct list_head namespaces;
struct kref kref;
struct device *device;
struct work_struct reset_work;
struct work_struct probe_work;
struct work_struct scan_work;
int instance;
struct blk_mq_tag_set *tagset;
struct list_head namespaces;
struct mutex namespaces_mutex;
struct device *device; /* char device */
struct list_head node;
char name[12];
char serial[20];
char model[40];
char firmware_rev[8];
bool subsystem;
u32 ctrl_config;
u32 page_size;
u32 max_hw_sectors;
u32 stripe_size;
u32 page_size;
void __iomem *cmb;
dma_addr_t cmb_dma_addr;
u64 cmb_size;
u32 cmbsz;
u16 oncs;
u16 abort_limit;
atomic_t abort_limit;
u8 event_limit;
u8 vwc;
u32 vs;
bool subsystem;
unsigned long quirks;
};
/*
@ -79,11 +98,14 @@ struct nvme_dev {
struct nvme_ns {
struct list_head list;
struct nvme_dev *dev;
struct nvme_ctrl *ctrl;
struct request_queue *queue;
struct gendisk *disk;
struct kref kref;
u8 eui[8];
u8 uuid[16];
unsigned ns_id;
int lba_shift;
u16 ms;
@ -94,41 +116,156 @@ struct nvme_ns {
u32 mode_select_block_len;
};
/*
* The nvme_iod describes the data in an I/O, including the list of PRP
* entries. You can't see it in this data structure because C doesn't let
* me express that. Use nvme_alloc_iod to ensure there's enough space
* allocated to store the PRP list.
*/
struct nvme_iod {
unsigned long private; /* For the use of the submitter of the I/O */
int npages; /* In the PRP list. 0 means small pool in use */
int offset; /* Of PRP list */
int nents; /* Used in scatterlist */
int length; /* Of data, in bytes */
dma_addr_t first_dma;
struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */
struct scatterlist sg[0];
struct nvme_ctrl_ops {
int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
bool (*io_incapable)(struct nvme_ctrl *ctrl);
int (*reset_ctrl)(struct nvme_ctrl *ctrl);
void (*free_ctrl)(struct nvme_ctrl *ctrl);
};
static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
u32 val = 0;
if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
return false;
return val & NVME_CSTS_RDY;
}
static inline bool nvme_io_incapable(struct nvme_ctrl *ctrl)
{
u32 val = 0;
if (ctrl->ops->io_incapable(ctrl))
return true;
if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
return false;
return val & NVME_CSTS_CFS;
}
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
if (!ctrl->subsystem)
return -ENOTTY;
return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
return (sector >> (ns->lba_shift - 9));
}
static inline void nvme_setup_flush(struct nvme_ns *ns,
struct nvme_command *cmnd)
{
memset(cmnd, 0, sizeof(*cmnd));
cmnd->common.opcode = nvme_cmd_flush;
cmnd->common.nsid = cpu_to_le32(ns->ns_id);
}
static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmnd)
{
u16 control = 0;
u32 dsmgmt = 0;
if (req->cmd_flags & REQ_FUA)
control |= NVME_RW_FUA;
if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
control |= NVME_RW_LR;
if (req->cmd_flags & REQ_RAHEAD)
dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
memset(cmnd, 0, sizeof(*cmnd));
cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
cmnd->rw.command_id = req->tag;
cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
if (ns->ms) {
switch (ns->pi_type) {
case NVME_NS_DPS_PI_TYPE3:
control |= NVME_RW_PRINFO_PRCHK_GUARD;
break;
case NVME_NS_DPS_PI_TYPE1:
case NVME_NS_DPS_PI_TYPE2:
control |= NVME_RW_PRINFO_PRCHK_GUARD |
NVME_RW_PRINFO_PRCHK_REF;
cmnd->rw.reftag = cpu_to_le32(
nvme_block_nr(ns, blk_rq_pos(req)));
break;
}
if (!blk_integrity_rq(req))
control |= NVME_RW_PRINFO_PRACT;
}
cmnd->rw.control = cpu_to_le16(control);
cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
}
static inline int nvme_error_status(u16 status)
{
switch (status & 0x7ff) {
case NVME_SC_SUCCESS:
return 0;
case NVME_SC_CAP_EXCEEDED:
return -ENOSPC;
default:
return -EIO;
}
}
static inline bool nvme_req_needs_retry(struct request *req, u16 status)
{
return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
(jiffies - req->start_time) < req->timeout;
}
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);
void nvme_scan_namespaces(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, unsigned int flags);
void nvme_requeue_req(struct request *req);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, void __user *ubuffer, unsigned bufflen,
void *buffer, unsigned bufflen, u32 *result, unsigned timeout);
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
void __user *ubuffer, unsigned bufflen, u32 *result,
unsigned timeout);
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
void __user *ubuffer, unsigned bufflen,
void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
u32 *result, unsigned timeout);
int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id);
int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid,
int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
dma_addr_t dma_addr, u32 *result);
int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
dma_addr_t dma_addr, u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
extern spinlock_t dev_list_lock;
struct sg_io_hdr;
@ -154,4 +291,7 @@ static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *i
}
#endif /* CONFIG_NVM */
int __init nvme_core_init(void);
void nvme_core_exit(void);
#endif /* _NVME_H */
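
This header now spells out the whole transport interface: a driver embeds a struct nvme_ctrl, provides nvme_ctrl_ops for register access and teardown, and drives the core through nvme_init_ctrl(), nvme_init_identify() and nvme_scan_namespaces(). A heavily abridged sketch of that flow for a hypothetical memory-mapped transport (the foo_* names are invented; the in-tree implementation of these ops is the PCI driver, drivers/nvme/host/pci.c):

    struct foo_dev {
            struct nvme_ctrl ctrl;
            void __iomem *bar;              /* memory-mapped controller registers */
    };

    static int foo_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
    {
            *val = readl(container_of(ctrl, struct foo_dev, ctrl)->bar + off);
            return 0;
    }

    static int foo_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
    {
            writel(val, container_of(ctrl, struct foo_dev, ctrl)->bar + off);
            return 0;
    }

    static const struct nvme_ctrl_ops foo_ctrl_ops = {
            .reg_read32     = foo_reg_read32,
            .reg_write32    = foo_reg_write32,
            /* .reg_read64, .io_incapable, .reset_ctrl, .free_ctrl omitted */
    };

    /* probe-time flow, abridged */
    ret = nvme_init_ctrl(&foo->ctrl, dev, &foo_ctrl_ops, 0 /* quirks */);
    ...
    ret = nvme_init_identify(&foo->ctrl);  /* fills model/serial/firmware_rev etc. */
    nvme_scan_namespaces(&foo->ctrl);      /* creates the per-namespace block devices */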

File diff suppressed because it is too large.

@ -524,7 +524,7 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
struct sg_io_hdr *hdr, u8 *inq_response,
int alloc_len)
{
struct nvme_dev *dev = ns->dev;
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_id_ns *id_ns;
int res;
int nvme_sc;
@ -532,10 +532,10 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
u8 resp_data_format = 0x02;
u8 protect;
u8 cmdque = 0x01 << 1;
u8 fw_offset = sizeof(dev->firmware_rev);
u8 fw_offset = sizeof(ctrl->firmware_rev);
/* nvme ns identify - use DPS value for PROTECT field */
nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
nvme_sc = nvme_identify_ns(ctrl, ns->ns_id, &id_ns);
res = nvme_trans_status_code(hdr, nvme_sc);
if (res)
return res;
@ -553,12 +553,12 @@ static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
inq_response[5] = protect; /* sccs=0 | acc=0 | tpgs=0 | pc3=0 */
inq_response[7] = cmdque; /* wbus16=0 | sync=0 | vs=0 */
strncpy(&inq_response[8], "NVMe ", 8);
strncpy(&inq_response[16], dev->model, 16);
strncpy(&inq_response[16], ctrl->model, 16);
while (dev->firmware_rev[fw_offset - 1] == ' ' && fw_offset > 4)
while (ctrl->firmware_rev[fw_offset - 1] == ' ' && fw_offset > 4)
fw_offset--;
fw_offset -= 4;
strncpy(&inq_response[32], dev->firmware_rev + fw_offset, 4);
strncpy(&inq_response[32], ctrl->firmware_rev + fw_offset, 4);
xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
@ -588,82 +588,113 @@ static int nvme_trans_unit_serial_page(struct nvme_ns *ns,
struct sg_io_hdr *hdr, u8 *inq_response,
int alloc_len)
{
struct nvme_dev *dev = ns->dev;
int xfer_len;
memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE; /* Page Code */
inq_response[3] = INQ_SERIAL_NUMBER_LENGTH; /* Page Length */
strncpy(&inq_response[4], dev->serial, INQ_SERIAL_NUMBER_LENGTH);
strncpy(&inq_response[4], ns->ctrl->serial, INQ_SERIAL_NUMBER_LENGTH);
xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
}
static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 *inq_response, int alloc_len)
{
struct nvme_dev *dev = ns->dev;
int res;
int nvme_sc;
int xfer_len;
__be32 tmp_id = cpu_to_be32(ns->ns_id);
memset(inq_response, 0, alloc_len);
inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE; /* Page Code */
if (readl(&dev->bar->vs) >= NVME_VS(1, 1)) {
struct nvme_id_ns *id_ns;
int nvme_sc, res;
size_t len;
void *eui;
int len;
nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
res = nvme_trans_status_code(hdr, nvme_sc);
if (res)
return res;
eui = id_ns->eui64;
len = sizeof(id_ns->eui64);
if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) {
if (ns->ctrl->vs >= NVME_VS(1, 2)) {
if (bitmap_empty(eui, len * 8)) {
eui = id_ns->nguid;
len = sizeof(id_ns->nguid);
}
}
if (bitmap_empty(eui, len * 8)) {
kfree(id_ns);
goto scsi_string;
res = -EOPNOTSUPP;
goto out_free_id;
}
memset(inq_response, 0, alloc_len);
inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;
inq_response[3] = 4 + len; /* Page Length */
/* Designation Descriptor start */
inq_response[4] = 0x01; /* Proto ID=0h | Code set=1h */
inq_response[5] = 0x02; /* PIV=0b | Asso=00b | Designator Type=2h */
inq_response[6] = 0x00; /* Rsvd */
inq_response[7] = len; /* Designator Length */
memcpy(&inq_response[8], eui, len);
res = nvme_trans_copy_to_user(hdr, inq_response, alloc_len);
out_free_id:
kfree(id_ns);
} else {
scsi_string:
return res;
}
static int nvme_fill_device_id_scsi_string(struct nvme_ns *ns,
struct sg_io_hdr *hdr, u8 *inq_response, int alloc_len)
{
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_id_ctrl *id_ctrl;
int nvme_sc, res;
if (alloc_len < 72) {
return nvme_trans_completion(hdr,
SAM_STAT_CHECK_CONDITION,
ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
}
nvme_sc = nvme_identify_ctrl(ctrl, &id_ctrl);
res = nvme_trans_status_code(hdr, nvme_sc);
if (res)
return res;
memset(inq_response, 0, alloc_len);
inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;
inq_response[3] = 0x48; /* Page Length */
/* Designation Descriptor start */
inq_response[4] = 0x03; /* Proto ID=0h | Code set=3h */
inq_response[5] = 0x08; /* PIV=0b | Asso=00b | Designator Type=8h */
inq_response[6] = 0x00; /* Rsvd */
inq_response[7] = 0x44; /* Designator Length */
sprintf(&inq_response[8], "%04x", to_pci_dev(dev->dev)->vendor);
memcpy(&inq_response[12], dev->model, sizeof(dev->model));
sprintf(&inq_response[52], "%04x", tmp_id);
memcpy(&inq_response[56], dev->serial, sizeof(dev->serial));
sprintf(&inq_response[8], "%04x", le16_to_cpu(id_ctrl->vid));
memcpy(&inq_response[12], ctrl->model, sizeof(ctrl->model));
sprintf(&inq_response[52], "%04x", cpu_to_be32(ns->ns_id));
memcpy(&inq_response[56], ctrl->serial, sizeof(ctrl->serial));
res = nvme_trans_copy_to_user(hdr, inq_response, alloc_len);
kfree(id_ctrl);
return res;
}
static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 *resp, int alloc_len)
{
int res;
if (ns->ctrl->vs >= NVME_VS(1, 1)) {
res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len);
if (res != -EOPNOTSUPP)
return res;
}
xfer_len = alloc_len;
return nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
return nvme_fill_device_id_scsi_string(ns, hdr, resp, alloc_len);
}
static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
@ -672,7 +703,7 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 *inq_response;
int res;
int nvme_sc;
struct nvme_dev *dev = ns->dev;
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_id_ctrl *id_ctrl;
struct nvme_id_ns *id_ns;
int xfer_len;
@ -688,7 +719,7 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
if (inq_response == NULL)
return -ENOMEM;
nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
nvme_sc = nvme_identify_ns(ctrl, ns->ns_id, &id_ns);
res = nvme_trans_status_code(hdr, nvme_sc);
if (res)
goto out_free_inq;
@ -704,7 +735,7 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
app_chk = protect << 1;
ref_chk = protect;
nvme_sc = nvme_identify_ctrl(dev, &id_ctrl);
nvme_sc = nvme_identify_ctrl(ctrl, &id_ctrl);
res = nvme_trans_status_code(hdr, nvme_sc);
if (res)
goto out_free_inq;
@ -815,7 +846,6 @@ static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
int res;
int xfer_len;
u8 *log_response;
struct nvme_dev *dev = ns->dev;
struct nvme_smart_log *smart_log;
u8 temp_c;
u16 temp_k;
@ -824,7 +854,7 @@ static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
if (log_response == NULL)
return -ENOMEM;
res = nvme_get_log_page(dev, &smart_log);
res = nvme_get_log_page(ns->ctrl, &smart_log);
if (res < 0)
goto out_free_response;
@ -862,7 +892,6 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
int res;
int xfer_len;
u8 *log_response;
struct nvme_dev *dev = ns->dev;
struct nvme_smart_log *smart_log;
u32 feature_resp;
u8 temp_c_cur, temp_c_thresh;
@ -872,7 +901,7 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
if (log_response == NULL)
return -ENOMEM;
res = nvme_get_log_page(dev, &smart_log);
res = nvme_get_log_page(ns->ctrl, &smart_log);
if (res < 0)
goto out_free_response;
@ -886,7 +915,7 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
kfree(smart_log);
/* Get Features for Temp Threshold */
res = nvme_get_features(dev, NVME_FEAT_TEMP_THRESH, 0, 0,
res = nvme_get_features(ns->ctrl, NVME_FEAT_TEMP_THRESH, 0, 0,
&feature_resp);
if (res != NVME_SC_SUCCESS)
temp_c_thresh = LOG_TEMP_UNKNOWN;
@ -948,7 +977,6 @@ static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
{
int res;
int nvme_sc;
struct nvme_dev *dev = ns->dev;
struct nvme_id_ns *id_ns;
u8 flbas;
u32 lba_length;
@ -958,7 +986,7 @@ static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN)
return -EINVAL;
nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
res = nvme_trans_status_code(hdr, nvme_sc);
if (res)
return res;
@ -1014,14 +1042,13 @@ static int nvme_trans_fill_caching_page(struct nvme_ns *ns,
{
int res = 0;
int nvme_sc;
struct nvme_dev *dev = ns->dev;
u32 feature_resp;
u8 vwc;
if (len < MODE_PAGE_CACHING_LEN)
return -EINVAL;
nvme_sc = nvme_get_features(dev, NVME_FEAT_VOLATILE_WC, 0, 0,
nvme_sc = nvme_get_features(ns->ctrl, NVME_FEAT_VOLATILE_WC, 0, 0,
&feature_resp);
res = nvme_trans_status_code(hdr, nvme_sc);
if (res)
@ -1207,12 +1234,11 @@ static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
{
int res;
int nvme_sc;
struct nvme_dev *dev = ns->dev;
struct nvme_id_ctrl *id_ctrl;
int lowest_pow_st; /* max npss = lowest power consumption */
unsigned ps_desired = 0;
nvme_sc = nvme_identify_ctrl(dev, &id_ctrl);
nvme_sc = nvme_identify_ctrl(ns->ctrl, &id_ctrl);
res = nvme_trans_status_code(hdr, nvme_sc);
if (res)
return res;
@ -1256,7 +1282,7 @@ static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
break;
}
nvme_sc = nvme_set_features(dev, NVME_FEAT_POWER_MGMT, ps_desired, 0,
nvme_sc = nvme_set_features(ns->ctrl, NVME_FEAT_POWER_MGMT, ps_desired, 0,
NULL);
return nvme_trans_status_code(hdr, nvme_sc);
}
@ -1280,7 +1306,6 @@ static int nvme_trans_send_download_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr
u8 buffer_id)
{
int nvme_sc;
struct nvme_dev *dev = ns->dev;
struct nvme_command c;
if (hdr->iovec_count > 0) {
@ -1297,7 +1322,7 @@ static int nvme_trans_send_download_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr
c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
nvme_sc = __nvme_submit_sync_cmd(dev->admin_q, &c, NULL,
nvme_sc = nvme_submit_user_cmd(ns->ctrl->admin_q, &c,
hdr->dxferp, tot_len, NULL, 0);
return nvme_trans_status_code(hdr, nvme_sc);
}
@ -1364,14 +1389,13 @@ static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
{
int res = 0;
int nvme_sc;
struct nvme_dev *dev = ns->dev;
unsigned dword11;
switch (page_code) {
case MODE_PAGE_CACHING:
dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0);
nvme_sc = nvme_set_features(dev, NVME_FEAT_VOLATILE_WC, dword11,
0, NULL);
nvme_sc = nvme_set_features(ns->ctrl, NVME_FEAT_VOLATILE_WC,
dword11, 0, NULL);
res = nvme_trans_status_code(hdr, nvme_sc);
break;
case MODE_PAGE_CONTROL:
@ -1473,7 +1497,6 @@ static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
{
int res = 0;
int nvme_sc;
struct nvme_dev *dev = ns->dev;
u8 flbas;
/*
@ -1486,7 +1509,7 @@ static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) {
struct nvme_id_ns *id_ns;
nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
res = nvme_trans_status_code(hdr, nvme_sc);
if (res)
return res;
@ -1570,7 +1593,6 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
{
int res;
int nvme_sc;
struct nvme_dev *dev = ns->dev;
struct nvme_id_ns *id_ns;
u8 i;
u8 flbas, nlbaf;
@ -1579,7 +1601,7 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
struct nvme_command c;
/* Loop thru LBAF's in id_ns to match reqd lbaf, put in cdw10 */
nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
res = nvme_trans_status_code(hdr, nvme_sc);
if (res)
return res;
@ -1611,7 +1633,7 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
c.format.nsid = cpu_to_le32(ns->ns_id);
c.format.cdw10 = cpu_to_le32(cdw10);
nvme_sc = nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0);
nvme_sc = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, NULL, 0);
res = nvme_trans_status_code(hdr, nvme_sc);
kfree(id_ns);
@ -1704,7 +1726,7 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
nvme_sc = NVME_SC_LBA_RANGE;
break;
}
nvme_sc = __nvme_submit_sync_cmd(ns->queue, &c, NULL,
nvme_sc = nvme_submit_user_cmd(ns->queue, &c,
next_mapping_addr, unit_len, NULL, 0);
if (nvme_sc)
break;
@ -2040,7 +2062,6 @@ static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u32 alloc_len;
u32 resp_size;
u32 xfer_len;
struct nvme_dev *dev = ns->dev;
struct nvme_id_ns *id_ns;
u8 *response;
@ -2052,7 +2073,7 @@ static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
resp_size = READ_CAP_10_RESP_SIZE;
}
nvme_sc = nvme_identify_ns(dev, ns->ns_id, &id_ns);
nvme_sc = nvme_identify_ns(ns->ctrl, ns->ns_id, &id_ns);
res = nvme_trans_status_code(hdr, nvme_sc);
if (res)
return res;
@ -2080,7 +2101,6 @@ static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
int nvme_sc;
u32 alloc_len, xfer_len, resp_size;
u8 *response;
struct nvme_dev *dev = ns->dev;
struct nvme_id_ctrl *id_ctrl;
u32 ll_length, lun_id;
u8 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET;
@ -2094,7 +2114,7 @@ static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
case ALL_LUNS_RETURNED:
case ALL_WELL_KNOWN_LUNS_RETURNED:
case RESTRICTED_LUNS_RETURNED:
nvme_sc = nvme_identify_ctrl(dev, &id_ctrl);
nvme_sc = nvme_identify_ctrl(ns->ctrl, &id_ctrl);
res = nvme_trans_status_code(hdr, nvme_sc);
if (res)
return res;
@ -2295,9 +2315,7 @@ static int nvme_trans_test_unit_ready(struct nvme_ns *ns,
struct sg_io_hdr *hdr,
u8 *cmd)
{
struct nvme_dev *dev = ns->dev;
if (!(readl(&dev->bar->csts) & NVME_CSTS_RDY))
if (nvme_ctrl_ready(ns->ctrl))
return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
NOT_READY, SCSI_ASC_LUN_NOT_READY,
SCSI_ASCQ_CAUSE_NOT_REPORTABLE);

@ -615,9 +615,9 @@ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
}
bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
if (!bip) {
if (IS_ERR(bip)) {
pr_err("Unable to allocate bio_integrity_payload\n");
return -ENOMEM;
return PTR_ERR(bip);
}
bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *

@ -7,6 +7,7 @@
#ifndef _AER_H_
#define _AER_H_
#include <linux/errno.h>
#include <linux/types.h>
#define AER_NONFATAL 0

@ -318,16 +318,6 @@ enum bip_flags {
BIP_IP_CHECKSUM = 1 << 4, /* IP checksum */
};
#if defined(CONFIG_BLK_DEV_INTEGRITY)
static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
if (bio->bi_rw & REQ_INTEGRITY)
return bio->bi_integrity;
return NULL;
}
/*
* bio integrity payload
*/
@ -349,6 +339,16 @@ struct bio_integrity_payload {
struct bio_vec bip_inline_vecs[0];/* embedded bvec array */
};
#if defined(CONFIG_BLK_DEV_INTEGRITY)
static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
if (bio->bi_rw & REQ_INTEGRITY)
return bio->bi_integrity;
return NULL;
}
static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
@ -795,6 +795,18 @@ static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
return false;
}
static inline void *bio_integrity_alloc(struct bio * bio, gfp_t gfp,
unsigned int nr)
{
return ERR_PTR(-EINVAL);
}
static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int offset)
{
return 0;
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
#endif /* CONFIG_BLOCK */

@ -188,7 +188,6 @@ enum rq_flag_bits {
__REQ_PM, /* runtime pm request */
__REQ_HASHED, /* on IO scheduler merge hash */
__REQ_MQ_INFLIGHT, /* track inflight for MQ */
__REQ_NO_TIMEOUT, /* requests may never expire */
__REQ_NR_BITS, /* stops here */
};
@ -242,7 +241,6 @@ enum rq_flag_bits {
#define REQ_PM (1ULL << __REQ_PM)
#define REQ_HASHED (1ULL << __REQ_HASHED)
#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
#define REQ_NO_TIMEOUT (1ULL << __REQ_NO_TIMEOUT)
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE -1U

@ -409,6 +409,7 @@ struct request_queue {
unsigned int rq_timeout;
struct timer_list timeout;
struct work_struct timeout_work;
struct list_head timeout_list;
struct list_head icq_list;

@ -17,20 +17,19 @@
#include <linux/types.h>
struct nvme_bar {
__u64 cap; /* Controller Capabilities */
__u32 vs; /* Version */
__u32 intms; /* Interrupt Mask Set */
__u32 intmc; /* Interrupt Mask Clear */
__u32 cc; /* Controller Configuration */
__u32 rsvd1; /* Reserved */
__u32 csts; /* Controller Status */
__u32 nssr; /* Subsystem Reset */
__u32 aqa; /* Admin Queue Attributes */
__u64 asq; /* Admin SQ Base Address */
__u64 acq; /* Admin CQ Base Address */
__u32 cmbloc; /* Controller Memory Buffer Location */
__u32 cmbsz; /* Controller Memory Buffer Size */
enum {
NVME_REG_CAP = 0x0000, /* Controller Capabilities */
NVME_REG_VS = 0x0008, /* Version */
NVME_REG_INTMS = 0x000c, /* Interrupt Mask Set */
NVME_REG_INTMC = 0x0010, /* Interrupt Mask Clear */
NVME_REG_CC = 0x0014, /* Controller Configuration */
NVME_REG_CSTS = 0x001c, /* Controller Status */
NVME_REG_NSSR = 0x0020, /* NVM Subsystem Reset */
NVME_REG_AQA = 0x0024, /* Admin Queue Attributes */
NVME_REG_ASQ = 0x0028, /* Admin SQ Base Address */
NVME_REG_ACQ = 0x0030, /* Admin CQ Base Address */
NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */
NVME_REG_CMBSZ = 0x003c, /* Controller Memory Buffer Size */
};
#define NVME_CAP_MQES(cap) ((cap) & 0xffff)
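
With the register layout expressed as byte offsets instead of a struct, MMIO access becomes plain pointer arithmetic on the mapped BAR, and the same offsets are what the nvme_ctrl_ops reg_read32()/reg_write32() callbacks receive in their off argument. Roughly (illustrative fragment, assuming a void __iomem *bar mapping as in the PCI driver):

    u32 csts, vs;

    /* before this series: csts = readl(&dev->bar->csts);  (struct nvme_bar field) */
    csts = readl(bar + NVME_REG_CSTS);
    vs   = readl(bar + NVME_REG_VS);
    writel(0x4E564D65, bar + NVME_REG_NSSR);   /* "NVMe" magic, cf. nvme_reset_subsystem() */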

@ -307,7 +307,7 @@ header-y += nfs_mount.h
header-y += nl80211.h
header-y += n_r3964.h
header-y += nubus.h
header-y += nvme.h
header-y += nvme_ioctl.h
header-y += nvram.h
header-y += omap3isp.h
header-y += omapfb.h