Merge branch 'nvme-4.14' of git://git.infradead.org/nvme into for-linus
Pull NVMe fixes from Christoph:

"Below are a few small fixes for the current merge window:

 - fix string.h compilation failures with the new memcpy_and_pad helper
   (Martin Wilck)
 - fix incorrect dereference of a PCI data structure in the lightnvm
   support code (me)
 - HMB fixes (Akinobu Mita and me)"
commit be1c704329
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1897,6 +1897,8 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 		ctrl->cntlid = le16_to_cpu(id->cntlid);
 		ctrl->hmpre = le32_to_cpu(id->hmpre);
 		ctrl->hmmin = le32_to_cpu(id->hmmin);
+		ctrl->hmminds = le32_to_cpu(id->hmminds);
+		ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
 	}
 
 	kfree(id);
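Both values are parsed straight out of the Identify Controller data alongside the existing hmpre/hmmin: hmminds is the controller's minimum host-memory-buffer descriptor size and hmmaxd its maximum descriptor count (fields added in NVMe 1.3). The PCIe HMB allocation code further down consumes them.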
@@ -2377,10 +2379,11 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
 	nvme_report_ns_ids(ctrl, ns->ns_id, id, ns->eui, ns->nguid, &ns->uuid);
 
-	if (nvme_nvm_ns_supported(ns, id) &&
-	    nvme_nvm_register(ns, disk_name, node)) {
-		dev_warn(ctrl->device, "%s: LightNVM init failure\n", __func__);
-		goto out_free_id;
+	if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
+		if (nvme_nvm_register(ns, disk_name, node)) {
+			dev_warn(ctrl->device, "LightNVM init failure\n");
+			goto out_free_id;
+		}
 	}
 
 	disk = alloc_disk_node(0, node);
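LightNVM detection is now a two-part test that never leaves generic code: the controller must carry NVME_QUIRK_LIGHTNVM (set from the PCI ID table below) and the namespace must report 0x1 in the vendor-specific byte vs[0]. The PCI-specific half of the old nvme_nvm_ns_supported() check moves into the PCI device table, where it belongs.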
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -955,29 +955,3 @@ void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
 	sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
 					&nvm_dev_attr_group);
 }
-
-/* move to shared place when used in multiple places. */
-#define PCI_VENDOR_ID_CNEX 0x1d1d
-#define PCI_DEVICE_ID_CNEX_WL 0x2807
-#define PCI_DEVICE_ID_CNEX_QEMU 0x1f1f
-
-int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
-{
-	struct nvme_ctrl *ctrl = ns->ctrl;
-	/* XXX: this is poking into PCI structures from generic code! */
-	struct pci_dev *pdev = to_pci_dev(ctrl->dev);
-
-	/* QEMU NVMe simulator - PCI ID + Vendor specific bit */
-	if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
-			pdev->device == PCI_DEVICE_ID_CNEX_QEMU &&
-			id->vs[0] == 0x1)
-		return 1;
-
-	/* CNEX Labs - PCI ID + Vendor specific bit */
-	if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
-			pdev->device == PCI_DEVICE_ID_CNEX_WL &&
-			id->vs[0] == 0x1)
-		return 1;
-
-	return 0;
-}
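This is the incorrect PCI dereference called out in the pull message: ctrl->dev is only backed by a struct pci_dev on the PCIe transport, so the to_pci_dev() here was bogus for fabrics controllers, as the in-line XXX comment already admitted. Deleting the function removes the last PCI knowledge from lightnvm.c.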
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -75,6 +75,11 @@ enum nvme_quirks {
 	 * The deepest sleep state should not be used.
 	 */
 	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),
+
+	/*
+	 * Supports the LightNVM command set if indicated in vs[1].
+	 */
+	NVME_QUIRK_LIGHTNVM			= (1 << 6),
 };
 
 /*
@@ -176,8 +181,11 @@ struct nvme_ctrl {
 	u64 ps_max_latency_us;
 	bool apst_enabled;
 
+	/* PCIe only: */
 	u32 hmpre;
 	u32 hmmin;
+	u32 hmminds;
+	u16 hmmaxd;
 
 	/* Fabrics only */
 	u16 sqsize;
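Grouping the four HMB fields under a "PCIe only:" banner mirrors the existing "Fabrics only" comment: a host memory buffer is only meaningful for a locally attached PCIe controller.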
@@ -320,7 +328,6 @@ void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
 int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
 
 #ifdef CONFIG_NVM
-int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
 int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
 void nvme_nvm_unregister(struct nvme_ns *ns);
 int nvme_nvm_register_sysfs(struct nvme_ns *ns);
@@ -339,10 +346,6 @@ static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns)
 	return 0;
 }
 static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {};
-static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
-{
-	return 0;
-}
 static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
 					unsigned long arg)
 {
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1612,21 +1612,23 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
 	dev->host_mem_descs = NULL;
 }
 
-static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
+static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
+		u32 chunk_size)
 {
 	struct nvme_host_mem_buf_desc *descs;
-	u32 chunk_size, max_entries, len;
+	u32 max_entries, len;
 	dma_addr_t descs_dma;
 	int i = 0;
 	void **bufs;
 	u64 size = 0, tmp;
 
-	/* start big and work our way down */
-	chunk_size = min(preferred, (u64)PAGE_SIZE << MAX_ORDER);
-retry:
 	tmp = (preferred + chunk_size - 1);
 	do_div(tmp, chunk_size);
 	max_entries = tmp;
+
+	if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
+		max_entries = dev->ctrl.hmmaxd;
+
 	descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs),
 			&descs_dma, GFP_KERNEL);
 	if (!descs)
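The allocator is split in two: __nvme_alloc_host_mem() now attempts exactly one chunk size handed in by its caller, and the descriptor count is clamped to the controller's hmmaxd limit before the descriptor array is allocated.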
@@ -1650,15 +1652,9 @@ retry:
 		i++;
 	}
 
-	if (!size || (min && size < min)) {
-		dev_warn(dev->ctrl.device,
-			"failed to allocate host memory buffer.\n");
+	if (!size)
 		goto out_free_bufs;
-	}
 
-	dev_info(dev->ctrl.device,
-		"allocated %lld MiB host memory buffer.\n",
-		size >> ilog2(SZ_1M));
 	dev->nr_host_mem_descs = i;
 	dev->host_mem_size = size;
 	dev->host_mem_descs = descs;
@@ -1679,21 +1675,35 @@ out_free_descs:
 	dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
 			descs_dma);
 out:
-	/* try a smaller chunk size if we failed early */
-	if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) {
-		chunk_size /= 2;
-		goto retry;
-	}
 	dev->host_mem_descs = NULL;
 	return -ENOMEM;
 }
 
-static void nvme_setup_host_mem(struct nvme_dev *dev)
+static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
+{
+	u32 chunk_size;
+
+	/* start big and work our way down */
+	for (chunk_size = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
+	     chunk_size >= max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
+	     chunk_size /= 2) {
+		if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
+			if (!min || dev->host_mem_size >= min)
+				return 0;
+			nvme_free_host_mem(dev);
+		}
+	}
+
+	return -ENOMEM;
+}
+
+static int nvme_setup_host_mem(struct nvme_dev *dev)
 {
 	u64 max = (u64)max_host_mem_size_mb * SZ_1M;
 	u64 preferred = (u64)dev->ctrl.hmpre * 4096;
 	u64 min = (u64)dev->ctrl.hmmin * 4096;
 	u32 enable_bits = NVME_HOST_MEM_ENABLE;
+	int ret = 0;
 
 	preferred = min(preferred, max);
 	if (min > max) {
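The retry logic moves out of the allocator into a driving loop with explicit bounds: start at min(preferred, MAX_ORDER_NR_PAGES pages) and halve down to max(hmminds * 4096, two pages), freeing any partial buffer that came in under the controller's stated minimum. As a worked example (assuming 4 KiB pages and MAX_ORDER = 11, so MAX_ORDER_NR_PAGES = 1024): a controller reporting hmpre = 0x20000 asks for a 512 MiB buffer; the first pass tries 4 MiB chunks, then 2 MiB, and so on, stopping as soon as a pass succeeds and satisfies hmmin, or giving up below the 8 KiB floor (or the hmminds floor, if larger). A userspace sketch of the same strategy, for illustration only (the names and the simulated allocator are made up, not kernel code):

/* hmb_fallback.c - compile with: cc -o hmb_fallback hmb_fallback.c */
#include <stdio.h>

#define PAGE_SZ   4096ULL
#define MAX_CHUNK (PAGE_SZ * 1024)	/* stand-in for MAX_ORDER_NR_PAGES pages */

/* Stand-in for __nvme_alloc_host_mem(): pretend chunks above 1 MiB fail. */
static unsigned long long one_pass(unsigned long long preferred,
				   unsigned long long chunk)
{
	return chunk > PAGE_SZ * 256 ? 0 : preferred;
}

static int alloc_host_mem(unsigned long long min,
			  unsigned long long preferred,
			  unsigned long long hmminds)	/* 4 KiB units */
{
	unsigned long long floor = hmminds * 4096;
	unsigned long long chunk, got;

	if (floor < PAGE_SZ * 2)
		floor = PAGE_SZ * 2;
	/* start big and work our way down */
	for (chunk = preferred < MAX_CHUNK ? preferred : MAX_CHUNK;
	     chunk >= floor; chunk /= 2) {
		got = one_pass(preferred, chunk);
		if (got && (!min || got >= min)) {
			printf("ok: %llu MiB in %llu KiB chunks\n",
			       got >> 20, chunk >> 10);
			return 0;
		}
		/* otherwise: free the partial buffer and retry smaller */
	}
	return -1;
}

int main(void)
{
	/* hmpre = 0x20000, hmmin = 0 (4 KiB units) -> prefer 512 MiB */
	return alloc_host_mem(0, 0x20000ULL * 4096, 0) ? 1 : 0;
}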
@@ -1701,7 +1711,7 @@ static void nvme_setup_host_mem(struct nvme_dev *dev)
 			"min host memory (%lld MiB) above limit (%d MiB).\n",
 			min >> ilog2(SZ_1M), max_host_mem_size_mb);
 		nvme_free_host_mem(dev);
-		return;
+		return 0;
 	}
 
 	/*
@@ -1715,12 +1725,21 @@ static void nvme_setup_host_mem(struct nvme_dev *dev)
 	}
 
 	if (!dev->host_mem_descs) {
-		if (nvme_alloc_host_mem(dev, min, preferred))
-			return;
+		if (nvme_alloc_host_mem(dev, min, preferred)) {
+			dev_warn(dev->ctrl.device,
+				"failed to allocate host memory buffer.\n");
+			return 0; /* controller must work without HMB */
+		}
+
+		dev_info(dev->ctrl.device,
+			"allocated %lld MiB host memory buffer.\n",
+			dev->host_mem_size >> ilog2(SZ_1M));
 	}
 
-	if (nvme_set_host_mem(dev, enable_bits))
+	ret = nvme_set_host_mem(dev, enable_bits);
+	if (ret)
 		nvme_free_host_mem(dev);
+	return ret;
 }
 
 static int nvme_setup_io_queues(struct nvme_dev *dev)
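nvme_setup_host_mem() now reports a status: a failed allocation only logs a warning and returns 0, since an HMB is optional and the controller must work without one, while the result of the Set Features enable is returned to the caller, with the buffer freed on failure. The warning/info prints also move here from the allocator, reading dev->host_mem_size instead of a local.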
@@ -2164,8 +2183,11 @@ static void nvme_reset_work(struct work_struct *work)
 				 "unable to allocate dma for dbbuf\n");
 	}
 
-	if (dev->ctrl.hmpre)
-		nvme_setup_host_mem(dev);
+	if (dev->ctrl.hmpre) {
+		result = nvme_setup_host_mem(dev);
+		if (result < 0)
+			goto out;
+	}
 
 	result = nvme_setup_io_queues(dev);
 	if (result)
@@ -2497,6 +2519,10 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 	{ PCI_DEVICE(0x144d, 0xa822),   /* Samsung PM1725a */
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+	{ PCI_DEVICE(0x1d1d, 0x1f1f),	/* LightNVM qemu device */
+		.driver_data = NVME_QUIRK_LIGHTNVM, },
+	{ PCI_DEVICE(0x1d1d, 0x2807),	/* CNEX WL */
+		.driver_data = NVME_QUIRK_LIGHTNVM, },
 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
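The two new table entries carry over exactly the IDs that the deleted nvme_nvm_ns_supported() hard-coded (CNEX Labs vendor 0x1d1d, QEMU device 0x1f1f, WL device 0x2807), so existing LightNVM hardware keeps working, with the detection now expressed declaratively.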
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -226,7 +226,9 @@ struct nvme_id_ctrl {
 	__le16			mntmt;
 	__le16			mxtmt;
 	__le32			sanicap;
-	__u8			rsvd332[180];
+	__le32			hmminds;
+	__le16			hmmaxd;
+	__u8			rsvd338[174];
 	__u8			sqes;
 	__u8			cqes;
 	__le16			maxcmd;
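The arithmetic keeps the identify layout intact: hmminds takes bytes 332-335 and hmmaxd bytes 336-337, so the reserved gap shrinks from rsvd332[180] to rsvd338[174], and both versions still end at byte 511 (332 + 180 = 338 + 174 = 512), where sqes begins.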
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -434,20 +434,9 @@ __FORTIFY_INLINE char *strcpy(char *p, const char *q)
  * @count: The number of bytes to copy
  * @pad: Character to use for padding if space is left in destination.
  */
-__FORTIFY_INLINE void memcpy_and_pad(void *dest, size_t dest_len,
-				     const void *src, size_t count, int pad)
+static inline void memcpy_and_pad(void *dest, size_t dest_len,
+				  const void *src, size_t count, int pad)
 {
-	size_t dest_size = __builtin_object_size(dest, 0);
-	size_t src_size = __builtin_object_size(src, 0);
-
-	if (__builtin_constant_p(dest_len) && __builtin_constant_p(count)) {
-		if (dest_size < dest_len && dest_size < count)
-			__write_overflow();
-		else if (src_size < dest_len && src_size < count)
-			__read_overflow3();
-	}
-	if (dest_size < dest_len)
-		fortify_panic(__func__);
 	if (dest_len > count) {
 		memcpy(dest, src, count);
 		memset(dest + count, pad, dest_len - count);
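This is the memcpy_and_pad build fix from the pull message: dropping __FORTIFY_INLINE and the __builtin_object_size() checks means the helper no longer depends on machinery that only exists with CONFIG_FORTIFY_SOURCE enabled, so string.h compiles in every configuration. The semantics are unchanged: copy count bytes and fill the remainder of a fixed-width destination with the pad byte. A minimal userspace approximation, for illustration only (the serial-number field is an invented example, and the else branch follows the kernel helper's documented contract rather than the truncated hunk above):

/* pad_demo.c - compile with: cc -o pad_demo pad_demo.c */
#include <stdio.h>
#include <string.h>

/* Same contract as the kernel helper: copy count bytes of src into dest,
 * then fill the rest of dest_len with pad. */
static inline void memcpy_and_pad(void *dest, size_t dest_len,
				  const void *src, size_t count, int pad)
{
	if (dest_len > count) {
		memcpy(dest, src, count);
		memset((char *)dest + count, pad, dest_len - count);
	} else {
		memcpy(dest, src, dest_len);
	}
}

int main(void)
{
	char sn[20];	/* e.g. a fixed 20-byte serial number field */

	memcpy_and_pad(sn, sizeof(sn), "S3X9NF0K", 8, ' ');
	printf("\"%.20s\"\n", sn);	/* "S3X9NF0K            " */
	return 0;
}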