nvme-pci: implement the HMB entry number and size limitations
Adds support for the new Host Memory Buffer Minimum Descriptor Entry Size and Host Memory Maximum Descriptors Entries fields that were added in TP 4002 HMB Enhancements. These allow the controller to advertise limits for the usable number of segments in the host memory buffer, as well as a minimum usable per-segment size. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Keith Busch <keith.busch@intel.com>
This commit is contained in:
parent
9620cfba97
commit
044a9df1a7
|
@@ -1897,6 +1897,8 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
|
||||||
ctrl->cntlid = le16_to_cpu(id->cntlid);
|
ctrl->cntlid = le16_to_cpu(id->cntlid);
|
||||||
ctrl->hmpre = le32_to_cpu(id->hmpre);
|
ctrl->hmpre = le32_to_cpu(id->hmpre);
|
||||||
ctrl->hmmin = le32_to_cpu(id->hmmin);
|
ctrl->hmmin = le32_to_cpu(id->hmmin);
|
||||||
|
ctrl->hmminds = le32_to_cpu(id->hmminds);
|
||||||
|
ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
|
||||||
}
|
}
|
||||||
|
|
||||||
kfree(id);
|
kfree(id);
|
||||||
|
|
|
@@ -181,8 +181,11 @@ struct nvme_ctrl {
|
||||||
u64 ps_max_latency_us;
|
u64 ps_max_latency_us;
|
||||||
bool apst_enabled;
|
bool apst_enabled;
|
||||||
|
|
||||||
|
/* PCIe only: */
|
||||||
u32 hmpre;
|
u32 hmpre;
|
||||||
u32 hmmin;
|
u32 hmmin;
|
||||||
|
u32 hmminds;
|
||||||
|
u16 hmmaxd;
|
||||||
|
|
||||||
/* Fabrics only */
|
/* Fabrics only */
|
||||||
u16 sqsize;
|
u16 sqsize;
|
||||||
|
|
|
@@ -1625,6 +1625,10 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
|
||||||
tmp = (preferred + chunk_size - 1);
|
tmp = (preferred + chunk_size - 1);
|
||||||
do_div(tmp, chunk_size);
|
do_div(tmp, chunk_size);
|
||||||
max_entries = tmp;
|
max_entries = tmp;
|
||||||
|
|
||||||
|
if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
|
||||||
|
max_entries = dev->ctrl.hmmaxd;
|
||||||
|
|
||||||
descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs),
|
descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs),
|
||||||
&descs_dma, GFP_KERNEL);
|
&descs_dma, GFP_KERNEL);
|
||||||
if (!descs)
|
if (!descs)
|
||||||
|
@@ -1681,7 +1685,7 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
|
||||||
|
|
||||||
/* start big and work our way down */
|
/* start big and work our way down */
|
||||||
for (chunk_size = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
|
for (chunk_size = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
|
||||||
chunk_size >= PAGE_SIZE * 2;
|
chunk_size >= max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
|
||||||
chunk_size /= 2) {
|
chunk_size /= 2) {
|
||||||
if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
|
if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
|
||||||
if (!min || dev->host_mem_size >= min)
|
if (!min || dev->host_mem_size >= min)
|
||||||
|
|
|
@@ -226,7 +226,9 @@ struct nvme_id_ctrl {
|
||||||
__le16 mntmt;
|
__le16 mntmt;
|
||||||
__le16 mxtmt;
|
__le16 mxtmt;
|
||||||
__le32 sanicap;
|
__le32 sanicap;
|
||||||
__u8 rsvd332[180];
|
__le32 hmminds;
|
||||||
|
__le16 hmmaxd;
|
||||||
|
__u8 rsvd338[174];
|
||||||
__u8 sqes;
|
__u8 sqes;
|
||||||
__u8 cqes;
|
__u8 cqes;
|
||||||
__le16 maxcmd;
|
__le16 maxcmd;
|
||||||
|
|
Loading…
Reference in New Issue