lightnvm: dynamic DMA pool entry size
Currently lightnvm and pblk use a single DMA pool, for which the entry size is always equal to PAGE_SIZE. The contents of each entry allocated from the DMA pool consist of a PPA list (8 bytes * 64), leaving 56 bytes * 64 of space for metadata. Since the metadata field can be bigger, such as 128 bytes, the static size does not cover this use case. This patch adds support for I/O metadata above 56 bytes by changing the DMA pool size based on the device metadata size, and allows pblk to use OOB metadata >= 16 bytes. Reviewed-by: Javier González <javier@cnexlabs.com> Signed-off-by: Igor Konopko <igor.j.konopko@intel.com> Signed-off-by: Matias Bjørling <mb@lightnvm.io> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
faa79f27f0
commit
24828d0536
|
@ -1140,7 +1140,7 @@ EXPORT_SYMBOL(nvm_alloc_dev);
|
|||
|
||||
int nvm_register(struct nvm_dev *dev)
|
||||
{
|
||||
int ret;
|
||||
int ret, exp_pool_size;
|
||||
|
||||
if (!dev->q || !dev->ops)
|
||||
return -EINVAL;
|
||||
|
@ -1149,7 +1149,12 @@ int nvm_register(struct nvm_dev *dev)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
|
||||
exp_pool_size = max_t(int, PAGE_SIZE,
|
||||
(NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
|
||||
exp_pool_size = round_up(exp_pool_size, PAGE_SIZE);
|
||||
|
||||
dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
|
||||
exp_pool_size);
|
||||
if (!dev->dma_pool) {
|
||||
pr_err("nvm: could not create dma pool\n");
|
||||
nvm_free(dev);
|
||||
|
|
|
@ -250,8 +250,8 @@ int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
|
|||
if (rqd->nr_ppas == 1)
|
||||
return 0;
|
||||
|
||||
rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
|
||||
rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
|
||||
rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk);
|
||||
rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size(pblk);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -846,8 +846,8 @@ int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
|
|||
if (!meta_list)
|
||||
return -ENOMEM;
|
||||
|
||||
ppa_list = meta_list + pblk_dma_meta_size;
|
||||
dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
|
||||
ppa_list = meta_list + pblk_dma_meta_size(pblk);
|
||||
dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);
|
||||
|
||||
next_rq:
|
||||
memset(&rqd, 0, sizeof(struct nvm_rq));
|
||||
|
|
|
@ -406,7 +406,7 @@ static int pblk_core_init(struct pblk *pblk)
|
|||
pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
|
||||
|
||||
pblk->oob_meta_size = geo->sos;
|
||||
if (pblk->oob_meta_size != sizeof(struct pblk_sec_meta)) {
|
||||
if (pblk->oob_meta_size < sizeof(struct pblk_sec_meta)) {
|
||||
pblk_err(pblk, "Unsupported metadata size\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
|
|
@ -481,8 +481,8 @@ static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
|
|||
if (!meta_list)
|
||||
return -ENOMEM;
|
||||
|
||||
ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
|
||||
dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
|
||||
ppa_list = (void *)(meta_list) + pblk_dma_meta_size(pblk);
|
||||
dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);
|
||||
|
||||
data = kcalloc(pblk->max_write_pgs, geo->csecs, GFP_KERNEL);
|
||||
if (!data) {
|
||||
|
|
|
@ -104,7 +104,6 @@ enum {
|
|||
PBLK_RL_LOW = 4
|
||||
};
|
||||
|
||||
#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * NVM_MAX_VLBA)
|
||||
#define pblk_dma_ppa_size (sizeof(u64) * NVM_MAX_VLBA)
|
||||
|
||||
/* write buffer completion context */
|
||||
|
@ -1388,4 +1387,9 @@ static inline struct pblk_sec_meta *pblk_get_meta(struct pblk *pblk,
|
|||
{
|
||||
return meta + pblk->oob_meta_size * index;
|
||||
}
|
||||
|
||||
static inline int pblk_dma_meta_size(struct pblk *pblk)
|
||||
{
|
||||
return pblk->oob_meta_size * NVM_MAX_VLBA;
|
||||
}
|
||||
#endif /* PBLK_H_ */
|
||||
|
|
|
@ -732,11 +732,12 @@ static int nvme_nvm_submit_io_sync(struct nvm_dev *dev, struct nvm_rq *rqd)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
|
||||
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name,
|
||||
int size)
|
||||
{
|
||||
struct nvme_ns *ns = nvmdev->q->queuedata;
|
||||
|
||||
return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
|
||||
return dma_pool_create(name, ns->ctrl->dev, size, PAGE_SIZE, 0);
|
||||
}
|
||||
|
||||
static void nvme_nvm_destroy_dma_pool(void *pool)
|
||||
|
|
|
@ -90,7 +90,7 @@ typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int,
|
|||
struct nvm_chk_meta *);
|
||||
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
|
||||
typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *);
|
||||
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
|
||||
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int);
|
||||
typedef void (nvm_destroy_dma_pool_fn)(void *);
|
||||
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
|
||||
dma_addr_t *);
|
||||
|
|
Loading…
Reference in New Issue