/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#ifndef RRPC_H_
#define RRPC_H_

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>

#include <linux/lightnvm.h>

/* Run GC only if less than 1/X of the blocks are free */
#define GC_LIMIT_INVERSE 10
#define GC_TIME_SECS 100
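/* With GC_LIMIT_INVERSE = 10 this means GC starts once fewer than one
 * tenth of the blocks are free.
 */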

#define RRPC_SECTOR (512)
#define RRPC_EXPOSED_PAGE_SIZE (4096)

#define NR_PHY_IN_LOG (RRPC_EXPOSED_PAGE_SIZE / RRPC_SECTOR)
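/*
 * Example: with a 4096 byte exposed page and 512 byte sectors,
 * NR_PHY_IN_LOG = 4096 / 512 = 8, so one logical page spans eight
 * physical sectors. A bio beginning at bi_sector 24 thus maps to
 * logical page address 24 / 8 = 3.
 */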

struct rrpc_inflight {
	struct list_head reqs;
	spinlock_t lock;
};

struct rrpc_inflight_rq {
	struct list_head list;
	sector_t l_start;
	sector_t l_end;
};

struct rrpc_rq {
	struct rrpc_inflight_rq inflight_rq;
	unsigned long flags;
};
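
/*
 * rrpc_rq is the per-request private state; it is carried in the PDU
 * area of a struct nvm_rq and recovered with nvm_rq_to_pdu() (see
 * rrpc_get_inflight_rq() below).
 */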

struct rrpc_block {
	int id;				/* id inside of LUN */
	struct rrpc_lun *rlun;

	struct list_head prio;		/* LUN CG list */
	struct list_head list;		/* LUN free, used, bb list */

#define MAX_INVALID_PAGES_STORAGE 8
	/* Bitmap for invalid page entries */
	unsigned long invalid_pages[MAX_INVALID_PAGES_STORAGE];
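	/*
	 * Eight unsigned longs give 8 * BITS_PER_LONG bits, i.e. up to
	 * 512 trackable pages per block on a 64-bit kernel.
	 */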
	/* points to the next writable page within a block */
	unsigned int next_page;
	/* number of pages that are invalid, wrt host page size */
	unsigned int nr_invalid_pages;

	int state;

	spinlock_t lock;
	atomic_t data_cmnt_size;	/* data pages committed to stable storage */
};
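
/*
 * LUNs are handed to a target in physical-layout order
 * (ch:0,lun:0, ..., ch:0,lun:n, ch:1,lun:0, ..., ch:m,lun:n), so id
 * below is the target-local index into that sequence; bppa is, by its
 * name, the LUN's base physical page address.
 */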
struct rrpc_lun {
	struct rrpc *rrpc;

	int id;
	struct ppa_addr bppa;

	struct rrpc_block *cur, *gc_cur;
	struct rrpc_block *blocks;	/* Reference to block allocation */

	struct list_head prio_list;	/* Blocks that may be GC'ed */
	struct list_head wblk_list;	/* Queued blocks to be written to */

	/* lun block lists */
	struct list_head used_list;	/* In-use blocks */
	struct list_head free_list;	/* Unused blocks, i.e. released
					 * and ready for use
					 */
	struct list_head bb_list;	/* Bad blocks. Mutually exclusive with
					 * free_list and used_list
					 */
	unsigned int nr_free_blocks;	/* Number of unused blocks */

	struct work_struct ws_gc;

	int reserved_blocks;

	spinlock_t lock;
};

struct rrpc {
	/* instance must be kept at the top to resolve rrpc in unprep */
	struct nvm_tgt_instance instance;

	struct nvm_tgt_dev *dev;
	struct gendisk *disk;

	sector_t soffset; /* logical sector offset */

	int nr_luns;
	struct rrpc_lun *luns;

	/* calculated values */
	unsigned long long nr_sects;

	/* Write strategy variables. Move these into a per-strategy
	 * structure.
	 */
	atomic_t next_lun; /* Whenever a page is written, this is updated
			    * to point to the next write lun
			    */

	spinlock_t bio_lock;
	struct bio_list requeue_bios;
	struct work_struct ws_requeue;

	/* Simple translation map of logical addresses to physical addresses.
	 * The logical addresses are known by the host system, while the
	 * physical addresses are used when writing to the disk block device.
	 */
	struct rrpc_addr *trans_map;
	/* also store a reverse map for garbage collection */
	struct rrpc_rev_addr *rev_trans_map;
	spinlock_t rev_lock;

	struct rrpc_inflight inflights;

	mempool_t *addr_pool;
	mempool_t *page_pool;
	mempool_t *gcb_pool;
	mempool_t *rq_pool;

	struct timer_list gc_timer;
	struct workqueue_struct *krqd_wq;
	struct workqueue_struct *kgc_wq;
};

struct rrpc_block_gc {
	struct rrpc *rrpc;
	struct rrpc_block *rblk;
	struct work_struct ws_gc;
};

/* Logical to physical mapping */
struct rrpc_addr {
	u64 addr;
	struct rrpc_block *rblk;
};

/* Physical to logical mapping */
struct rrpc_rev_addr {
	u64 addr;
};
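
/*
 * Sketch of how the two maps are kept consistent (illustrative, not a
 * declaration from this header):
 *
 *	spin_lock(&rrpc->rev_lock);
 *	rrpc->trans_map[laddr].addr = paddr;
 *	rrpc->trans_map[laddr].rblk = rblk;
 *	rrpc->rev_trans_map[paddr].addr = laddr;
 *	spin_unlock(&rrpc->rev_lock);
 *
 * GC walks a victim block's physical pages and uses rev_trans_map to
 * find the logical addresses that must be relocated.
 */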

static inline struct ppa_addr rrpc_linear_to_generic_addr(struct nvm_geo *geo,
							  struct ppa_addr r)
{
	struct ppa_addr l;
	int secs, pgs;
	sector_t ppa = r.ppa;

	l.ppa = 0;

	div_u64_rem(ppa, geo->sec_per_pg, &secs);
	l.g.sec = secs;

	sector_div(ppa, geo->sec_per_pg);
	div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
	l.g.pg = pgs;

	return l;
}
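
/*
 * Example (assuming sec_per_pg = 4 and pgs_per_blk = 256): a linear
 * address of 1034 yields sec = 1034 % 4 = 2 and
 * pg = (1034 / 4) % 256 = 258 % 256 = 2, i.e. sector 2 of page 2.
 */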

static inline struct ppa_addr rrpc_recov_addr(struct nvm_tgt_dev *dev, u64 pba)
{
	return linear_to_generic_addr(&dev->geo, pba);
}

static inline u64 rrpc_blk_to_ppa(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct nvm_geo *geo = &dev->geo;
	struct rrpc_lun *rlun = rblk->rlun;

	return (rlun->id * geo->sec_per_lun) + (rblk->id * geo->sec_per_blk);
}
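
/*
 * Example (assuming sec_per_blk = 1024 and sec_per_lun = 262144): block 3
 * of LUN 2 starts at 2 * 262144 + 3 * 1024 = 527360, the block's first
 * sector in the device's linear address space.
 */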

static inline sector_t rrpc_get_laddr(struct bio *bio)
{
	return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
}

static inline unsigned int rrpc_get_pages(struct bio *bio)
{
	return bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
}

static inline sector_t rrpc_get_sector(sector_t laddr)
{
	return laddr * NR_PHY_IN_LOG;
}
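
/*
 * Two closed intervals [r->l_start, r->l_end] and
 * [laddr_start, laddr_end] overlap iff each begins no later than the
 * other ends.
 */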
static inline int request_intersects(struct rrpc_inflight_rq *r,
				     sector_t laddr_start, sector_t laddr_end)
{
	return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
}

static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
			     unsigned int pages, struct rrpc_inflight_rq *r)
{
	sector_t laddr_end = laddr + pages - 1;
	struct rrpc_inflight_rq *rtmp;

	WARN_ON(irqs_disabled());

	spin_lock_irq(&rrpc->inflights.lock);
	list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
		if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
			/* existing, overlapping request, come back later */
			spin_unlock_irq(&rrpc->inflights.lock);
			return 1;
		}
	}

	r->l_start = laddr;
	r->l_end = laddr_end;

	list_add_tail(&r->list, &rrpc->inflights.reqs);
	spin_unlock_irq(&rrpc->inflights.lock);
	return 0;
}

static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
				  unsigned int pages,
				  struct rrpc_inflight_rq *r)
{
	BUG_ON((laddr + pages) > rrpc->nr_sects);

	return __rrpc_lock_laddr(rrpc, laddr, pages, r);
}

static inline struct rrpc_inflight_rq *rrpc_get_inflight_rq(struct nvm_rq *rqd)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);

	return &rrqd->inflight_rq;
}

static inline int rrpc_lock_rq(struct rrpc *rrpc, struct bio *bio,
			       struct nvm_rq *rqd)
{
	sector_t laddr = rrpc_get_laddr(bio);
	unsigned int pages = rrpc_get_pages(bio);
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);

	return rrpc_lock_laddr(rrpc, laddr, pages, r);
}
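
/*
 * Typical caller pattern (a sketch; rrpc_submit_io stands in for the
 * actual submission step):
 *
 *	if (rrpc_lock_rq(rrpc, bio, rqd))
 *		return NVM_IO_REQUEUE;	// overlapping request in flight
 *	rrpc_submit_io(rrpc, bio, rqd, flags);
 *	...
 *	rrpc_unlock_rq(rrpc, rqd);	// in the completion path
 */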

static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
				     struct rrpc_inflight_rq *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rrpc->inflights.lock, flags);
	list_del_init(&r->list);
	spin_unlock_irqrestore(&rrpc->inflights.lock, flags);
}

static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	uint8_t pages = rqd->nr_ppas;

	BUG_ON((r->l_start + pages) > rrpc->nr_sects);

	rrpc_unlock_laddr(rrpc, r);
}

#endif /* RRPC_H_ */