Merge branch 'for-4.5/lightnvm' of git://git.kernel.dk/linux-block
Pull lightnvm fixes and updates from Jens Axboe:
 "This should have been part of the drivers branch, but it arrived a
  bit late and wasn't based on the official core block driver branch.
  So they got a small scolding, but got a pass since it's still new.
  Hence it's in a separate branch.

  This is mostly pure fixes, contained to lightnvm/, and minor feature
  additions"

* 'for-4.5/lightnvm' of git://git.kernel.dk/linux-block: (26 commits)
  lightnvm: ensure that nvm_dev_ops can be used without CONFIG_NVM
  lightnvm: introduce factory reset
  lightnvm: use system block for mm initialization
  lightnvm: introduce ioctl to initialize device
  lightnvm: core on-disk initialization
  lightnvm: introduce mlc lower page table mappings
  lightnvm: add mccap support
  lightnvm: manage open and closed blocks separately
  lightnvm: fix missing grown bad block type
  lightnvm: reference rrpc lun in rrpc block
  lightnvm: introduce nvm_submit_ppa
  lightnvm: move rq->error to nvm_rq->error
  lightnvm: support multiple ppas in nvm_erase_ppa
  lightnvm: move the pages per block check out of the loop
  lightnvm: sectors first in ppa list
  lightnvm: fix locking and mempool in rrpc_lun_gc
  lightnvm: put block back to gc list on its reclaim fail
  lightnvm: check bi_error in gc
  lightnvm: return the get_bb_tbl return value
  lightnvm: refactor end_io functions for sync
  ...
commit 0a13daedf7
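Two of the new ioctls added by this series, NVM_DEV_INIT and NVM_DEV_FACTORY, are wired up in nvm_ctl_ioctl() in the core.c diff below. As a rough illustration of how the init path is meant to be driven from userspace — assuming the control node is /dev/lightnvm/control (as for the pre-existing create/remove ioctls) and that the uapi header exports the NVM_DEV_INIT number and the struct nvm_ioctl_dev_init fields visible in this diff — a minimal sketch, not part of the series:

/* init_dev.c - minimal userspace sketch, illustrative only */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/lightnvm.h>	/* assumed: struct nvm_ioctl_dev_init, NVM_DEV_INIT */

int main(void)
{
	struct nvm_ioctl_dev_init init;
	int fd = open("/dev/lightnvm/control", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&init, 0, sizeof(init));
	strncpy(init.dev, "nvme0n1", DISK_NAME_LEN - 1);	/* device to initialize */
	strncpy(init.mmtype, "gennvm", NVM_MMTYPE_LEN);		/* media manager to bind */

	/* writes the on-disk system block and registers the media manager */
	if (ioctl(fd, NVM_DEV_INIT, &init))
		perror("NVM_DEV_INIT");
	return 0;
}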
drivers/block/null_blk.c

@@ -436,9 +436,8 @@ static void null_del_dev(struct nullb *nullb)
 static void null_lnvm_end_io(struct request *rq, int error)
 {
 	struct nvm_rq *rqd = rq->end_io_data;
-	struct nvm_dev *dev = rqd->dev;
 
-	dev->mt->end_io(rqd, error);
+	nvm_end_io(rqd, error);
 
 	blk_put_request(rq);
 }
drivers/lightnvm/Makefile

@@ -2,6 +2,6 @@
 # Makefile for Open-Channel SSDs.
 #
 
-obj-$(CONFIG_NVM)		:= core.o
+obj-$(CONFIG_NVM)		:= core.o sysblk.o
 obj-$(CONFIG_NVM_GENNVM) 	+= gennvm.o
 obj-$(CONFIG_NVM_RRPC)		+= rrpc.o
drivers/lightnvm/core.c

@@ -27,6 +27,7 @@
 #include <linux/module.h>
 #include <linux/miscdevice.h>
 #include <linux/lightnvm.h>
+#include <linux/sched/sysctl.h>
 #include <uapi/linux/lightnvm.h>
 
 static LIST_HEAD(nvm_targets);
@@ -105,6 +106,9 @@ struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
 	lockdep_assert_held(&nvm_lock);
 
 	list_for_each_entry(mt, &nvm_mgrs, list) {
+		if (strncmp(dev->sb.mmtype, mt->name, NVM_MMTYPE_LEN))
+			continue;
+
 		ret = mt->register_mgr(dev);
 		if (ret < 0) {
 			pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
@@ -166,6 +170,20 @@ static struct nvm_dev *nvm_find_nvm_dev(const char *name)
 	return NULL;
 }
 
+struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *dev, struct nvm_lun *lun,
+							unsigned long flags)
+{
+	return dev->mt->get_blk_unlocked(dev, lun, flags);
+}
+EXPORT_SYMBOL(nvm_get_blk_unlocked);
+
+/* Assumes that all valid pages have already been moved on release to bm */
+void nvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
+{
+	return dev->mt->put_blk_unlocked(dev, blk);
+}
+EXPORT_SYMBOL(nvm_put_blk_unlocked);
+
 struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
 							unsigned long flags)
 {
@@ -192,6 +210,206 @@ int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
 }
 EXPORT_SYMBOL(nvm_erase_blk);
 
+void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+	int i;
+
+	if (rqd->nr_pages > 1) {
+		for (i = 0; i < rqd->nr_pages; i++)
+			rqd->ppa_list[i] = dev_to_generic_addr(dev,
+							rqd->ppa_list[i]);
+	} else {
+		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
+	}
+}
+EXPORT_SYMBOL(nvm_addr_to_generic_mode);
+
+void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+	int i;
+
+	if (rqd->nr_pages > 1) {
+		for (i = 0; i < rqd->nr_pages; i++)
+			rqd->ppa_list[i] = generic_to_dev_addr(dev,
+							rqd->ppa_list[i]);
+	} else {
+		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
+	}
+}
+EXPORT_SYMBOL(nvm_generic_to_addr_mode);
+
+int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
+					struct ppa_addr *ppas, int nr_ppas)
+{
+	int i, plane_cnt, pl_idx;
+
+	if (dev->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
+		rqd->nr_pages = 1;
+		rqd->ppa_addr = ppas[0];
+
+		return 0;
+	}
+
+	plane_cnt = (1 << dev->plane_mode);
+	rqd->nr_pages = plane_cnt * nr_ppas;
+
+	if (dev->ops->max_phys_sect < rqd->nr_pages)
+		return -EINVAL;
+
+	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
+	if (!rqd->ppa_list) {
+		pr_err("nvm: failed to allocate dma memory\n");
+		return -ENOMEM;
+	}
+
+	for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
+		for (i = 0; i < nr_ppas; i++) {
+			ppas[i].g.pl = pl_idx;
+			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(nvm_set_rqd_ppalist);
+
+void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+	if (!rqd->ppa_list)
+		return;
+
+	nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
+}
+EXPORT_SYMBOL(nvm_free_rqd_ppalist);
+
+int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
+{
+	struct nvm_rq rqd;
+	int ret;
+
+	if (!dev->ops->erase_block)
+		return 0;
+
+	memset(&rqd, 0, sizeof(struct nvm_rq));
+
+	ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas);
+	if (ret)
+		return ret;
+
+	nvm_generic_to_addr_mode(dev, &rqd);
+
+	ret = dev->ops->erase_block(dev, &rqd);
+
+	nvm_free_rqd_ppalist(dev, &rqd);
+
+	return ret;
+}
+EXPORT_SYMBOL(nvm_erase_ppa);
+
+void nvm_end_io(struct nvm_rq *rqd, int error)
+{
+	rqd->error = error;
+	rqd->end_io(rqd);
+}
+EXPORT_SYMBOL(nvm_end_io);
+
+static void nvm_end_io_sync(struct nvm_rq *rqd)
+{
+	struct completion *waiting = rqd->wait;
+
+	rqd->wait = NULL;
+
+	complete(waiting);
+}
+
+int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
+				int opcode, int flags, void *buf, int len)
+{
+	DECLARE_COMPLETION_ONSTACK(wait);
+	struct nvm_rq rqd;
+	struct bio *bio;
+	int ret;
+	unsigned long hang_check;
+
+	bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
+	if (IS_ERR_OR_NULL(bio))
+		return -ENOMEM;
+
+	memset(&rqd, 0, sizeof(struct nvm_rq));
+	ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas);
+	if (ret) {
+		bio_put(bio);
+		return ret;
+	}
+
+	rqd.opcode = opcode;
+	rqd.bio = bio;
+	rqd.wait = &wait;
+	rqd.dev = dev;
+	rqd.end_io = nvm_end_io_sync;
+	rqd.flags = flags;
+	nvm_generic_to_addr_mode(dev, &rqd);
+
+	ret = dev->ops->submit_io(dev, &rqd);
+
+	/* Prevent hang_check timer from firing at us during very long I/O */
+	hang_check = sysctl_hung_task_timeout_secs;
+	if (hang_check)
+		while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
+	else
+		wait_for_completion_io(&wait);
+
+	nvm_free_rqd_ppalist(dev, &rqd);
+
+	return rqd.error;
+}
+EXPORT_SYMBOL(nvm_submit_ppa);
+
+static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
+{
+	int i;
+
+	dev->lps_per_blk = dev->pgs_per_blk;
+	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
+	if (!dev->lptbl)
+		return -ENOMEM;
+
+	/* Just a linear array */
+	for (i = 0; i < dev->lps_per_blk; i++)
+		dev->lptbl[i] = i;
+
+	return 0;
+}
+
+static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
+{
+	int i, p;
+	struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;
+
+	if (!mlc->num_pairs)
+		return 0;
+
+	dev->lps_per_blk = mlc->num_pairs;
+	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
+	if (!dev->lptbl)
+		return -ENOMEM;
+
+	/* The lower page table encoding consists of a list of bytes, where each
+	 * has a lower and an upper half. The first half byte maintains the
+	 * increment value and every value after is an offset added to the
+	 * previous incrementation value */
+	dev->lptbl[0] = mlc->pairs[0] & 0xF;
+	for (i = 1; i < dev->lps_per_blk; i++) {
+		p = mlc->pairs[i >> 1];
+		if (i & 0x1) /* upper */
+			dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
+		else /* lower */
+			dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
+	}
+
+	return 0;
+}
+
 static int nvm_core_init(struct nvm_dev *dev)
 {
 	struct nvm_id *id = &dev->identity;
@@ -206,6 +424,7 @@ static int nvm_core_init(struct nvm_dev *dev)
 	dev->sec_size = grp->csecs;
 	dev->oob_size = grp->sos;
 	dev->sec_per_pg = grp->fpg_sz / grp->csecs;
+	dev->mccap = grp->mccap;
 	memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
 
 	dev->plane_mode = NVM_PLANE_SINGLE;
@@ -216,11 +435,23 @@ static int nvm_core_init(struct nvm_dev *dev)
 		return -EINVAL;
 	}
 
-	if (grp->fmtype != 0 && grp->fmtype != 1) {
+	switch (grp->fmtype) {
+	case NVM_ID_FMTYPE_SLC:
+		if (nvm_init_slc_tbl(dev, grp))
+			return -ENOMEM;
+		break;
+	case NVM_ID_FMTYPE_MLC:
+		if (nvm_init_mlc_tbl(dev, grp))
+			return -ENOMEM;
+		break;
+	default:
 		pr_err("nvm: flash type not supported\n");
 		return -EINVAL;
 	}
 
+	if (!dev->lps_per_blk)
+		pr_info("nvm: lower page programming table missing\n");
+
 	if (grp->mpos & 0x020202)
 		dev->plane_mode = NVM_PLANE_DOUBLE;
 	if (grp->mpos & 0x040404)
@@ -238,6 +469,7 @@ static int nvm_core_init(struct nvm_dev *dev)
 							dev->nr_chnls;
 	dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
 	INIT_LIST_HEAD(&dev->online_targets);
+	mutex_init(&dev->mlock);
 
 	return 0;
 }
@@ -249,6 +481,8 @@ static void nvm_free(struct nvm_dev *dev)
 
 	if (dev->mt)
 		dev->mt->unregister_mgr(dev);
+
+	kfree(dev->lptbl);
 }
 
 static int nvm_init(struct nvm_dev *dev)
@@ -338,9 +572,16 @@ int nvm_register(struct request_queue *q, char *disk_name,
 		}
 	}
 
+	ret = nvm_get_sysblock(dev, &dev->sb);
+	if (!ret)
+		pr_err("nvm: device not initialized.\n");
+	else if (ret < 0)
+		pr_err("nvm: err (%d) on device initialization\n", ret);
+
 	/* register device with a supported media manager */
 	down_write(&nvm_lock);
-	dev->mt = nvm_init_mgr(dev);
+	if (ret > 0)
+		dev->mt = nvm_init_mgr(dev);
 	list_add(&dev->devices, &nvm_devices);
 	up_write(&nvm_lock);
 
@@ -788,6 +1029,97 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
 	return __nvm_configure_remove(&remove);
 }
 
+static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
+{
+	info->seqnr = 1;
+	info->erase_cnt = 0;
+	info->version = 1;
+}
+
+static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
+{
+	struct nvm_dev *dev;
+	struct nvm_sb_info info;
+	int ret;
+
+	down_write(&nvm_lock);
+	dev = nvm_find_nvm_dev(init->dev);
+	up_write(&nvm_lock);
+	if (!dev) {
+		pr_err("nvm: device not found\n");
+		return -EINVAL;
+	}
+
+	nvm_setup_nvm_sb_info(&info);
+
+	strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
+	info.fs_ppa.ppa = -1;
+
+	ret = nvm_init_sysblock(dev, &info);
+	if (ret)
+		return ret;
+
+	memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));
+
+	down_write(&nvm_lock);
+	dev->mt = nvm_init_mgr(dev);
+	up_write(&nvm_lock);
+
+	return 0;
+}
+
+static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
+{
+	struct nvm_ioctl_dev_init init;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
+		return -EFAULT;
+
+	if (init.flags != 0) {
+		pr_err("nvm: no flags supported\n");
+		return -EINVAL;
+	}
+
+	init.dev[DISK_NAME_LEN - 1] = '\0';
+
+	return __nvm_ioctl_dev_init(&init);
+}
+
+static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
+{
+	struct nvm_ioctl_dev_factory fact;
+	struct nvm_dev *dev;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
+		return -EFAULT;
+
+	fact.dev[DISK_NAME_LEN - 1] = '\0';
+
+	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
+		return -EINVAL;
+
+	down_write(&nvm_lock);
+	dev = nvm_find_nvm_dev(fact.dev);
+	up_write(&nvm_lock);
+	if (!dev) {
+		pr_err("nvm: device not found\n");
+		return -EINVAL;
+	}
+
+	if (dev->mt) {
+		dev->mt->unregister_mgr(dev);
+		dev->mt = NULL;
+	}
+
+	return nvm_dev_factory(dev, fact.flags);
+}
+
 static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
 {
 	void __user *argp = (void __user *)arg;
@@ -801,6 +1133,10 @@ static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
 		return nvm_ioctl_dev_create(file, argp);
 	case NVM_DEV_REMOVE:
 		return nvm_ioctl_dev_remove(file, argp);
+	case NVM_DEV_INIT:
+		return nvm_ioctl_dev_init(file, argp);
+	case NVM_DEV_FACTORY:
+		return nvm_ioctl_dev_factory(file, argp);
 	}
 	return 0;
 }
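The half-byte delta encoding that nvm_init_mlc_tbl() above decodes is easier to see with concrete numbers. Below is a standalone sketch of the same decode loop with made-up identify bytes (the pair values are illustrative only, not from any real device):

/* decode_lptbl.c - illustrative only; mirrors nvm_init_mlc_tbl()'s loop */
#include <stdio.h>

int main(void)
{
	/* hypothetical bytes from the identify lower-page table */
	unsigned char pairs[] = { 0x10, 0x22 };	/* nibbles, low first: 0,1,2,2 */
	int lptbl[4], i, p;

	lptbl[0] = pairs[0] & 0xF;		/* first lower page index: 0 */
	for (i = 1; i < 4; i++) {
		p = pairs[i >> 1];
		if (i & 0x1)			/* odd index: upper half-byte */
			lptbl[i] = lptbl[i - 1] + ((p & 0xF0) >> 4);
		else				/* even index: lower half-byte */
			lptbl[i] = lptbl[i - 1] + (p & 0xF);
	}

	for (i = 0; i < 4; i++)
		printf("lptbl[%d] = %d\n", i, lptbl[i]);	/* prints 0 1 3 5 */
	return 0;
}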
drivers/lightnvm/gennvm.c

@@ -60,7 +60,8 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
 		lun->vlun.lun_id = i % dev->luns_per_chnl;
 		lun->vlun.chnl_id = i / dev->luns_per_chnl;
 		lun->vlun.nr_free_blocks = dev->blks_per_lun;
-		lun->vlun.nr_inuse_blocks = 0;
+		lun->vlun.nr_open_blocks = 0;
+		lun->vlun.nr_closed_blocks = 0;
 		lun->vlun.nr_bad_blocks = 0;
 	}
 	return 0;
@@ -89,6 +90,7 @@ static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
 
 		list_move_tail(&blk->list, &lun->bb_list);
 		lun->vlun.nr_bad_blocks++;
+		lun->vlun.nr_free_blocks--;
 	}
 
 	return 0;
@@ -133,15 +135,15 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
 		pba = pba - (dev->sec_per_lun * lun_id);
 		blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];
 
-		if (!blk->type) {
+		if (!blk->state) {
 			/* at this point, we don't know anything about the
 			 * block. It's up to the FTL on top to re-etablish the
-			 * block state
+			 * block state. The block is assumed to be open.
 			 */
 			list_move_tail(&blk->list, &lun->used_list);
-			blk->type = 1;
+			blk->state = NVM_BLK_ST_OPEN;
 			lun->vlun.nr_free_blocks--;
-			lun->vlun.nr_inuse_blocks++;
+			lun->vlun.nr_open_blocks++;
 		}
 	}
 
@@ -255,14 +257,14 @@ static void gennvm_unregister(struct nvm_dev *dev)
 	module_put(THIS_MODULE);
 }
 
-static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
+static struct nvm_block *gennvm_get_blk_unlocked(struct nvm_dev *dev,
 				struct nvm_lun *vlun, unsigned long flags)
 {
 	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
 	struct nvm_block *blk = NULL;
 	int is_gc = flags & NVM_IOTYPE_GC;
 
-	spin_lock(&vlun->lock);
+	assert_spin_locked(&vlun->lock);
 
 	if (list_empty(&lun->free_list)) {
 		pr_err_ratelimited("gennvm: lun %u have no free pages available",
@@ -275,85 +277,66 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
 
 	blk = list_first_entry(&lun->free_list, struct nvm_block, list);
 	list_move_tail(&blk->list, &lun->used_list);
-	blk->type = 1;
+	blk->state = NVM_BLK_ST_OPEN;
 
 	lun->vlun.nr_free_blocks--;
-	lun->vlun.nr_inuse_blocks++;
+	lun->vlun.nr_open_blocks++;
 
 out:
 	return blk;
 }
 
+static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
+				struct nvm_lun *vlun, unsigned long flags)
+{
+	struct nvm_block *blk;
+
+	spin_lock(&vlun->lock);
+	blk = gennvm_get_blk_unlocked(dev, vlun, flags);
+	spin_unlock(&vlun->lock);
+	return blk;
+}
+
+static void gennvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
+{
+	struct nvm_lun *vlun = blk->lun;
+	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
+
+	assert_spin_locked(&vlun->lock);
+
+	if (blk->state & NVM_BLK_ST_OPEN) {
+		list_move_tail(&blk->list, &lun->free_list);
+		lun->vlun.nr_open_blocks--;
+		lun->vlun.nr_free_blocks++;
+		blk->state = NVM_BLK_ST_FREE;
+	} else if (blk->state & NVM_BLK_ST_CLOSED) {
+		list_move_tail(&blk->list, &lun->free_list);
+		lun->vlun.nr_closed_blocks--;
+		lun->vlun.nr_free_blocks++;
+		blk->state = NVM_BLK_ST_FREE;
+	} else if (blk->state & NVM_BLK_ST_BAD) {
+		list_move_tail(&blk->list, &lun->bb_list);
+		lun->vlun.nr_bad_blocks++;
+		blk->state = NVM_BLK_ST_BAD;
+	} else {
+		WARN_ON_ONCE(1);
+		pr_err("gennvm: erroneous block type (%lu -> %u)\n",
+							blk->id, blk->state);
+		list_move_tail(&blk->list, &lun->bb_list);
+		lun->vlun.nr_bad_blocks++;
+		blk->state = NVM_BLK_ST_BAD;
+	}
+}
+
 static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
 {
 	struct nvm_lun *vlun = blk->lun;
-	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
 
 	spin_lock(&vlun->lock);
-
-	switch (blk->type) {
-	case 1:
-		list_move_tail(&blk->list, &lun->free_list);
-		lun->vlun.nr_free_blocks++;
-		lun->vlun.nr_inuse_blocks--;
-		blk->type = 0;
-		break;
-	case 2:
-		list_move_tail(&blk->list, &lun->bb_list);
-		lun->vlun.nr_bad_blocks++;
-		lun->vlun.nr_inuse_blocks--;
-		break;
-	default:
-		WARN_ON_ONCE(1);
-		pr_err("gennvm: erroneous block type (%lu -> %u)\n",
-							blk->id, blk->type);
-		list_move_tail(&blk->list, &lun->bb_list);
-		lun->vlun.nr_bad_blocks++;
-		lun->vlun.nr_inuse_blocks--;
-	}
-
+	gennvm_put_blk_unlocked(dev, blk);
 	spin_unlock(&vlun->lock);
 }
 
-static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
-	int i;
-
-	if (rqd->nr_pages > 1) {
-		for (i = 0; i < rqd->nr_pages; i++)
-			rqd->ppa_list[i] = dev_to_generic_addr(dev,
-							rqd->ppa_list[i]);
-	} else {
-		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
-	}
-}
-
-static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
-	int i;
-
-	if (rqd->nr_pages > 1) {
-		for (i = 0; i < rqd->nr_pages; i++)
-			rqd->ppa_list[i] = generic_to_dev_addr(dev,
-							rqd->ppa_list[i]);
-	} else {
-		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
-	}
-}
-
-static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
-	if (!dev->ops->submit_io)
-		return 0;
-
-	/* Convert address space */
-	gennvm_generic_to_addr_mode(dev, rqd);
-
-	rqd->dev = dev;
-	return dev->ops->submit_io(dev, rqd);
-}
-
 static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
 								int type)
 {
@@ -376,7 +359,7 @@ static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
 	blk = &lun->vlun.blocks[ppa->g.blk];
 
 	/* will be moved to bb list on put_blk from target */
-	blk->type = type;
+	blk->state = type;
 }
 
 /* mark block bad. It is expected the target recover from the error. */
@@ -390,77 +373,51 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
 	if (dev->ops->set_bb_tbl(dev, rqd, 1))
 		return;
 
-	gennvm_addr_to_generic_mode(dev, rqd);
+	nvm_addr_to_generic_mode(dev, rqd);
 
 	/* look up blocks and mark them as bad */
 	if (rqd->nr_pages > 1)
 		for (i = 0; i < rqd->nr_pages; i++)
-			gennvm_blk_set_type(dev, &rqd->ppa_list[i], 2);
+			gennvm_blk_set_type(dev, &rqd->ppa_list[i],
+						NVM_BLK_ST_BAD);
 	else
-		gennvm_blk_set_type(dev, &rqd->ppa_addr, 2);
+		gennvm_blk_set_type(dev, &rqd->ppa_addr, NVM_BLK_ST_BAD);
 }
 
-static int gennvm_end_io(struct nvm_rq *rqd, int error)
+static void gennvm_end_io(struct nvm_rq *rqd)
 {
 	struct nvm_tgt_instance *ins = rqd->ins;
-	int ret = 0;
 
-	switch (error) {
+	switch (rqd->error) {
 	case NVM_RSP_SUCCESS:
 		break;
 	case NVM_RSP_ERR_EMPTYPAGE:
 		break;
 	case NVM_RSP_ERR_FAILWRITE:
 		gennvm_mark_blk_bad(rqd->dev, rqd);
-	default:
-		ret++;
 	}
 
-	ret += ins->tt->end_io(rqd, error);
+	ins->tt->end_io(rqd);
+}
 
-	return ret;
+static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+	if (!dev->ops->submit_io)
+		return -ENODEV;
+
+	/* Convert address space */
+	nvm_generic_to_addr_mode(dev, rqd);
+
+	rqd->dev = dev;
+	rqd->end_io = gennvm_end_io;
+	return dev->ops->submit_io(dev, rqd);
 }
 
 static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
 							unsigned long flags)
 {
-	int plane_cnt = 0, pl_idx, ret;
-	struct ppa_addr addr;
-	struct nvm_rq rqd;
+	struct ppa_addr addr = block_to_ppa(dev, blk);
 
-	if (!dev->ops->erase_block)
-		return 0;
-
-	addr = block_to_ppa(dev, blk);
-
-	if (dev->plane_mode == NVM_PLANE_SINGLE) {
-		rqd.nr_pages = 1;
-		rqd.ppa_addr = addr;
-	} else {
-		plane_cnt = (1 << dev->plane_mode);
-		rqd.nr_pages = plane_cnt;
-
-		rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
-							&rqd.dma_ppa_list);
-		if (!rqd.ppa_list) {
-			pr_err("gennvm: failed to allocate dma memory\n");
-			return -ENOMEM;
-		}
-
-		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
-			addr.g.pl = pl_idx;
-			rqd.ppa_list[pl_idx] = addr;
-		}
-	}
-
-	gennvm_generic_to_addr_mode(dev, &rqd);
-
-	ret = dev->ops->erase_block(dev, &rqd);
-
-	if (plane_cnt)
-		nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);
-
-	return ret;
+	return nvm_erase_ppa(dev, &addr, 1);
 }
 
 static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
@@ -480,10 +437,11 @@ static void gennvm_lun_info_print(struct nvm_dev *dev)
 	gennvm_for_each_lun(gn, lun, i) {
 		spin_lock(&lun->vlun.lock);
 
-		pr_info("%s: lun%8u\t%u\t%u\t%u\n",
+		pr_info("%s: lun%8u\t%u\t%u\t%u\t%u\n",
 				dev->name, i,
 				lun->vlun.nr_free_blocks,
-				lun->vlun.nr_inuse_blocks,
+				lun->vlun.nr_open_blocks,
+				lun->vlun.nr_closed_blocks,
 				lun->vlun.nr_bad_blocks);
 
 		spin_unlock(&lun->vlun.lock);
@@ -491,21 +449,23 @@ static void gennvm_lun_info_print(struct nvm_dev *dev)
 }
 
 static struct nvmm_type gennvm = {
-	.name		= "gennvm",
-	.version	= {0, 1, 0},
+	.name			= "gennvm",
+	.version		= {0, 1, 0},
 
-	.register_mgr	= gennvm_register,
-	.unregister_mgr	= gennvm_unregister,
+	.register_mgr		= gennvm_register,
+	.unregister_mgr		= gennvm_unregister,
 
-	.get_blk	= gennvm_get_blk,
-	.put_blk	= gennvm_put_blk,
+	.get_blk_unlocked	= gennvm_get_blk_unlocked,
+	.put_blk_unlocked	= gennvm_put_blk_unlocked,
+
+	.get_blk		= gennvm_get_blk,
+	.put_blk		= gennvm_put_blk,
 
-	.submit_io	= gennvm_submit_io,
-	.end_io		= gennvm_end_io,
-	.erase_blk	= gennvm_erase_blk,
+	.submit_io		= gennvm_submit_io,
+	.erase_blk		= gennvm_erase_blk,
 
-	.get_lun	= gennvm_get_lun,
-	.lun_info_print	= gennvm_lun_info_print,
+	.get_lun		= gennvm_get_lun,
+	.lun_info_print		= gennvm_lun_info_print,
 };
 
 static int __init gennvm_module_init(void)
drivers/lightnvm/rrpc.c

@@ -179,16 +179,23 @@ static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)
 static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
 							unsigned long flags)
 {
+	struct nvm_lun *lun = rlun->parent;
 	struct nvm_block *blk;
 	struct rrpc_block *rblk;
 
-	blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
-	if (!blk)
+	spin_lock(&lun->lock);
+	blk = nvm_get_blk_unlocked(rrpc->dev, rlun->parent, flags);
+	if (!blk) {
+		pr_err("nvm: rrpc: cannot get new block from media manager\n");
+		spin_unlock(&lun->lock);
 		return NULL;
+	}
 
 	rblk = &rlun->blocks[blk->id];
-	blk->priv = rblk;
+	list_add_tail(&rblk->list, &rlun->open_list);
+	spin_unlock(&lun->lock);
 
+	blk->priv = rblk;
 	bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk);
 	rblk->next_page = 0;
 	rblk->nr_invalid_pages = 0;
@@ -199,7 +206,13 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
 
 static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
 {
-	nvm_put_blk(rrpc->dev, rblk->parent);
+	struct rrpc_lun *rlun = rblk->rlun;
+	struct nvm_lun *lun = rlun->parent;
+
+	spin_lock(&lun->lock);
+	nvm_put_blk_unlocked(rrpc->dev, rblk->parent);
+	list_del(&rblk->list);
+	spin_unlock(&lun->lock);
 }
 
 static void rrpc_put_blks(struct rrpc *rrpc)
@@ -287,6 +300,8 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
 	}
 
 	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
+	if (!page)
+		return -ENOMEM;
 
 	while ((slot = find_first_zero_bit(rblk->invalid_pages,
 					    nr_pgs_per_blk)) < nr_pgs_per_blk) {
@@ -328,6 +343,10 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
 			goto finished;
 		}
 		wait_for_completion_io(&wait);
+		if (bio->bi_error) {
+			rrpc_inflight_laddr_release(rrpc, rqd);
+			goto finished;
+		}
 
 		bio_reset(bio);
 		reinit_completion(&wait);
@@ -350,6 +369,8 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
 		wait_for_completion_io(&wait);
 
 		rrpc_inflight_laddr_release(rrpc, rqd);
+		if (bio->bi_error)
+			goto finished;
 
 		bio_reset(bio);
 	}
@@ -373,16 +394,26 @@ static void rrpc_block_gc(struct work_struct *work)
 	struct rrpc *rrpc = gcb->rrpc;
 	struct rrpc_block *rblk = gcb->rblk;
 	struct nvm_dev *dev = rrpc->dev;
+	struct nvm_lun *lun = rblk->parent->lun;
+	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
 
+	mempool_free(gcb, rrpc->gcb_pool);
 	pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
 
 	if (rrpc_move_valid_pages(rrpc, rblk))
-		goto done;
+		goto put_back;
+
+	if (nvm_erase_blk(dev, rblk->parent))
+		goto put_back;
 
-	nvm_erase_blk(dev, rblk->parent);
 	rrpc_put_blk(rrpc, rblk);
-done:
-	mempool_free(gcb, rrpc->gcb_pool);
+
+	return;
+
+put_back:
+	spin_lock(&rlun->lock);
+	list_add_tail(&rblk->prio, &rlun->prio_list);
+	spin_unlock(&rlun->lock);
 }
 
 /* the block with highest number of invalid pages, will be in the beginning
@@ -427,7 +458,7 @@ static void rrpc_lun_gc(struct work_struct *work)
 	if (nr_blocks_need < rrpc->nr_luns)
 		nr_blocks_need = rrpc->nr_luns;
 
-	spin_lock(&lun->lock);
+	spin_lock(&rlun->lock);
 	while (nr_blocks_need > lun->nr_free_blocks &&
 			!list_empty(&rlun->prio_list)) {
 		struct rrpc_block *rblock = block_prio_find_max(rlun);
@@ -436,16 +467,16 @@ static void rrpc_lun_gc(struct work_struct *work)
 		if (!rblock->nr_invalid_pages)
 			break;
 
+		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
+		if (!gcb)
+			break;
+
 		list_del_init(&rblock->prio);
 
 		BUG_ON(!block_is_full(rrpc, rblock));
 
 		pr_debug("rrpc: selected block '%lu' for GC\n", block->id);
 
-		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
-		if (!gcb)
-			break;
-
 		gcb->rrpc = rrpc;
 		gcb->rblk = rblock;
 		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
@@ -454,7 +485,7 @@ static void rrpc_lun_gc(struct work_struct *work)
 
 		nr_blocks_need--;
 	}
-	spin_unlock(&lun->lock);
+	spin_unlock(&rlun->lock);
 
 	/* TODO: Hint that request queue can be started again */
 }
@@ -635,12 +666,24 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
 		lun = rblk->parent->lun;
 
 		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
-		if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk))
+		if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
+			struct nvm_block *blk = rblk->parent;
+			struct rrpc_lun *rlun = rblk->rlun;
+
+			spin_lock(&lun->lock);
+			lun->nr_open_blocks--;
+			lun->nr_closed_blocks++;
+			blk->state &= ~NVM_BLK_ST_OPEN;
+			blk->state |= NVM_BLK_ST_CLOSED;
+			list_move_tail(&rblk->list, &rlun->closed_list);
+			spin_unlock(&lun->lock);
+
 			rrpc_run_gc(rrpc, rblk);
+		}
 	}
 }
 
-static int rrpc_end_io(struct nvm_rq *rqd, int error)
+static void rrpc_end_io(struct nvm_rq *rqd)
 {
 	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
 	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
@@ -650,11 +693,12 @@ static int rrpc_end_io(struct nvm_rq *rqd, int error)
 	if (bio_data_dir(rqd->bio) == WRITE)
 		rrpc_end_io_write(rrpc, rrqd, laddr, npages);
 
+	bio_put(rqd->bio);
+
 	if (rrqd->flags & NVM_IOTYPE_GC)
-		return 0;
+		return;
 
 	rrpc_unlock_rq(rrpc, rqd);
-	bio_put(rqd->bio);
 
 	if (npages > 1)
 		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
@@ -662,8 +706,6 @@ static int rrpc_end_io(struct nvm_rq *rqd, int error)
 		nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata);
 
 	mempool_free(rqd, rrpc->rq_pool);
-
-	return 0;
 }
 
 static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
@@ -841,6 +883,13 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
 	err = nvm_submit_io(rrpc->dev, rqd);
 	if (err) {
 		pr_err("rrpc: I/O submission failed: %d\n", err);
+		bio_put(bio);
+		if (!(flags & NVM_IOTYPE_GC)) {
+			rrpc_unlock_rq(rrpc, rqd);
+			if (rqd->nr_pages > 1)
+				nvm_dev_dma_free(rrpc->dev,
+					rqd->ppa_list, rqd->dma_ppa_list);
+		}
 		return NVM_IO_ERR;
 	}
 
@@ -1090,6 +1139,11 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 	struct rrpc_lun *rlun;
 	int i, j;
 
+	if (dev->pgs_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
+		pr_err("rrpc: number of pages per block too high.");
+		return -EINVAL;
+	}
+
 	spin_lock_init(&rrpc->rev_lock);
 
 	rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
@@ -1101,16 +1155,13 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 	for (i = 0; i < rrpc->nr_luns; i++) {
 		struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i);
 
-		if (dev->pgs_per_blk >
-				MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
-			pr_err("rrpc: number of pages per block too high.");
-			goto err;
-		}
-
 		rlun = &rrpc->luns[i];
 		rlun->rrpc = rrpc;
 		rlun->parent = lun;
 		INIT_LIST_HEAD(&rlun->prio_list);
+		INIT_LIST_HEAD(&rlun->open_list);
+		INIT_LIST_HEAD(&rlun->closed_list);
+
 		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
 		spin_lock_init(&rlun->lock);
 
@@ -1127,6 +1178,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 			struct nvm_block *blk = &lun->blocks[j];
 
 			rblk->parent = blk;
+			rblk->rlun = rlun;
 			INIT_LIST_HEAD(&rblk->prio);
 			spin_lock_init(&rblk->lock);
 		}
drivers/lightnvm/rrpc.h

@@ -54,7 +54,9 @@ struct rrpc_rq {
 
 struct rrpc_block {
 	struct nvm_block *parent;
+	struct rrpc_lun *rlun;
 	struct list_head prio;
+	struct list_head list;
 
 #define MAX_INVALID_PAGES_STORAGE 8
 	/* Bitmap for invalid page intries */
@@ -73,7 +75,16 @@ struct rrpc_lun {
 	struct nvm_lun *parent;
 	struct rrpc_block *cur, *gc_cur;
 	struct rrpc_block *blocks;	/* Reference to block allocation */
-	struct list_head prio_list;	/* Blocks that may be GC'ed */
+
+	struct list_head prio_list;	/* Blocks that may be GC'ed */
+	struct list_head open_list;	/* In-use open blocks. These are blocks
+					 * that can be both written to and read
+					 * from
+					 */
+	struct list_head closed_list;	/* In-use closed blocks. These are
+					 * blocks that can _only_ be read from
+					 */
+
 	struct work_struct ws_gc;
 
 	spinlock_t lock;
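Taken together, the gennvm and rrpc changes above replace the old numeric blk->type with an explicit state machine: a block moves from NVM_BLK_ST_FREE to NVM_BLK_ST_OPEN when allocated (gennvm_get_blk_unlocked()), from open to NVM_BLK_ST_CLOSED once every page has been written (rrpc_end_io_write()), back to free when it is put, and to NVM_BLK_ST_BAD on grown write failures (gennvm_mark_blk_bad()). The per-lun open_list/closed_list added to struct rrpc_lun mirror those states on the target side.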
drivers/lightnvm/sysblk.c (new file, 741 lines)

@@ -0,0 +1,741 @@
+/*
+ * Copyright (C) 2015 Matias Bjorling. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ * USA.
+ *
+ */
+
+#include <linux/lightnvm.h>
+
+#define MAX_SYSBLKS 3	/* remember to update mapping scheme on change */
+#define MAX_BLKS_PR_SYSBLK 2 /* 2 blks with 256 pages and 3000 erases
+			      * enables ~1.5M updates per sysblk unit
+			      */
+
+struct sysblk_scan {
+	/* A row is a collection of flash blocks for a system block. */
+	int nr_rows;
+	int row;
+	int act_blk[MAX_SYSBLKS];
+
+	int nr_ppas;
+	struct ppa_addr ppas[MAX_SYSBLKS * MAX_BLKS_PR_SYSBLK];/* all sysblks */
+};
+
+static inline int scan_ppa_idx(int row, int blkid)
+{
+	return (row * MAX_BLKS_PR_SYSBLK) + blkid;
+}
+
+void nvm_sysblk_to_cpu(struct nvm_sb_info *info, struct nvm_system_block *sb)
+{
+	info->seqnr = be32_to_cpu(sb->seqnr);
+	info->erase_cnt = be32_to_cpu(sb->erase_cnt);
+	info->version = be16_to_cpu(sb->version);
+	strncpy(info->mmtype, sb->mmtype, NVM_MMTYPE_LEN);
+	info->fs_ppa.ppa = be64_to_cpu(sb->fs_ppa);
+}
+
+void nvm_cpu_to_sysblk(struct nvm_system_block *sb, struct nvm_sb_info *info)
+{
+	sb->magic = cpu_to_be32(NVM_SYSBLK_MAGIC);
+	sb->seqnr = cpu_to_be32(info->seqnr);
+	sb->erase_cnt = cpu_to_be32(info->erase_cnt);
+	sb->version = cpu_to_be16(info->version);
+	strncpy(sb->mmtype, info->mmtype, NVM_MMTYPE_LEN);
+	sb->fs_ppa = cpu_to_be64(info->fs_ppa.ppa);
+}
+
+static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
+{
+	int nr_rows = min_t(int, MAX_SYSBLKS, dev->nr_chnls);
+	int i;
+
+	for (i = 0; i < nr_rows; i++)
+		sysblk_ppas[i].ppa = 0;
+
+	/* if possible, place sysblk at first channel, middle channel and last
+	 * channel of the device. If not, create only one or two sys blocks
+	 */
+	switch (dev->nr_chnls) {
+	case 2:
+		sysblk_ppas[1].g.ch = 1;
+		/* fall-through */
+	case 1:
+		sysblk_ppas[0].g.ch = 0;
+		break;
+	default:
+		sysblk_ppas[0].g.ch = 0;
+		sysblk_ppas[1].g.ch = dev->nr_chnls / 2;
+		sysblk_ppas[2].g.ch = dev->nr_chnls - 1;
+		break;
+	}
+
+	return nr_rows;
+}
+
+void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
+						struct ppa_addr *sysblk_ppas)
+{
+	memset(s, 0, sizeof(struct sysblk_scan));
+	s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas);
+}
+
+static int sysblk_get_host_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
+								void *private)
+{
+	struct sysblk_scan *s = private;
+	int i, nr_sysblk = 0;
+
+	for (i = 0; i < nr_blks; i++) {
+		if (blks[i] != NVM_BLK_T_HOST)
+			continue;
+
+		if (s->nr_ppas == MAX_BLKS_PR_SYSBLK * MAX_SYSBLKS) {
+			pr_err("nvm: too many host blks\n");
+			return -EINVAL;
+		}
+
+		ppa.g.blk = i;
+
+		s->ppas[scan_ppa_idx(s->row, nr_sysblk)] = ppa;
+		s->nr_ppas++;
+		nr_sysblk++;
+	}
+
+	return 0;
+}
+
+static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
+				struct ppa_addr *ppas, nvm_bb_update_fn *fn)
+{
+	struct ppa_addr dppa;
+	int i, ret;
+
+	s->nr_ppas = 0;
+
+	for (i = 0; i < s->nr_rows; i++) {
+		dppa = generic_to_dev_addr(dev, ppas[i]);
+		s->row = i;
+
+		ret = dev->ops->get_bb_tbl(dev, dppa, dev->blks_per_lun, fn, s);
+		if (ret) {
+			pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
+							ppas[i].g.ch,
+							ppas[i].g.blk);
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * scans a block for latest sysblk.
+ * Returns:
+ *	0 - newer sysblk not found. PPA is updated to latest page.
+ *	1 - newer sysblk found and stored in *cur. PPA is updated to
+ *	    next valid page.
+ *	<0- error.
+ */
+static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
+						struct nvm_system_block *sblk)
+{
+	struct nvm_system_block *cur;
+	int pg, cursz, ret, found = 0;
+
+	/* the full buffer for a flash page is allocated. Only the first of it
+	 * contains the system block information
+	 */
+	cursz = dev->sec_size * dev->sec_per_pg * dev->nr_planes;
+	cur = kmalloc(cursz, GFP_KERNEL);
+	if (!cur)
+		return -ENOMEM;
+
+	/* perform linear scan through the block */
+	for (pg = 0; pg < dev->lps_per_blk; pg++) {
+		ppa->g.pg = ppa_to_slc(dev, pg);
+
+		ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
+								cur, cursz);
+		if (ret) {
+			if (ret == NVM_RSP_ERR_EMPTYPAGE) {
+				pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
+							ppa->g.ch,
+							ppa->g.lun,
+							ppa->g.blk,
+							ppa->g.pg);
+				break;
+			}
+			pr_err("nvm: read failed (%x) for ppa (%u %u %u %u)",
+							ret,
+							ppa->g.ch,
+							ppa->g.lun,
+							ppa->g.blk,
+							ppa->g.pg);
+			break; /* if we can't read a page, continue to the
+				* next blk
+				*/
+		}
+
+		if (be32_to_cpu(cur->magic) != NVM_SYSBLK_MAGIC) {
+			pr_debug("nvm: scan break for ppa (%u %u %u %u)\n",
+							ppa->g.ch,
+							ppa->g.lun,
+							ppa->g.blk,
+							ppa->g.pg);
+			break; /* last valid page already found */
+		}
+
+		if (be32_to_cpu(cur->seqnr) < be32_to_cpu(sblk->seqnr))
+			continue;
+
+		memcpy(sblk, cur, sizeof(struct nvm_system_block));
+		found = 1;
+	}
+
+	kfree(cur);
+
+	return found;
+}
+
+static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
+{
+	struct nvm_rq rqd;
+	int ret;
+
+	if (s->nr_ppas > dev->ops->max_phys_sect) {
+		pr_err("nvm: unable to update all sysblocks atomically\n");
+		return -EINVAL;
+	}
+
+	memset(&rqd, 0, sizeof(struct nvm_rq));
+
+	nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas);
+	nvm_generic_to_addr_mode(dev, &rqd);
+
+	ret = dev->ops->set_bb_tbl(dev, &rqd, type);
+	nvm_free_rqd_ppalist(dev, &rqd);
+	if (ret) {
+		pr_err("nvm: sysblk failed bb mark\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int sysblk_get_free_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
+								void *private)
+{
+	struct sysblk_scan *s = private;
+	struct ppa_addr *sppa;
+	int i, blkid = 0;
+
+	for (i = 0; i < nr_blks; i++) {
+		if (blks[i] == NVM_BLK_T_HOST)
+			return -EEXIST;
+
+		if (blks[i] != NVM_BLK_T_FREE)
+			continue;
+
+		sppa = &s->ppas[scan_ppa_idx(s->row, blkid)];
+		sppa->g.ch = ppa.g.ch;
+		sppa->g.lun = ppa.g.lun;
+		sppa->g.blk = i;
+		s->nr_ppas++;
+		blkid++;
+
+		pr_debug("nvm: use (%u %u %u) as sysblk\n",
+					sppa->g.ch, sppa->g.lun, sppa->g.blk);
+		if (blkid > MAX_BLKS_PR_SYSBLK - 1)
+			return 0;
+	}
+
+	pr_err("nvm: sysblk failed get sysblk\n");
+	return -EINVAL;
+}
+
+static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
+							struct sysblk_scan *s)
+{
+	struct nvm_system_block nvmsb;
+	void *buf;
+	int i, sect, ret, bufsz;
+	struct ppa_addr *ppas;
+
+	nvm_cpu_to_sysblk(&nvmsb, info);
+
+	/* buffer for flash page */
+	bufsz = dev->sec_size * dev->sec_per_pg * dev->nr_planes;
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	memcpy(buf, &nvmsb, sizeof(struct nvm_system_block));
+
+	ppas = kcalloc(dev->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL);
+	if (!ppas) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	/* Write and verify */
+	for (i = 0; i < s->nr_rows; i++) {
+		ppas[0] = s->ppas[scan_ppa_idx(i, s->act_blk[i])];
+
+		pr_debug("nvm: writing sysblk to ppa (%u %u %u %u)\n",
+							ppas[0].g.ch,
+							ppas[0].g.lun,
+							ppas[0].g.blk,
+							ppas[0].g.pg);
+
+		/* Expand to all sectors within a flash page */
+		if (dev->sec_per_pg > 1) {
+			for (sect = 1; sect < dev->sec_per_pg; sect++) {
+				ppas[sect].ppa = ppas[0].ppa;
+				ppas[sect].g.sec = sect;
+			}
+		}
+
+		ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PWRITE,
+						NVM_IO_SLC_MODE, buf, bufsz);
+		if (ret) {
+			pr_err("nvm: sysblk failed program (%u %u %u)\n",
+							ppas[0].g.ch,
+							ppas[0].g.lun,
+							ppas[0].g.blk);
+			break;
+		}
+
+		ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PREAD,
+						NVM_IO_SLC_MODE, buf, bufsz);
+		if (ret) {
+			pr_err("nvm: sysblk failed read (%u %u %u)\n",
+							ppas[0].g.ch,
+							ppas[0].g.lun,
+							ppas[0].g.blk);
+			break;
+		}
+
+		if (memcmp(buf, &nvmsb, sizeof(struct nvm_system_block))) {
+			pr_err("nvm: sysblk failed verify (%u %u %u)\n",
+							ppas[0].g.ch,
+							ppas[0].g.lun,
+							ppas[0].g.blk);
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	kfree(ppas);
+err:
+	kfree(buf);
+
+	return ret;
+}
+
+static int nvm_prepare_new_sysblks(struct nvm_dev *dev, struct sysblk_scan *s)
+{
+	int i, ret;
+	unsigned long nxt_blk;
+	struct ppa_addr *ppa;
+
+	for (i = 0; i < s->nr_rows; i++) {
+		nxt_blk = (s->act_blk[i] + 1) % MAX_BLKS_PR_SYSBLK;
+		ppa = &s->ppas[scan_ppa_idx(i, nxt_blk)];
+		ppa->g.pg = ppa_to_slc(dev, 0);
+
+		ret = nvm_erase_ppa(dev, ppa, 1);
+		if (ret)
+			return ret;
+
+		s->act_blk[i] = nxt_blk;
+	}
+
+	return 0;
+}
+
+int nvm_get_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
+{
+	struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
+	struct sysblk_scan s;
+	struct nvm_system_block *cur;
+	int i, j, found = 0;
+	int ret = -ENOMEM;
+
+	/*
+	 * 1. setup sysblk locations
+	 * 2. get bad block list
+	 * 3. filter on host-specific (type 3)
+	 * 4. iterate through all and find the highest seq nr.
+	 * 5. return superblock information
+	 */
+
+	if (!dev->ops->get_bb_tbl)
+		return -EINVAL;
+
+	nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
+
+	mutex_lock(&dev->mlock);
+	ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
+	if (ret)
+		goto err_sysblk;
+
+	/* no sysblocks initialized */
+	if (!s.nr_ppas)
+		goto err_sysblk;
+
+	cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
+	if (!cur)
+		goto err_sysblk;
+
+	/* find the latest block across all sysblocks */
+	for (i = 0; i < s.nr_rows; i++) {
+		for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
+			struct ppa_addr ppa = s.ppas[scan_ppa_idx(i, j)];
+
+			ret = nvm_scan_block(dev, &ppa, cur);
+			if (ret > 0)
+				found = 1;
+			else if (ret < 0)
+				break;
+		}
+	}
+
+	nvm_sysblk_to_cpu(info, cur);
+
+	kfree(cur);
+err_sysblk:
+	mutex_unlock(&dev->mlock);
+
+	if (found)
+		return 1;
+	return ret;
+}
+
+int nvm_update_sysblock(struct nvm_dev *dev, struct nvm_sb_info *new)
+{
+	/* 1. for each latest superblock
+	 * 2. if room
+	 *    a. write new flash page entry with the updated information
+	 * 3. if no room
+	 *    a. find next available block on lun (linear search)
+	 *       if none, continue to next lun
+	 *       if none at all, report error. also report that it wasn't
+	 *       possible to write to all superblocks.
+	 *    c. write data to block.
+	 */
+	struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
+	struct sysblk_scan s;
+	struct nvm_system_block *cur;
+	int i, j, ppaidx, found = 0;
+	int ret = -ENOMEM;
+
+	if (!dev->ops->get_bb_tbl)
+		return -EINVAL;
+
+	nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
+
+	mutex_lock(&dev->mlock);
+	ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
+	if (ret)
+		goto err_sysblk;
+
+	cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
+	if (!cur)
+		goto err_sysblk;
+
+	/* Get the latest sysblk for each sysblk row */
+	for (i = 0; i < s.nr_rows; i++) {
+		found = 0;
+		for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
+			ppaidx = scan_ppa_idx(i, j);
+			ret = nvm_scan_block(dev, &s.ppas[ppaidx], cur);
+			if (ret > 0) {
+				s.act_blk[i] = j;
+				found = 1;
+			} else if (ret < 0)
+				break;
+		}
+	}
+
+	if (!found) {
+		pr_err("nvm: no valid sysblks found to update\n");
+		ret = -EINVAL;
+		goto err_cur;
+	}
+
+	/*
+	 * All sysblocks found. Check that they have same page id in their flash
+	 * blocks
+	 */
+	for (i = 1; i < s.nr_rows; i++) {
+		struct ppa_addr l = s.ppas[scan_ppa_idx(0, s.act_blk[0])];
+		struct ppa_addr r = s.ppas[scan_ppa_idx(i, s.act_blk[i])];
+
+		if (l.g.pg != r.g.pg) {
+			pr_err("nvm: sysblks not on same page. Previous update failed.\n");
+			ret = -EINVAL;
+			goto err_cur;
+		}
+	}
+
+	/*
+	 * Check that there haven't been another update to the seqnr since we
+	 * began
+	 */
+	if ((new->seqnr - 1) != be32_to_cpu(cur->seqnr)) {
+		pr_err("nvm: seq is not sequential\n");
+		ret = -EINVAL;
+		goto err_cur;
+	}
+
+	/*
+	 * When all pages in a block has been written, a new block is selected
+	 * and writing is performed on the new block.
+	 */
+	if (s.ppas[scan_ppa_idx(0, s.act_blk[0])].g.pg ==
+						dev->lps_per_blk - 1) {
+		ret = nvm_prepare_new_sysblks(dev, &s);
+		if (ret)
+			goto err_cur;
+	}
+
+	ret = nvm_write_and_verify(dev, new, &s);
+err_cur:
+	kfree(cur);
+err_sysblk:
+	mutex_unlock(&dev->mlock);
+
+	return ret;
+}
+
+int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
+{
+	struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
+	struct sysblk_scan s;
+	int ret;
+
+	/*
+	 * 1. select master blocks and select first available blks
+	 * 2. get bad block list
+	 * 3. mark MAX_SYSBLKS block as host-based device allocated.
+	 * 4. write and verify data to block
+	 */
+
+	if (!dev->ops->get_bb_tbl || !dev->ops->set_bb_tbl)
+		return -EINVAL;
+
+	if (!(dev->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) {
+		pr_err("nvm: memory does not support SLC access\n");
+		return -EINVAL;
+	}
+
+	/* Index all sysblocks and mark them as host-driven */
+	nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
+
+	mutex_lock(&dev->mlock);
+	ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_free_blks);
+	if (ret)
+		goto err_mark;
+
+	ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
+	if (ret)
+		goto err_mark;
+
+	/* Write to the first block of each row */
+	ret = nvm_write_and_verify(dev, info, &s);
+err_mark:
+	mutex_unlock(&dev->mlock);
+	return ret;
+}
+
+struct factory_blks {
+	struct nvm_dev *dev;
+	int flags;
+	unsigned long *blks;
+};
+
+static int factory_nblks(int nblks)
+{
+	/* Round up to nearest BITS_PER_LONG */
+	return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
+}
+
+static unsigned int factory_blk_offset(struct nvm_dev *dev, int ch, int lun)
+{
+	int nblks = factory_nblks(dev->blks_per_lun);
+
+	return ((ch * dev->luns_per_chnl * nblks) + (lun * nblks)) /
+								BITS_PER_LONG;
+}
+
+static int nvm_factory_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
+								void *private)
+{
+	struct factory_blks *f = private;
+	struct nvm_dev *dev = f->dev;
+	int i, lunoff;
+
+	lunoff = factory_blk_offset(dev, ppa.g.ch, ppa.g.lun);
+
+	/* non-set bits correspond to the block must be erased */
+	for (i = 0; i < nr_blks; i++) {
+		switch (blks[i]) {
+		case NVM_BLK_T_FREE:
+			if (f->flags & NVM_FACTORY_ERASE_ONLY_USER)
+				set_bit(i, &f->blks[lunoff]);
+			break;
+		case NVM_BLK_T_HOST:
+			if (!(f->flags & NVM_FACTORY_RESET_HOST_BLKS))
+				set_bit(i, &f->blks[lunoff]);
+			break;
+		case NVM_BLK_T_GRWN_BAD:
+			if (!(f->flags & NVM_FACTORY_RESET_GRWN_BBLKS))
+				set_bit(i, &f->blks[lunoff]);
+			break;
+		default:
+			set_bit(i, &f->blks[lunoff]);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
+					int max_ppas, struct factory_blks *f)
+{
+	struct ppa_addr ppa;
+	int ch, lun, blkid, idx, done = 0, ppa_cnt = 0;
+	unsigned long *offset;
+
+	while (!done) {
+		done = 1;
+		for (ch = 0; ch < dev->nr_chnls; ch++) {
+			for (lun = 0; lun < dev->luns_per_chnl; lun++) {
+				idx = factory_blk_offset(dev, ch, lun);
+				offset = &f->blks[idx];
+
+				blkid = find_first_zero_bit(offset,
+							dev->blks_per_lun);
+				if (blkid >= dev->blks_per_lun)
+					continue;
+				set_bit(blkid, offset);
+
+				ppa.ppa = 0;
+				ppa.g.ch = ch;
+				ppa.g.lun = lun;
+				ppa.g.blk = blkid;
+				pr_debug("nvm: erase ppa (%u %u %u)\n",
+								ppa.g.ch,
+								ppa.g.lun,
+								ppa.g.blk);
+
+				erase_list[ppa_cnt] = ppa;
+				ppa_cnt++;
+				done = 0;
+
+				if (ppa_cnt == max_ppas)
+					return ppa_cnt;
+			}
+		}
+	}
+
+	return ppa_cnt;
+}
+
+static int nvm_fact_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa,
+					nvm_bb_update_fn *fn, void *priv)
+{
+	struct ppa_addr dev_ppa;
+	int ret;
+
+	dev_ppa = generic_to_dev_addr(dev, ppa);
+
+	ret = dev->ops->get_bb_tbl(dev, dev_ppa, dev->blks_per_lun, fn, priv);
+	if (ret)
+		pr_err("nvm: failed bb tbl for ch%u lun%u\n",
+							ppa.g.ch, ppa.g.blk);
+	return ret;
+}
+
+static int nvm_fact_select_blks(struct nvm_dev *dev, struct factory_blks *f)
+{
+	int ch, lun, ret;
+	struct ppa_addr ppa;
+
+	ppa.ppa = 0;
+	for (ch = 0; ch < dev->nr_chnls; ch++) {
+		for (lun = 0; lun < dev->luns_per_chnl; lun++) {
+			ppa.g.ch = ch;
+			ppa.g.lun = lun;
+
+			ret = nvm_fact_get_bb_tbl(dev, ppa, nvm_factory_blks,
+									f);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+int nvm_dev_factory(struct nvm_dev *dev, int flags)
+{
+	struct factory_blks f;
+	struct ppa_addr *ppas;
+	int ppa_cnt, ret = -ENOMEM;
+	int max_ppas = dev->ops->max_phys_sect / dev->nr_planes;
+	struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
+	struct sysblk_scan s;
+
+	f.blks = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns,
+								GFP_KERNEL);
+	if (!f.blks)
+		return ret;
+
+	ppas = kcalloc(max_ppas, sizeof(struct ppa_addr), GFP_KERNEL);
+	if (!ppas)
+		goto err_blks;
+
+	f.dev = dev;
+	f.flags = flags;
+
+	/* create list of blks to be erased */
+	ret = nvm_fact_select_blks(dev, &f);
+	if (ret)
+		goto err_ppas;
+
+	/* continue to erase until list of blks until empty */
+	while ((ppa_cnt = nvm_fact_get_blks(dev, ppas, max_ppas, &f)) > 0)
+		nvm_erase_ppa(dev, ppas, ppa_cnt);
+
+	/* mark host reserved blocks free */
+	if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
+		nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
+		mutex_lock(&dev->mlock);
+		ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas,
+							sysblk_get_host_blks);
+		if (!ret)
+			ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
+		mutex_unlock(&dev->mlock);
+	}
+err_ppas:
+	kfree(ppas);
+err_blks:
+	kfree(f.blks);
+	return ret;
+}
+EXPORT_SYMBOL(nvm_dev_factory);
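One note on the bookkeeping in sysblk.c above: all candidate system-block PPAs are kept in one flat array, where each row (one per chosen channel location) owns MAX_BLKS_PR_SYSBLK consecutive slots via scan_ppa_idx(). A standalone sketch of that layout (illustrative only, not kernel code):

/* layout.c - illustrates scan_ppa_idx()'s flat row/block indexing */
#include <stdio.h>

#define MAX_SYSBLKS 3
#define MAX_BLKS_PR_SYSBLK 2

static int scan_ppa_idx(int row, int blkid)
{
	return (row * MAX_BLKS_PR_SYSBLK) + blkid;
}

int main(void)
{
	int row, blk;

	/* ppas[] indices: row 0 -> 0,1; row 1 -> 2,3; row 2 -> 4,5 */
	for (row = 0; row < MAX_SYSBLKS; row++)
		for (blk = 0; blk < MAX_BLKS_PR_SYSBLK; blk++)
			printf("row %d blk %d -> ppas[%d]\n",
				row, blk, scan_ppa_idx(row, blk));
	return 0;
}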
@ -146,6 +146,16 @@ struct nvme_nvm_command {
|
||||
};
|
||||
};
|
||||
|
||||
struct nvme_nvm_lp_mlc {
|
||||
__u16 num_pairs;
|
||||
__u8 pairs[886];
|
||||
};
|
||||
|
||||
struct nvme_nvm_lp_tbl {
|
||||
__u8 id[8];
|
||||
struct nvme_nvm_lp_mlc mlc;
|
||||
};
|
||||
|
||||
struct nvme_nvm_id_group {
|
||||
__u8 mtype;
|
||||
__u8 fmtype;
|
||||
@ -169,7 +179,8 @@ struct nvme_nvm_id_group {
|
||||
__le32 mpos;
|
||||
__le32 mccap;
|
||||
__le16 cpar;
|
||||
__u8 reserved[906];
|
||||
__u8 reserved[10];
|
||||
struct nvme_nvm_lp_tbl lptbl;
|
||||
} __packed;
|
||||
|
||||
struct nvme_nvm_addr_format {
|
||||
@ -266,6 +277,15 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
dst->mccap = le32_to_cpu(src->mccap);

dst->cpar = le16_to_cpu(src->cpar);

if (dst->fmtype == NVM_ID_FMTYPE_MLC) {
memcpy(dst->lptbl.id, src->lptbl.id, 8);
dst->lptbl.mlc.num_pairs =
le16_to_cpu(src->lptbl.mlc.num_pairs);
/* 4 bits per pair */
memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
dst->lptbl.mlc.num_pairs >> 1);
}
}

return 0;
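
The memcpy above moves num_pairs 4-bit entries, i.e. two entries per byte, which is why the copy length is num_pairs >> 1. A sketch of unpacking entry i; treating the low nibble as the even index is an assumption for illustration, the code above does not fix the ordering:

	/* Hypothetical accessor: fetch the 4-bit entry for pair i from
	 * the packed table (two entries per byte; even index in the low
	 * nibble is assumed, not specified above). */
	static inline u8 my_lp_pair_entry(struct nvm_id_lp_mlc *mlc, int i)
	{
		u8 byte = mlc->pairs[i >> 1];

		return (i & 1) ? (byte >> 4) : (byte & 0xf);
	}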
@ -405,11 +425,6 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,

ppa = dev_to_generic_addr(nvmdev, ppa);
ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv);
if (ret) {
ret = -EINTR;
goto out;
}

out:
kfree(bb_tbl);
return ret;
@ -453,11 +468,8 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
static void nvme_nvm_end_io(struct request *rq, int error)
{
struct nvm_rq *rqd = rq->end_io_data;
struct nvm_dev *dev = rqd->dev;

if (dev->mt && dev->mt->end_io(rqd, error))
pr_err("nvme: err status: %x result: %lx\n",
rq->errors, (unsigned long)rq->special);
nvm_end_io(rqd, error);

kfree(rq->cmd);
blk_mq_free_request(rq);

@ -1,6 +1,8 @@
#ifndef NVM_H
#define NVM_H

#include <linux/types.h>

enum {
NVM_IO_OK = 0,
NVM_IO_REQUEUE = 1,
@ -11,12 +13,74 @@ enum {
NVM_IOTYPE_GC = 1,
};

#define NVM_BLK_BITS (16)
#define NVM_PG_BITS (16)
#define NVM_SEC_BITS (8)
#define NVM_PL_BITS (8)
#define NVM_LUN_BITS (8)
#define NVM_CH_BITS (8)

struct ppa_addr {
/* Generic structure for all addresses */
union {
struct {
u64 blk : NVM_BLK_BITS;
u64 pg : NVM_PG_BITS;
u64 sec : NVM_SEC_BITS;
u64 pl : NVM_PL_BITS;
u64 lun : NVM_LUN_BITS;
u64 ch : NVM_CH_BITS;
} g;

u64 ppa;
};
};

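Because the union overlays the named fields with a single u64, a whole address can be zeroed, copied, or compared in one access. A small illustrative helper, assuming only the definition above:

	/* Illustrative: build a generic-format address for (ch, lun, blk);
	 * clearing .ppa first zeroes every field through the union. */
	static inline struct ppa_addr my_mk_ppa(int ch, int lun, int blk)
	{
		struct ppa_addr ppa;

		ppa.ppa = 0;
		ppa.g.ch = ch;
		ppa.g.lun = lun;
		ppa.g.blk = blk;

		return ppa;
	}
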
struct nvm_rq;
struct nvm_id;
struct nvm_dev;

typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
nvm_l2p_update_fn *, void *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
nvm_bb_update_fn *, void *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
dma_addr_t *);
typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);

struct nvm_dev_ops {
nvm_id_fn *identity;
nvm_get_l2p_tbl_fn *get_l2p_tbl;
nvm_op_bb_tbl_fn *get_bb_tbl;
nvm_op_set_bb_fn *set_bb_tbl;

nvm_submit_io_fn *submit_io;
nvm_erase_blk_fn *erase_block;

nvm_create_dma_pool_fn *create_dma_pool;
nvm_destroy_dma_pool_fn *destroy_dma_pool;
nvm_dev_dma_alloc_fn *dev_dma_alloc;
nvm_dev_dma_free_fn *dev_dma_free;

unsigned int max_phys_sect;
};

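Keeping struct nvm_dev_ops and its callback typedefs above the #ifdef that follows means a driver can define its ops table even when CONFIG_NVM is disabled. A sketch of such a table; the my_* callbacks and the max_phys_sect value are illustrative placeholders, not part of the patch:

	/* Hypothetical driver callbacks, declared only so the table below
	 * is self-contained. */
	static int my_id(struct nvm_dev *dev, struct nvm_id *id);
	static int my_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd);
	static int my_erase_blk(struct nvm_dev *dev, struct nvm_rq *rqd);

	static struct nvm_dev_ops my_lnvm_dev_ops = {
		.identity	= my_id,
		.submit_io	= my_submit_io,
		.erase_block	= my_erase_blk,

		/* example value; a real driver reports its own limit */
		.max_phys_sect	= 64,
	};
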
#ifdef CONFIG_NVM

#include <linux/blkdev.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/dmapool.h>
#include <uapi/linux/lightnvm.h>

enum {
/* HW Responsibilities */
@ -58,8 +122,29 @@ enum {
/* Block Types */
NVM_BLK_T_FREE = 0x0,
NVM_BLK_T_BAD = 0x1,
NVM_BLK_T_DEV = 0x2,
NVM_BLK_T_HOST = 0x4,
NVM_BLK_T_GRWN_BAD = 0x2,
NVM_BLK_T_DEV = 0x4,
NVM_BLK_T_HOST = 0x8,

/* Memory capabilities */
NVM_ID_CAP_SLC = 0x1,
NVM_ID_CAP_CMD_SUSPEND = 0x2,
NVM_ID_CAP_SCRAMBLE = 0x4,
NVM_ID_CAP_ENCRYPT = 0x8,

/* Memory types */
NVM_ID_FMTYPE_SLC = 0,
NVM_ID_FMTYPE_MLC = 1,
};

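With the grown-bad type inserted, each block type is a distinct bit, so a bad-block-table entry can be tested with a simple mask. A small sketch; blk_is_any_bad is a hypothetical helper:

	/* Sketch: treat both factory-bad and grown-bad blocks as unusable;
	 * relies only on the bit values redefined above. */
	static inline bool blk_is_any_bad(u8 blk_type)
	{
		return blk_type & (NVM_BLK_T_BAD | NVM_BLK_T_GRWN_BAD);
	}
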
struct nvm_id_lp_mlc {
u16 num_pairs;
u8 pairs[886];
};

struct nvm_id_lp_tbl {
__u8 id[8];
struct nvm_id_lp_mlc mlc;
};

struct nvm_id_group {
@ -82,6 +167,8 @@ struct nvm_id_group {
u32 mpos;
u32 mccap;
u16 cpar;

struct nvm_id_lp_tbl lptbl;
};

struct nvm_addr_format {
@ -125,28 +212,8 @@ struct nvm_tgt_instance {
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0

#define NVM_BLK_BITS (16)
#define NVM_PG_BITS (16)
#define NVM_SEC_BITS (8)
#define NVM_PL_BITS (8)
#define NVM_LUN_BITS (8)
#define NVM_CH_BITS (8)

struct ppa_addr {
/* Generic structure for all addresses */
union {
struct {
u64 blk : NVM_BLK_BITS;
u64 pg : NVM_PG_BITS;
u64 sec : NVM_SEC_BITS;
u64 pl : NVM_PL_BITS;
u64 lun : NVM_LUN_BITS;
u64 ch : NVM_CH_BITS;
} g;

u64 ppa;
};
};
struct nvm_rq;
typedef void (nvm_end_io_fn)(struct nvm_rq *);

struct nvm_rq {
struct nvm_tgt_instance *ins;
@ -164,9 +231,14 @@ struct nvm_rq {
void *metadata;
dma_addr_t dma_metadata;

struct completion *wait;
nvm_end_io_fn *end_io;

uint8_t opcode;
uint16_t nr_pages;
uint16_t flags;

int error;
};

static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
@ -181,51 +253,31 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)

struct nvm_block;

typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
nvm_l2p_update_fn *, void *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
nvm_bb_update_fn *, void *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
dma_addr_t *);
typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);

struct nvm_dev_ops {
nvm_id_fn *identity;
nvm_get_l2p_tbl_fn *get_l2p_tbl;
nvm_op_bb_tbl_fn *get_bb_tbl;
nvm_op_set_bb_fn *set_bb_tbl;

nvm_submit_io_fn *submit_io;
nvm_erase_blk_fn *erase_block;

nvm_create_dma_pool_fn *create_dma_pool;
nvm_destroy_dma_pool_fn *destroy_dma_pool;
nvm_dev_dma_alloc_fn *dev_dma_alloc;
nvm_dev_dma_free_fn *dev_dma_free;

unsigned int max_phys_sect;
};

struct nvm_lun {
int id;

int lun_id;
int chnl_id;

unsigned int nr_inuse_blocks; /* Number of used blocks */
/* It is up to the target to mark blocks as closed. If the target does
* not do it, all blocks are marked as open, and nr_open_blocks
* represents the number of blocks in use
*/
unsigned int nr_open_blocks; /* Number of used, writable blocks */
unsigned int nr_closed_blocks; /* Number of used, read-only blocks */
unsigned int nr_free_blocks; /* Number of unused blocks */
unsigned int nr_bad_blocks; /* Number of bad blocks */
struct nvm_block *blocks;

spinlock_t lock;

struct nvm_block *blocks;
};

enum {
NVM_BLK_ST_FREE = 0x1, /* Free block */
NVM_BLK_ST_OPEN = 0x2, /* Open block - read-write */
NVM_BLK_ST_CLOSED = 0x4, /* Closed block - read-only */
NVM_BLK_ST_BAD = 0x8, /* Bad block */
};

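With open and closed blocks now tracked separately, a media manager has to keep the per-lun counters in step when it transitions a block. A sketch of that bookkeeping under the lun lock; my_mm_close_blk is a hypothetical helper, and the convention that the target drives the close comes from the comment above:

	/* Sketch: mark an open block read-only and keep the per-lun
	 * counters consistent; caller owns the block. */
	static void my_mm_close_blk(struct nvm_lun *lun, struct nvm_block *blk)
	{
		spin_lock(&lun->lock);
		blk->state = NVM_BLK_ST_CLOSED;
		lun->nr_open_blocks--;
		lun->nr_closed_blocks++;
		spin_unlock(&lun->lock);
	}
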
struct nvm_block {
@ -234,7 +286,16 @@ struct nvm_block {
unsigned long id;

void *priv;
int type;
int state;
};

/* system block cpu representation */
struct nvm_sb_info {
unsigned long seqnr;
unsigned long erase_cnt;
unsigned int version;
char mmtype[NVM_MMTYPE_LEN];
struct ppa_addr fs_ppa;
};

struct nvm_dev {
@ -247,6 +308,9 @@ struct nvm_dev {
struct nvmm_type *mt;
void *mp;

/* System blocks */
struct nvm_sb_info sb;

/* Device information */
int nr_chnls;
int nr_planes;
@ -256,6 +320,7 @@ struct nvm_dev {
int blks_per_lun;
int sec_size;
int oob_size;
int mccap;
struct nvm_addr_format ppaf;

/* Calculated/Cached values. These do not reflect the actual usable
@ -268,6 +333,10 @@ struct nvm_dev {
int sec_per_blk;
int sec_per_lun;

/* lower page table */
int lps_per_blk;
int *lptbl;

unsigned long total_pages;
unsigned long total_blocks;
int nr_luns;
@ -280,6 +349,8 @@ struct nvm_dev {
/* Backend device */
struct request_queue *q;
char name[DISK_NAME_LEN];

struct mutex mlock;
};

static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
@ -345,9 +416,13 @@ static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev,
return ppa;
}

static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg)
{
return dev->lptbl[slc_pg];
}

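dev->lptbl (sized by dev->lps_per_blk) maps consecutive SLC page indexes to the device pages that act as lower pages, and ppa_to_slc() is its accessor. A sketch that walks the table; my_walk_slc_pages is a hypothetical debugging helper:

	/* Sketch: dump the lower page mapping; assumes lps_per_blk and
	 * lptbl were populated during device initialization. */
	static void my_walk_slc_pages(struct nvm_dev *dev)
	{
		int i;

		for (i = 0; i < dev->lps_per_blk; i++)
			pr_debug("nvm: slc pg %d maps to pg %d\n",
							i, ppa_to_slc(dev, i));
	}
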
typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef int (nvm_tgt_end_io_fn)(struct nvm_rq *, int);
typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
typedef void (nvm_tgt_exit_fn)(void *);

@ -358,7 +433,7 @@ struct nvm_tgt_type {
/* target entry points */
nvm_tgt_make_rq_fn *make_rq;
nvm_tgt_capacity_fn *capacity;
nvm_tgt_end_io_fn *end_io;
nvm_end_io_fn *end_io;

/* module-specific init/teardown */
nvm_tgt_init_fn *init;
@ -383,7 +458,6 @@ typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvmm_end_io_fn)(struct nvm_rq *, int);
typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
unsigned long);
typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
@ -397,6 +471,8 @@ struct nvmm_type {
nvmm_unregister_fn *unregister_mgr;

/* Block administration callbacks */
nvmm_get_blk_fn *get_blk_unlocked;
nvmm_put_blk_fn *put_blk_unlocked;
nvmm_get_blk_fn *get_blk;
nvmm_put_blk_fn *put_blk;
nvmm_open_blk_fn *open_blk;
@ -404,7 +480,6 @@ struct nvmm_type {
nvmm_flush_blk_fn *flush_blk;

nvmm_submit_io_fn *submit_io;
nvmm_end_io_fn *end_io;
nvmm_erase_blk_fn *erase_blk;

/* Configuration management */
@ -418,6 +493,10 @@ struct nvmm_type {
extern int nvm_register_mgr(struct nvmm_type *);
extern void nvm_unregister_mgr(struct nvmm_type *);

extern struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *,
struct nvm_lun *, unsigned long);
extern void nvm_put_blk_unlocked(struct nvm_dev *, struct nvm_block *);

extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
unsigned long);
extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);
@ -427,7 +506,36 @@ extern int nvm_register(struct request_queue *, char *,
extern void nvm_unregister(char *);

extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
struct ppa_addr *, int);
extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int);
extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
extern void nvm_end_io(struct nvm_rq *, int);
extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int,
void *, int);

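The new nvm_submit_ppa() gives callers a synchronous one-shot I/O path without building a full nvm_rq by hand. A hedged sketch of a single-sector read; my_read_one_sect is hypothetical, NVM_OP_PREAD is the read opcode defined elsewhere in this header, and passing 0 is assumed to mean no special I/O-type flags:

	/* Sketch: one PPA, read opcode, no flags; blocks until the
	 * request completes and returns its error status. */
	static int my_read_one_sect(struct nvm_dev *dev, struct ppa_addr ppa,
						void *buf, int len)
	{
		return nvm_submit_ppa(dev, &ppa, 1, NVM_OP_PREAD, 0, buf, len);
	}
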
/* sysblk.c */
#define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */

/* system block on disk representation */
struct nvm_system_block {
__be32 magic; /* magic signature */
__be32 seqnr; /* sequence number */
__be32 erase_cnt; /* erase count */
__be16 version; /* version number */
u8 mmtype[NVM_MMTYPE_LEN]; /* media manager name */
__be64 fs_ppa; /* PPA for media manager
* superblock */
};

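All on-disk fields are stored big-endian, so readers must convert before comparing. A minimal validity check, assuming only the struct and magic defined above (my_sysblk_valid is an illustrative name):

	/* Sketch: check that a candidate system block read from flash
	 * carries the expected magic signature. */
	static int my_sysblk_valid(struct nvm_system_block *sb)
	{
		return be32_to_cpu(sb->magic) == NVM_SYSBLK_MAGIC;
	}
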
extern int nvm_get_sysblock(struct nvm_dev *, struct nvm_sb_info *);
extern int nvm_update_sysblock(struct nvm_dev *, struct nvm_sb_info *);
extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *);

extern int nvm_dev_factory(struct nvm_dev *, int flags);
#else /* CONFIG_NVM */
struct nvm_dev_ops;

@ -33,6 +33,7 @@

#define NVM_TTYPE_NAME_MAX 48
#define NVM_TTYPE_MAX 63
#define NVM_MMTYPE_LEN 8

#define NVM_CTRL_FILE "/dev/lightnvm/control"

@ -100,6 +101,26 @@ struct nvm_ioctl_remove {
__u32 flags;
};

struct nvm_ioctl_dev_init {
char dev[DISK_NAME_LEN]; /* open-channel SSD device */
char mmtype[NVM_MMTYPE_LEN]; /* register to media manager */

__u32 flags;
};

enum {
NVM_FACTORY_ERASE_ONLY_USER = 1 << 0, /* erase only blocks used as
* host blks or grown blks */
NVM_FACTORY_RESET_HOST_BLKS = 1 << 1, /* remove host blk marks */
NVM_FACTORY_RESET_GRWN_BBLKS = 1 << 2, /* remove grown blk marks */
NVM_FACTORY_NR_BITS = 1 << 3, /* stops here */
};

struct nvm_ioctl_dev_factory {
char dev[DISK_NAME_LEN];

__u32 flags;
};

/* The ioctl type, 'L', 0x20 - 0x2F documented in ioctl-number.txt */
enum {
@ -110,6 +131,12 @@ enum {
/* device level cmds */
NVM_DEV_CREATE_CMD,
NVM_DEV_REMOVE_CMD,

/* Init a device to support LightNVM media managers */
NVM_DEV_INIT_CMD,

/* Factory reset device */
NVM_DEV_FACTORY_CMD,
};

#define NVM_IOCTL 'L' /* 0x4c */
@ -122,6 +149,10 @@ enum {
struct nvm_ioctl_create)
#define NVM_DEV_REMOVE _IOW(NVM_IOCTL, NVM_DEV_REMOVE_CMD, \
struct nvm_ioctl_remove)
#define NVM_DEV_INIT _IOW(NVM_IOCTL, NVM_DEV_INIT_CMD, \
struct nvm_ioctl_dev_init)
#define NVM_DEV_FACTORY _IOW(NVM_IOCTL, NVM_DEV_FACTORY_CMD, \
struct nvm_ioctl_dev_factory)

#define NVM_VERSION_MAJOR 1
#define NVM_VERSION_MINOR 0
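
Taken together with NVM_CTRL_FILE, these two ioctls let user space stamp a media manager onto a device and wipe it back to a factory state. An illustrative user-space sketch; the function name and the "gennvm" media-manager choice are examples, not part of the patch:

	/* User-space sketch: initialize a device for a media manager,
	 * then factory-reset it, clearing host block marks. Uses only
	 * the uapi structures and ioctls defined above. */
	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/lightnvm.h>

	int init_and_factory(const char *dev_name)
	{
		struct nvm_ioctl_dev_init init = { .flags = 0 };
		struct nvm_ioctl_dev_factory fact = {
			.flags = NVM_FACTORY_RESET_HOST_BLKS,
		};
		int fd, ret;

		strncpy(init.dev, dev_name, DISK_NAME_LEN - 1);
		strncpy(init.mmtype, "gennvm", NVM_MMTYPE_LEN - 1);
		strncpy(fact.dev, dev_name, DISK_NAME_LEN - 1);

		fd = open(NVM_CTRL_FILE, O_RDWR);
		if (fd < 0)
			return -1;

		ret = ioctl(fd, NVM_DEV_INIT, &init);
		if (!ret)
			ret = ioctl(fd, NVM_DEV_FACTORY, &fact);

		close(fd);
		return ret;
	}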