UBI: Split __wl_get_peb()
Split __wl_get_peb() into two functions, wl_get_wle() and wl_get_peb(). wl_get_peb() works exactly like __wl_get_peb(), but wl_get_wle() does not call produce_free_peb(). While refilling the fastmap user pool we cannot release ubi->wl_lock, which produce_free_peb() does. Hence the fastmap logic now uses wl_get_wle().

Signed-off-by: Richard Weinberger <richard@nod.at>
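For illustration, here is a minimal sketch of the kind of refill loop the commit message alludes to. The name refill_wl_user_pool() appears in the new kernel-doc below, but its body is not part of this diff; the pool layout assumed here (an int pebs[] array plus size/used/max_size counters) is an illustrative assumption. The point is only that wl_get_wle() can be called repeatedly without ever dropping ubi->wl_lock, whereas wl_get_peb() may fall back to produce_free_peb(), which releases the lock.

/*
 * Illustrative sketch only -- not the code added by this commit.
 * Assumes a fastmap-style pool with pebs[]/size/used/max_size fields.
 */
static void refill_user_pool_sketch(struct ubi_device *ubi,
				    struct ubi_fm_pool *pool)
{
	struct ubi_wl_entry *e;

	spin_lock(&ubi->wl_lock);

	pool->size = 0;
	while (pool->size < pool->max_size) {
		/* wl_get_wle() never releases ubi->wl_lock */
		e = wl_get_wle(ubi);
		if (!e)
			break;	/* no free PEBs available right now */

		pool->pebs[pool->size++] = e->pnum;
	}
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}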
commit 691a870563 (parent d59f21bebe)
@@ -496,13 +496,46 @@ out:
 #endif
 
 /**
- * __wl_get_peb - get a physical eraseblock.
+ * wl_get_wle - get a mean wl entry to be used by wl_get_peb() or
+ * refill_wl_user_pool().
+ * @ubi: UBI device description object
+ *
+ * This function returns a wear leveling entry in case of success and
+ * NULL in case of failure.
+ */
+static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
+{
+	struct ubi_wl_entry *e;
+
+	e = find_mean_wl_entry(ubi, &ubi->free);
+	if (!e) {
+		ubi_err(ubi, "no free eraseblocks");
+		return NULL;
+	}
+
+	self_check_in_wl_tree(ubi, e, &ubi->free);
+
+	/*
+	 * Move the physical eraseblock to the protection queue where it will
+	 * be protected from being moved for some time.
+	 */
+	rb_erase(&e->u.rb, &ubi->free);
+	ubi->free_count--;
+	dbg_wl("PEB %d EC %d", e->pnum, e->ec);
+
+	return e;
+}
+
+/**
+ * wl_get_peb - get a physical eraseblock.
  * @ubi: UBI device description object
  *
  * This function returns a physical eraseblock in case of success and a
  * negative error code in case of failure.
+ * It is the low level component of ubi_wl_get_peb() in the non-fastmap
+ * case.
  */
-static int __wl_get_peb(struct ubi_device *ubi)
+static int wl_get_peb(struct ubi_device *ubi)
 {
 	int err;
 	struct ubi_wl_entry *e;
@@ -521,27 +554,9 @@ retry:
 		goto retry;
 	}
 
-	e = find_mean_wl_entry(ubi, &ubi->free);
-	if (!e) {
-		ubi_err(ubi, "no free eraseblocks");
-		return -ENOSPC;
-	}
-
-	self_check_in_wl_tree(ubi, e, &ubi->free);
-
-	/*
-	 * Move the physical eraseblock to the protection queue where it will
-	 * be protected from being moved for some time.
-	 */
-	rb_erase(&e->u.rb, &ubi->free);
-	ubi->free_count--;
-	dbg_wl("PEB %d EC %d", e->pnum, e->ec);
-#ifndef CONFIG_MTD_UBI_FASTMAP
-	/* We have to enqueue e only if fastmap is disabled,
-	 * is fastmap enabled prot_queue_add() will be called by
-	 * ubi_wl_get_peb() after removing e from the pool. */
+	e = wl_get_wle(ubi);
 	prot_queue_add(ubi, e);
-#endif
+
 	return e->pnum;
 }
@@ -701,7 +716,7 @@ int ubi_wl_get_peb(struct ubi_device *ubi)
 	int peb, err;
 
 	spin_lock(&ubi->wl_lock);
-	peb = __wl_get_peb(ubi);
+	peb = wl_get_peb(ubi);
 	spin_unlock(&ubi->wl_lock);
 
 	if (peb < 0)