Merge branch 'linux-next' of git://git.infradead.org/ubifs-2.6

* 'linux-next' of git://git.infradead.org/ubifs-2.6:
  UBIFS: do not allocate unneeded scan buffer
  UBIFS: do not forget to cancel timers
  UBIFS: remove a bit of unneeded code
  UBIFS: add a commentary about log recovery
  UBIFS: avoid kernel error if ubifs superblock read fails
  UBIFS: introduce new flags for RO mounts
  UBIFS: introduce new flag for RO due to errors
  UBIFS: check return code of pnode_lookup
  UBIFS: check return code of ubifs_lpt_lookup
  UBIFS: improve error reporting when reading bad node
  UBIFS: introduce list sorting debugging checks
  UBIFS: fix assertion warnings in comparison function
  UBIFS: mark unused key objects as invalid
  UBIFS: do not write rubbish into truncation scanning node
  UBIFS: improve assertion in node comparison functions
  UBIFS: do not use key type in list_sort
  UBIFS: do not look up truncation nodes
  UBIFS: fix assertion warning
  UBIFS: do not treat ENOSPC specially
  UBIFS: switch to RO mode after synchronizing
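The series replaces the single c->ro_media flag with three distinct flags, and the hunks below convert every write path to the same check pattern. A minimal sketch of that convention, assembled from the diff itself (the helper name example_write_path is illustrative only, not part of the commit):

/*
 * Sketch only - the flag semantics used throughout this merge:
 *   c->ro_mount - the file-system was mounted read-only
 *   c->ro_media - the underlying UBI volume is read-only
 *   c->ro_error - UBIFS switched to R/O mode because an error happened
 */
static int example_write_path(struct ubifs_info *c)
{
	/* write paths must never run on an R/O mount or R/O media */
	ubifs_assert(!c->ro_media && !c->ro_mount);

	/* after a fatal error every write attempt fails with -EROFS */
	if (c->ro_error)
		return -EROFS;

	return 0;
}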
commit 06d362931a
@@ -63,7 +63,9 @@ static int do_commit(struct ubifs_info *c)
 	struct ubifs_lp_stats lst;
 
 	dbg_cmt("start");
-	if (c->ro_media) {
+	ubifs_assert(!c->ro_media && !c->ro_mount);
+
+	if (c->ro_error) {
 		err = -EROFS;
 		goto out_up;
 	}
fs/ubifs/debug.c (156 changed lines)
@@ -2239,6 +2239,162 @@ out_free:
 	return err;
 }
 
+/**
+ * dbg_check_data_nodes_order - check that list of data nodes is sorted.
+ * @c: UBIFS file-system description object
+ * @head: the list of nodes ('struct ubifs_scan_node' objects)
+ *
+ * This function returns zero if the list of data nodes is sorted correctly,
+ * and %-EINVAL if not.
+ */
+int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head)
+{
+	struct list_head *cur;
+	struct ubifs_scan_node *sa, *sb;
+
+	if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
+		return 0;
+
+	for (cur = head->next; cur->next != head; cur = cur->next) {
+		ino_t inuma, inumb;
+		uint32_t blka, blkb;
+
+		cond_resched();
+		sa = container_of(cur, struct ubifs_scan_node, list);
+		sb = container_of(cur->next, struct ubifs_scan_node, list);
+
+		if (sa->type != UBIFS_DATA_NODE) {
+			ubifs_err("bad node type %d", sa->type);
+			dbg_dump_node(c, sa->node);
+			return -EINVAL;
+		}
+		if (sb->type != UBIFS_DATA_NODE) {
+			ubifs_err("bad node type %d", sb->type);
+			dbg_dump_node(c, sb->node);
+			return -EINVAL;
+		}
+
+		inuma = key_inum(c, &sa->key);
+		inumb = key_inum(c, &sb->key);
+
+		if (inuma < inumb)
+			continue;
+		if (inuma > inumb) {
+			ubifs_err("larger inum %lu goes before inum %lu",
+				  (unsigned long)inuma, (unsigned long)inumb);
+			goto error_dump;
+		}
+
+		blka = key_block(c, &sa->key);
+		blkb = key_block(c, &sb->key);
+
+		if (blka > blkb) {
+			ubifs_err("larger block %u goes before %u", blka, blkb);
+			goto error_dump;
+		}
+		if (blka == blkb) {
+			ubifs_err("two data nodes for the same block");
+			goto error_dump;
+		}
+	}
+
+	return 0;
+
+error_dump:
+	dbg_dump_node(c, sa->node);
+	dbg_dump_node(c, sb->node);
+	return -EINVAL;
+}
+
+/**
+ * dbg_check_nondata_nodes_order - check that list of data nodes is sorted.
+ * @c: UBIFS file-system description object
+ * @head: the list of nodes ('struct ubifs_scan_node' objects)
+ *
+ * This function returns zero if the list of non-data nodes is sorted correctly,
+ * and %-EINVAL if not.
+ */
+int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head)
+{
+	struct list_head *cur;
+	struct ubifs_scan_node *sa, *sb;
+
+	if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
+		return 0;
+
+	for (cur = head->next; cur->next != head; cur = cur->next) {
+		ino_t inuma, inumb;
+		uint32_t hasha, hashb;
+
+		cond_resched();
+		sa = container_of(cur, struct ubifs_scan_node, list);
+		sb = container_of(cur->next, struct ubifs_scan_node, list);
+
+		if (sa->type != UBIFS_INO_NODE && sa->type != UBIFS_DENT_NODE &&
+		    sa->type != UBIFS_XENT_NODE) {
+			ubifs_err("bad node type %d", sa->type);
+			dbg_dump_node(c, sa->node);
+			return -EINVAL;
+		}
+		if (sa->type != UBIFS_INO_NODE && sa->type != UBIFS_DENT_NODE &&
+		    sa->type != UBIFS_XENT_NODE) {
+			ubifs_err("bad node type %d", sb->type);
+			dbg_dump_node(c, sb->node);
+			return -EINVAL;
+		}
+
+		if (sa->type != UBIFS_INO_NODE && sb->type == UBIFS_INO_NODE) {
+			ubifs_err("non-inode node goes before inode node");
+			goto error_dump;
+		}
+
+		if (sa->type == UBIFS_INO_NODE && sb->type != UBIFS_INO_NODE)
+			continue;
+
+		if (sa->type == UBIFS_INO_NODE && sb->type == UBIFS_INO_NODE) {
+			/* Inode nodes are sorted in descending size order */
+			if (sa->len < sb->len) {
+				ubifs_err("smaller inode node goes first");
+				goto error_dump;
+			}
+			continue;
+		}
+
+		/*
+		 * This is either a dentry or xentry, which should be sorted in
+		 * ascending (parent ino, hash) order.
+		 */
+		inuma = key_inum(c, &sa->key);
+		inumb = key_inum(c, &sb->key);
+
+		if (inuma < inumb)
+			continue;
+		if (inuma > inumb) {
+			ubifs_err("larger inum %lu goes before inum %lu",
+				  (unsigned long)inuma, (unsigned long)inumb);
+			goto error_dump;
+		}
+
+		hasha = key_block(c, &sa->key);
+		hashb = key_block(c, &sb->key);
+
+		if (hasha > hashb) {
+			ubifs_err("larger hash %u goes before %u", hasha, hashb);
+			goto error_dump;
+		}
+	}
+
+	return 0;
+
+error_dump:
+	ubifs_msg("dumping first node");
+	dbg_dump_node(c, sa->node);
+	ubifs_msg("dumping second node");
+	dbg_dump_node(c, sb->node);
+	return -EINVAL;
+	return 0;
+}
+
 static int invocation_cnt;
 
 int dbg_force_in_the_gaps(void)
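Both checkers are only active when the UBIFS_CHK_GEN debugging flag is set, and they are meant to be called right after list_sort(). A minimal usage sketch, mirroring the sort_nodes() hunk in fs/ubifs/gc.c further down in this merge:

/* Sketch: verify both lists straight after sorting (debug builds only) */
list_sort(c, &sleb->nodes, &data_nodes_cmp);
list_sort(c, nondata, &nondata_nodes_cmp);

err = dbg_check_data_nodes_order(c, &sleb->nodes);
if (err)
	return err;
err = dbg_check_nondata_nodes_order(c, nondata);
if (err)
	return err;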
@@ -324,6 +324,8 @@ int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode,
 			int row, int col);
 int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
 			 loff_t size);
+int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head);
+int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head);
 
 /* Force the use of in-the-gaps method for testing */
@@ -465,6 +467,8 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
 #define dbg_check_lprops(c) 0
 #define dbg_check_lpt_nodes(c, cnode, row, col) 0
 #define dbg_check_inode_size(c, inode, size) 0
+#define dbg_check_data_nodes_order(c, head) 0
+#define dbg_check_nondata_nodes_order(c, head) 0
 #define dbg_force_in_the_gaps_enabled 0
 #define dbg_force_in_the_gaps() 0
 #define dbg_failure_mode 0
@@ -433,8 +433,9 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
 	struct page *page;
 
 	ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size);
+	ubifs_assert(!c->ro_media && !c->ro_mount);
 
-	if (unlikely(c->ro_media))
+	if (unlikely(c->ro_error))
 		return -EROFS;
 
 	/* Try out the fast-path part first */
@@ -1439,9 +1440,9 @@ static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vm
 
 	dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index,
 		i_size_read(inode));
-	ubifs_assert(!(inode->i_sb->s_flags & MS_RDONLY));
+	ubifs_assert(!c->ro_media && !c->ro_mount);
 
-	if (unlikely(c->ro_media))
+	if (unlikely(c->ro_error))
 		return VM_FAULT_SIGBUS; /* -EROFS */
 
 	/*
@@ -125,10 +125,16 @@ int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
 	struct ubifs_scan_node *sa, *sb;
 
 	cond_resched();
+	if (a == b)
+		return 0;
+
 	sa = list_entry(a, struct ubifs_scan_node, list);
 	sb = list_entry(b, struct ubifs_scan_node, list);
+
 	ubifs_assert(key_type(c, &sa->key) == UBIFS_DATA_KEY);
 	ubifs_assert(key_type(c, &sb->key) == UBIFS_DATA_KEY);
+	ubifs_assert(sa->type == UBIFS_DATA_NODE);
+	ubifs_assert(sb->type == UBIFS_DATA_NODE);
 
 	inuma = key_inum(c, &sa->key);
 	inumb = key_inum(c, &sb->key);
@@ -157,28 +163,40 @@ int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
  */
 int nondata_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
 {
-	int typea, typeb;
 	ino_t inuma, inumb;
 	struct ubifs_info *c = priv;
 	struct ubifs_scan_node *sa, *sb;
 
 	cond_resched();
+	if (a == b)
+		return 0;
+
 	sa = list_entry(a, struct ubifs_scan_node, list);
 	sb = list_entry(b, struct ubifs_scan_node, list);
-	typea = key_type(c, &sa->key);
-	typeb = key_type(c, &sb->key);
-	ubifs_assert(typea != UBIFS_DATA_KEY && typeb != UBIFS_DATA_KEY);
+
+	ubifs_assert(key_type(c, &sa->key) != UBIFS_DATA_KEY &&
+		     key_type(c, &sb->key) != UBIFS_DATA_KEY);
+	ubifs_assert(sa->type != UBIFS_DATA_NODE &&
+		     sb->type != UBIFS_DATA_NODE);
 
 	/* Inodes go before directory entries */
-	if (typea == UBIFS_INO_KEY) {
-		if (typeb == UBIFS_INO_KEY)
+	if (sa->type == UBIFS_INO_NODE) {
+		if (sb->type == UBIFS_INO_NODE)
 			return sb->len - sa->len;
 		return -1;
 	}
-	if (typeb == UBIFS_INO_KEY)
+	if (sb->type == UBIFS_INO_NODE)
 		return 1;
 
-	ubifs_assert(typea == UBIFS_DENT_KEY && typeb == UBIFS_DENT_KEY);
+	ubifs_assert(key_type(c, &sa->key) == UBIFS_DENT_KEY ||
+		     key_type(c, &sa->key) == UBIFS_XENT_KEY);
+	ubifs_assert(key_type(c, &sb->key) == UBIFS_DENT_KEY ||
+		     key_type(c, &sb->key) == UBIFS_XENT_KEY);
+	ubifs_assert(sa->type == UBIFS_DENT_NODE ||
+		     sa->type == UBIFS_XENT_NODE);
+	ubifs_assert(sb->type == UBIFS_DENT_NODE ||
+		     sb->type == UBIFS_XENT_NODE);
 
 	inuma = key_inum(c, &sa->key);
 	inumb = key_inum(c, &sb->key);
@@ -224,17 +242,33 @@ int nondata_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
 static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
 		      struct list_head *nondata, int *min)
 {
+	int err;
 	struct ubifs_scan_node *snod, *tmp;
 
 	*min = INT_MAX;
 
 	/* Separate data nodes and non-data nodes */
 	list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
-		int err;
+		ubifs_assert(snod->type == UBIFS_INO_NODE ||
+			     snod->type == UBIFS_DATA_NODE ||
+			     snod->type == UBIFS_DENT_NODE ||
+			     snod->type == UBIFS_XENT_NODE ||
+			     snod->type == UBIFS_TRUN_NODE);
 
-		ubifs_assert(snod->type != UBIFS_IDX_NODE);
-		ubifs_assert(snod->type != UBIFS_REF_NODE);
-		ubifs_assert(snod->type != UBIFS_CS_NODE);
+		if (snod->type != UBIFS_INO_NODE &&
+		    snod->type != UBIFS_DATA_NODE &&
+		    snod->type != UBIFS_DENT_NODE &&
+		    snod->type != UBIFS_XENT_NODE) {
+			/* Probably truncation node, zap it */
+			list_del(&snod->list);
+			kfree(snod);
+			continue;
+		}
+
+		ubifs_assert(key_type(c, &snod->key) == UBIFS_DATA_KEY ||
+			     key_type(c, &snod->key) == UBIFS_INO_KEY ||
+			     key_type(c, &snod->key) == UBIFS_DENT_KEY ||
+			     key_type(c, &snod->key) == UBIFS_XENT_KEY);
+
 		err = ubifs_tnc_has_node(c, &snod->key, 0, sleb->lnum,
 					 snod->offs, 0);
@@ -258,6 +292,13 @@ static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
 	/* Sort data and non-data nodes */
 	list_sort(c, &sleb->nodes, &data_nodes_cmp);
 	list_sort(c, nondata, &nondata_nodes_cmp);
+
+	err = dbg_check_data_nodes_order(c, &sleb->nodes);
+	if (err)
+		return err;
+	err = dbg_check_nondata_nodes_order(c, nondata);
+	if (err)
+		return err;
 	return 0;
 }
@@ -575,13 +616,14 @@ int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
 	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
 
 	ubifs_assert_cmt_locked(c);
+	ubifs_assert(!c->ro_media && !c->ro_mount);
 
 	if (ubifs_gc_should_commit(c))
 		return -EAGAIN;
 
 	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
 
-	if (c->ro_media) {
+	if (c->ro_error) {
 		ret = -EROFS;
 		goto out_unlock;
 	}
@@ -677,14 +719,12 @@ int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
 
 		ret = ubifs_garbage_collect_leb(c, &lp);
 		if (ret < 0) {
-			if (ret == -EAGAIN || ret == -ENOSPC) {
+			if (ret == -EAGAIN) {
 				/*
-				 * These codes are not errors, so we have to
-				 * return the LEB to lprops. But if the
-				 * 'ubifs_return_leb()' function fails, its
-				 * failure code is propagated to the caller
-				 * instead of the original '-EAGAIN' or
-				 * '-ENOSPC'.
+				 * This is not error, so we have to return the
+				 * LEB to lprops. But if 'ubifs_return_leb()'
+				 * fails, its failure code is propagated to the
+				 * caller instead of the original '-EAGAIN'.
				 */
 				err = ubifs_return_leb(c, lp.lnum);
 				if (err)
@@ -774,8 +814,8 @@ out_unlock:
 out:
 	ubifs_assert(ret < 0);
 	ubifs_assert(ret != -ENOSPC && ret != -EAGAIN);
-	ubifs_ro_mode(c, ret);
 	ubifs_wbuf_sync_nolock(wbuf);
+	ubifs_ro_mode(c, ret);
 	mutex_unlock(&wbuf->io_mutex);
 	ubifs_return_leb(c, lp.lnum);
 	return ret;
@@ -61,8 +61,8 @@
  */
 void ubifs_ro_mode(struct ubifs_info *c, int err)
 {
-	if (!c->ro_media) {
-		c->ro_media = 1;
+	if (!c->ro_error) {
+		c->ro_error = 1;
 		c->no_chk_data_crc = 0;
 		c->vfs_sb->s_flags |= MS_RDONLY;
 		ubifs_warn("switched to read-only mode, error %d", err);
@@ -356,11 +356,11 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
 
 	dbg_io("LEB %d:%d, %d bytes, jhead %s",
 	       wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead));
-	ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY));
 	ubifs_assert(!(wbuf->avail & 7));
 	ubifs_assert(wbuf->offs + c->min_io_size <= c->leb_size);
+	ubifs_assert(!c->ro_media && !c->ro_mount);
 
-	if (c->ro_media)
+	if (c->ro_error)
 		return -EROFS;
 
 	ubifs_pad(c, wbuf->buf + wbuf->used, wbuf->avail);
@@ -440,11 +440,12 @@ int ubifs_bg_wbufs_sync(struct ubifs_info *c)
 {
 	int err, i;
 
+	ubifs_assert(!c->ro_media && !c->ro_mount);
 	if (!c->need_wbuf_sync)
 		return 0;
 	c->need_wbuf_sync = 0;
 
-	if (c->ro_media) {
+	if (c->ro_error) {
 		err = -EROFS;
 		goto out_timers;
 	}
@@ -519,6 +520,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
 	ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
 	ubifs_assert(wbuf->avail > 0 && wbuf->avail <= c->min_io_size);
 	ubifs_assert(mutex_is_locked(&wbuf->io_mutex));
+	ubifs_assert(!c->ro_media && !c->ro_mount);
 
 	if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
 		err = -ENOSPC;
@@ -527,7 +529,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
 
 	cancel_wbuf_timer_nolock(wbuf);
 
-	if (c->ro_media)
+	if (c->ro_error)
 		return -EROFS;
 
 	if (aligned_len <= wbuf->avail) {
@@ -663,8 +665,9 @@ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
 	       buf_len);
 	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
 	ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size);
+	ubifs_assert(!c->ro_media && !c->ro_mount);
 
-	if (c->ro_media)
+	if (c->ro_error)
 		return -EROFS;
 
 	ubifs_prepare_node(c, buf, len, 1);
@@ -815,7 +818,8 @@ int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len,
 	return 0;
 
 out:
-	ubifs_err("bad node at LEB %d:%d", lnum, offs);
+	ubifs_err("bad node at LEB %d:%d, LEB mapping status %d", lnum, offs,
+		  ubi_is_mapped(c->ubi, lnum));
 	dbg_dump_node(c, buf);
 	dbg_dump_stack();
 	return -EINVAL;
@@ -122,11 +122,12 @@ static int reserve_space(struct ubifs_info *c, int jhead, int len)
 	 * better to try to allocate space at the ends of eraseblocks. This is
 	 * what the squeeze parameter does.
 	 */
+	ubifs_assert(!c->ro_media && !c->ro_mount);
 	squeeze = (jhead == BASEHD);
 again:
 	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
 
-	if (c->ro_media) {
+	if (c->ro_error) {
 		err = -EROFS;
 		goto out_unlock;
 	}
@@ -305,6 +305,20 @@ static inline void trun_key_init(const struct ubifs_info *c,
 	key->u32[1] = UBIFS_TRUN_KEY << UBIFS_S_KEY_BLOCK_BITS;
 }
 
+/**
+ * invalid_key_init - initialize invalid node key.
+ * @c: UBIFS file-system description object
+ * @key: key to initialize
+ *
+ * This is a helper function which marks a @key object as invalid.
+ */
+static inline void invalid_key_init(const struct ubifs_info *c,
+				    union ubifs_key *key)
+{
+	key->u32[0] = 0xDEADBEAF;
+	key->u32[1] = UBIFS_INVALID_KEY;
+}
+
 /**
  * key_type - get key type.
  * @c: UBIFS file-system description object
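The new helper pairs with the UBIFS_INVALID_KEY type added in ubifs.h further down: scan.c poisons the key of any scanned node that carries no real key (for example a truncation node), and the TNC then asserts that it is never asked to look such a key up. A minimal sketch of that guard, with the hypothetical wrapper name check_key_is_valid chosen here only for illustration:

/* Sketch: what the TNC lookup paths now assert before using a key
 * (mirrors the ubifs_lookup_level0()/lookup_znode() hunks below) */
static void check_key_is_valid(const struct ubifs_info *c,
			       const union ubifs_key *key)
{
	/* keys poisoned by invalid_key_init() have type UBIFS_INVALID_KEY */
	ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);
}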
@@ -159,7 +159,7 @@ void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
 		jhead = &c->jheads[bud->jhead];
 		list_add_tail(&bud->list, &jhead->buds_list);
 	} else
-		ubifs_assert(c->replaying && (c->vfs_sb->s_flags & MS_RDONLY));
+		ubifs_assert(c->replaying && c->ro_mount);
 
 	/*
 	 * Note, although this is a new bud, we anyway account this space now,
@@ -223,8 +223,8 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
 	}
 
 	mutex_lock(&c->log_mutex);
-
-	if (c->ro_media) {
+	ubifs_assert(!c->ro_media && !c->ro_mount);
+	if (c->ro_error) {
 		err = -EROFS;
 		goto out_unlock;
 	}
@@ -1363,6 +1363,7 @@ static int read_lsave(struct ubifs_info *c)
 		goto out;
 	for (i = 0; i < c->lsave_cnt; i++) {
 		int lnum = c->lsave[i];
+		struct ubifs_lprops *lprops;
 
 		/*
 		 * Due to automatic resizing, the values in the lsave table
@@ -1370,7 +1371,11 @@ static int read_lsave(struct ubifs_info *c)
 		 */
 		if (lnum >= c->leb_cnt)
 			continue;
-		ubifs_lpt_lookup(c, lnum);
+		lprops = ubifs_lpt_lookup(c, lnum);
+		if (IS_ERR(lprops)) {
+			err = PTR_ERR(lprops);
+			goto out;
+		}
 	}
 out:
 	vfree(buf);
@@ -705,6 +705,9 @@ static int make_tree_dirty(struct ubifs_info *c)
 	struct ubifs_pnode *pnode;
 
 	pnode = pnode_lookup(c, 0);
+	if (IS_ERR(pnode))
+		return PTR_ERR(pnode);
+
 	while (pnode) {
 		do_make_pnode_dirty(c, pnode);
 		pnode = next_pnode_to_dirty(c, pnode);
@@ -361,7 +361,8 @@ int ubifs_write_master(struct ubifs_info *c)
 {
 	int err, lnum, offs, len;
 
-	if (c->ro_media)
+	ubifs_assert(!c->ro_media && !c->ro_mount);
+	if (c->ro_error)
 		return -EROFS;
 
 	lnum = UBIFS_MST_LNUM;
@@ -132,7 +132,8 @@ static inline int ubifs_leb_unmap(const struct ubifs_info *c, int lnum)
 {
 	int err;
 
-	if (c->ro_media)
+	ubifs_assert(!c->ro_media && !c->ro_mount);
+	if (c->ro_error)
 		return -EROFS;
 	err = ubi_leb_unmap(c->ubi, lnum);
 	if (err) {
@@ -159,7 +160,8 @@ static inline int ubifs_leb_write(const struct ubifs_info *c, int lnum,
 {
 	int err;
 
-	if (c->ro_media)
+	ubifs_assert(!c->ro_media && !c->ro_mount);
+	if (c->ro_error)
 		return -EROFS;
 	err = ubi_leb_write(c->ubi, lnum, buf, offs, len, dtype);
 	if (err) {
@@ -186,7 +188,8 @@ static inline int ubifs_leb_change(const struct ubifs_info *c, int lnum,
 {
 	int err;
 
-	if (c->ro_media)
+	ubifs_assert(!c->ro_media && !c->ro_mount);
+	if (c->ro_error)
 		return -EROFS;
 	err = ubi_leb_change(c->ubi, lnum, buf, len, dtype);
 	if (err) {
@@ -292,7 +292,7 @@ int ubifs_recover_master_node(struct ubifs_info *c)
 
 	memcpy(c->mst_node, mst, UBIFS_MST_NODE_SZ);
 
-	if ((c->vfs_sb->s_flags & MS_RDONLY)) {
+	if (c->ro_mount) {
 		/* Read-only mode. Keep a copy for switching to rw mode */
 		c->rcvrd_mst_node = kmalloc(sz, GFP_KERNEL);
 		if (!c->rcvrd_mst_node) {
@@ -469,7 +469,7 @@ static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
 		endpt = snod->offs + snod->len;
 	}
 
-	if ((c->vfs_sb->s_flags & MS_RDONLY) && !c->remounting_rw) {
+	if (c->ro_mount && !c->remounting_rw) {
 		/* Add to recovery list */
 		struct ubifs_unclean_leb *ucleb;
 
@@ -772,7 +772,8 @@ out_free:
 * @sbuf: LEB-sized buffer to use
 *
 * This function does a scan of a LEB, but caters for errors that might have
- * been caused by the unclean unmount from which we are attempting to recover.
+ * been caused by unclean reboots from which we are attempting to recover
+ * (assume that only the last log LEB can be corrupted by an unclean reboot).
 *
 * This function returns %0 on success and a negative error code on failure.
 */
@@ -883,7 +884,7 @@ int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf)
 {
 	int err;
 
-	ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY) || c->remounting_rw);
+	ubifs_assert(!c->ro_mount || c->remounting_rw);
 
 	dbg_rcvry("checking index head at %d:%d", c->ihead_lnum, c->ihead_offs);
 	err = recover_head(c, c->ihead_lnum, c->ihead_offs, sbuf);
@@ -1461,7 +1462,7 @@ int ubifs_recover_size(struct ubifs_info *c)
 			}
 		}
 		if (e->exists && e->i_size < e->d_size) {
-			if (!e->inode && (c->vfs_sb->s_flags & MS_RDONLY)) {
+			if (!e->inode && c->ro_mount) {
 				/* Fix the inode size and pin it in memory */
 				struct inode *inode;
 
@@ -627,8 +627,7 @@ static int replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead,
 	ubifs_assert(sleb->endpt - offs >= used);
 	ubifs_assert(sleb->endpt % c->min_io_size == 0);
 
-	if (sleb->endpt + c->min_io_size <= c->leb_size &&
-	    !(c->vfs_sb->s_flags & MS_RDONLY))
+	if (sleb->endpt + c->min_io_size <= c->leb_size && !c->ro_mount)
 		err = ubifs_wbuf_seek_nolock(&c->jheads[jhead].wbuf, lnum,
 					     sleb->endpt, UBI_SHORTTERM);
 
@@ -840,6 +839,11 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
 	if (IS_ERR(sleb)) {
 		if (PTR_ERR(sleb) != -EUCLEAN || !c->need_recovery)
 			return PTR_ERR(sleb);
+		/*
+		 * Note, the below function will recover this log LEB only if
+		 * it is the last, because unclean reboots can possibly corrupt
+		 * only the tail of the log.
+		 */
 		sleb = ubifs_recover_log_leb(c, lnum, offs, sbuf);
 		if (IS_ERR(sleb))
 			return PTR_ERR(sleb);
@@ -851,7 +855,6 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
 	}
 
 	node = sleb->buf;
-
 	snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list);
 	if (c->cs_sqnum == 0) {
 		/*
@@ -898,7 +901,6 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
 	}
 
 	list_for_each_entry(snod, &sleb->nodes, list) {
-
 		cond_resched();
 
 		if (snod->sqnum >= SQNUM_WATERMARK) {
@@ -1011,7 +1013,6 @@ out:
 int ubifs_replay_journal(struct ubifs_info *c)
 {
 	int err, i, lnum, offs, free;
-	void *sbuf = NULL;
 
 	BUILD_BUG_ON(UBIFS_TRUN_KEY > 5);
 
@@ -1026,14 +1027,8 @@ int ubifs_replay_journal(struct ubifs_info *c)
 		return -EINVAL;
 	}
 
-	sbuf = vmalloc(c->leb_size);
-	if (!sbuf)
-		return -ENOMEM;
-
 	dbg_mnt("start replaying the journal");
-
 	c->replaying = 1;
-
 	lnum = c->ltail_lnum = c->lhead_lnum;
 	offs = c->lhead_offs;
 
@@ -1046,7 +1041,7 @@ int ubifs_replay_journal(struct ubifs_info *c)
 			lnum = UBIFS_LOG_LNUM;
 			offs = 0;
 		}
-		err = replay_log_leb(c, lnum, offs, sbuf);
+		err = replay_log_leb(c, lnum, offs, c->sbuf);
 		if (err == 1)
 			/* We hit the end of the log */
 			break;
@@ -1079,7 +1074,6 @@ int ubifs_replay_journal(struct ubifs_info *c)
 out:
 	destroy_replay_tree(c);
 	destroy_bud_list(c);
-	vfree(sbuf);
 	c->replaying = 0;
 	return err;
 }
@@ -542,11 +542,8 @@ int ubifs_read_superblock(struct ubifs_info *c)
 	 * due to the unavailability of time-travelling equipment.
 	 */
 	if (c->fmt_version > UBIFS_FORMAT_VERSION) {
-		struct super_block *sb = c->vfs_sb;
-		int mounting_ro = sb->s_flags & MS_RDONLY;
-
-		ubifs_assert(!c->ro_media || mounting_ro);
-		if (!mounting_ro ||
+		ubifs_assert(!c->ro_media || c->ro_mount);
+		if (!c->ro_mount ||
 		    c->ro_compat_version > UBIFS_RO_COMPAT_VERSION) {
 			ubifs_err("on-flash format version is w%d/r%d, but "
 				  "software only supports up to version "
@@ -624,7 +621,7 @@ int ubifs_read_superblock(struct ubifs_info *c)
 	c->old_leb_cnt = c->leb_cnt;
 	if (c->leb_cnt < c->vi.size && c->leb_cnt < c->max_leb_cnt) {
 		c->leb_cnt = min_t(int, c->max_leb_cnt, c->vi.size);
-		if (c->vfs_sb->s_flags & MS_RDONLY)
+		if (c->ro_mount)
 			dbg_mnt("Auto resizing (ro) from %d LEBs to %d LEBs",
 				c->old_leb_cnt, c->leb_cnt);
 		else {
@@ -197,7 +197,7 @@ int ubifs_add_snod(const struct ubifs_info *c, struct ubifs_scan_leb *sleb,
 	struct ubifs_ino_node *ino = buf;
 	struct ubifs_scan_node *snod;
 
-	snod = kzalloc(sizeof(struct ubifs_scan_node), GFP_NOFS);
+	snod = kmalloc(sizeof(struct ubifs_scan_node), GFP_NOFS);
 	if (!snod)
 		return -ENOMEM;
 
@@ -212,13 +212,15 @@ int ubifs_add_snod(const struct ubifs_info *c, struct ubifs_scan_leb *sleb,
 	case UBIFS_DENT_NODE:
 	case UBIFS_XENT_NODE:
 	case UBIFS_DATA_NODE:
-	case UBIFS_TRUN_NODE:
 		/*
 		 * The key is in the same place in all keyed
 		 * nodes.
 		 */
 		key_read(c, &ino->key, &snod->key);
 		break;
+	default:
+		invalid_key_init(c, &snod->key);
+		break;
 	}
 	list_add_tail(&snod->list, &sleb->nodes);
 	sleb->nodes_cnt += 1;
@@ -250,7 +250,7 @@ static int kick_a_thread(void)
 		dirty_zn_cnt = atomic_long_read(&c->dirty_zn_cnt);
 
 		if (!dirty_zn_cnt || c->cmt_state == COMMIT_BROKEN ||
-		    c->ro_media) {
+		    c->ro_mount || c->ro_error) {
 			mutex_unlock(&c->umount_mutex);
 			continue;
 		}
@@ -1137,11 +1137,11 @@ static int check_free_space(struct ubifs_info *c)
 */
 static int mount_ubifs(struct ubifs_info *c)
 {
-	struct super_block *sb = c->vfs_sb;
-	int err, mounted_read_only = (sb->s_flags & MS_RDONLY);
+	int err;
 	long long x;
 	size_t sz;
 
+	c->ro_mount = !!(c->vfs_sb->s_flags & MS_RDONLY);
 	err = init_constants_early(c);
 	if (err)
 		return err;
@@ -1154,7 +1154,7 @@ static int mount_ubifs(struct ubifs_info *c)
 	if (err)
 		goto out_free;
 
-	if (c->empty && (mounted_read_only || c->ro_media)) {
+	if (c->empty && (c->ro_mount || c->ro_media)) {
 		/*
 		 * This UBI volume is empty, and read-only, or the file system
 		 * is mounted read-only - we cannot format it.
@@ -1165,7 +1165,7 @@ static int mount_ubifs(struct ubifs_info *c)
 		goto out_free;
 	}
 
-	if (c->ro_media && !mounted_read_only) {
+	if (c->ro_media && !c->ro_mount) {
 		ubifs_err("cannot mount read-write - read-only media");
 		err = -EROFS;
 		goto out_free;
@@ -1185,7 +1185,7 @@ static int mount_ubifs(struct ubifs_info *c)
 	if (!c->sbuf)
 		goto out_free;
 
-	if (!mounted_read_only) {
+	if (!c->ro_mount) {
 		c->ileb_buf = vmalloc(c->leb_size);
 		if (!c->ileb_buf)
 			goto out_free;
@@ -1228,7 +1228,7 @@ static int mount_ubifs(struct ubifs_info *c)
 	}
 
 	sprintf(c->bgt_name, BGT_NAME_PATTERN, c->vi.ubi_num, c->vi.vol_id);
-	if (!mounted_read_only) {
+	if (!c->ro_mount) {
 		err = alloc_wbufs(c);
 		if (err)
 			goto out_cbuf;
@@ -1254,12 +1254,12 @@ static int mount_ubifs(struct ubifs_info *c)
 	if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) {
 		ubifs_msg("recovery needed");
 		c->need_recovery = 1;
-		if (!mounted_read_only) {
+		if (!c->ro_mount) {
 			err = ubifs_recover_inl_heads(c, c->sbuf);
 			if (err)
 				goto out_master;
 		}
-	} else if (!mounted_read_only) {
+	} else if (!c->ro_mount) {
 		/*
 		 * Set the "dirty" flag so that if we reboot uncleanly we
 		 * will notice this immediately on the next mount.
@@ -1270,7 +1270,7 @@ static int mount_ubifs(struct ubifs_info *c)
 			goto out_master;
 	}
 
-	err = ubifs_lpt_init(c, 1, !mounted_read_only);
+	err = ubifs_lpt_init(c, 1, !c->ro_mount);
 	if (err)
 		goto out_lpt;
 
@@ -1285,11 +1285,11 @@ static int mount_ubifs(struct ubifs_info *c)
 	/* Calculate 'min_idx_lebs' after journal replay */
 	c->min_idx_lebs = ubifs_calc_min_idx_lebs(c);
 
-	err = ubifs_mount_orphans(c, c->need_recovery, mounted_read_only);
+	err = ubifs_mount_orphans(c, c->need_recovery, c->ro_mount);
 	if (err)
 		goto out_orphans;
 
-	if (!mounted_read_only) {
+	if (!c->ro_mount) {
 		int lnum;
 
 		err = check_free_space(c);
@@ -1351,7 +1351,7 @@ static int mount_ubifs(struct ubifs_info *c)
 	spin_unlock(&ubifs_infos_lock);
 
 	if (c->need_recovery) {
-		if (mounted_read_only)
+		if (c->ro_mount)
 			ubifs_msg("recovery deferred");
 		else {
 			c->need_recovery = 0;
@@ -1378,7 +1378,7 @@ static int mount_ubifs(struct ubifs_info *c)
 
 	ubifs_msg("mounted UBI device %d, volume %d, name \"%s\"",
 		  c->vi.ubi_num, c->vi.vol_id, c->vi.name);
-	if (mounted_read_only)
+	if (c->ro_mount)
 		ubifs_msg("mounted read-only");
 	x = (long long)c->main_lebs * c->leb_size;
 	ubifs_msg("file system size: %lld bytes (%lld KiB, %lld MiB, %d "
@@ -1640,7 +1640,7 @@ static int ubifs_remount_rw(struct ubifs_info *c)
 	}
 
 	dbg_gen("re-mounted read-write");
-	c->vfs_sb->s_flags &= ~MS_RDONLY;
+	c->ro_mount = 0;
 	c->remounting_rw = 0;
 	c->always_chk_crc = 0;
 	err = dbg_check_space_info(c);
@@ -1676,7 +1676,7 @@ static void ubifs_remount_ro(struct ubifs_info *c)
 	int i, err;
 
 	ubifs_assert(!c->need_recovery);
-	ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY));
+	ubifs_assert(!c->ro_mount);
 
 	mutex_lock(&c->umount_mutex);
 	if (c->bgt) {
@@ -1686,10 +1686,8 @@ static void ubifs_remount_ro(struct ubifs_info *c)
 
 	dbg_save_space_info(c);
 
-	for (i = 0; i < c->jhead_cnt; i++) {
+	for (i = 0; i < c->jhead_cnt; i++)
 		ubifs_wbuf_sync(&c->jheads[i].wbuf);
-		hrtimer_cancel(&c->jheads[i].wbuf.timer);
-	}
 
 	c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
 	c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
@@ -1704,6 +1702,7 @@ static void ubifs_remount_ro(struct ubifs_info *c)
 	vfree(c->ileb_buf);
 	c->ileb_buf = NULL;
 	ubifs_lpt_free(c, 1);
+	c->ro_mount = 1;
 	err = dbg_check_space_info(c);
 	if (err)
 		ubifs_ro_mode(c, err);
@@ -1735,7 +1734,7 @@ static void ubifs_put_super(struct super_block *sb)
 	 * the mutex is locked.
 	 */
 	mutex_lock(&c->umount_mutex);
-	if (!(c->vfs_sb->s_flags & MS_RDONLY)) {
+	if (!c->ro_mount) {
 		/*
 		 * First of all kill the background thread to make sure it does
 		 * not interfere with un-mounting and freeing resources.
@@ -1745,23 +1744,22 @@ static void ubifs_put_super(struct super_block *sb)
 			c->bgt = NULL;
 		}
 
-		/* Synchronize write-buffers */
-		if (c->jheads)
+		/*
+		 * On fatal errors c->ro_error is set to 1, in which case we do
+		 * not write the master node.
+		 */
+		if (!c->ro_error) {
+			int err;
+
+			/* Synchronize write-buffers */
 			for (i = 0; i < c->jhead_cnt; i++)
 				ubifs_wbuf_sync(&c->jheads[i].wbuf);
 
-		/*
-		 * On fatal errors c->ro_media is set to 1, in which case we do
-		 * not write the master node.
-		 */
-		if (!c->ro_media) {
 			/*
 			 * We are being cleanly unmounted which means the
 			 * orphans were killed - indicate this in the master
 			 * node. Also save the reserved GC LEB number.
 			 */
-			int err;
-
 			c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
 			c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
 			c->mst_node->gc_lnum = cpu_to_le32(c->gc_lnum);
@@ -1774,6 +1772,10 @@ static void ubifs_put_super(struct super_block *sb)
 				 */
 				ubifs_err("failed to write master node, "
 					  "error %d", err);
+		} else {
+			for (i = 0; i < c->jhead_cnt; i++)
+				/* Make sure write-buffer timers are canceled */
+				hrtimer_cancel(&c->jheads[i].wbuf.timer);
 		}
 	}
 
@@ -1797,17 +1799,21 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
 		return err;
 	}
 
-	if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
+	if (c->ro_mount && !(*flags & MS_RDONLY)) {
+		if (c->ro_error) {
+			ubifs_msg("cannot re-mount R/W due to prior errors");
+			return -EROFS;
+		}
 		if (c->ro_media) {
-			ubifs_msg("cannot re-mount due to prior errors");
+			ubifs_msg("cannot re-mount R/W - UBI volume is R/O");
 			return -EROFS;
 		}
 		err = ubifs_remount_rw(c);
 		if (err)
 			return err;
-	} else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY)) {
-		if (c->ro_media) {
-			ubifs_msg("cannot re-mount due to prior errors");
+	} else if (!c->ro_mount && (*flags & MS_RDONLY)) {
+		if (c->ro_error) {
+			ubifs_msg("cannot re-mount R/O due to prior errors");
 			return -EROFS;
 		}
 		ubifs_remount_ro(c);
@@ -2049,8 +2055,8 @@ static int ubifs_get_sb(struct file_system_type *fs_type, int flags,
 	 */
 	ubi = open_ubi(name, UBI_READONLY);
 	if (IS_ERR(ubi)) {
-		ubifs_err("cannot open \"%s\", error %d",
-			  name, (int)PTR_ERR(ubi));
+		dbg_err("cannot open \"%s\", error %d",
+			name, (int)PTR_ERR(ubi));
 		return PTR_ERR(ubi);
 	}
 	ubi_get_volume_info(ubi, &vi);
@@ -2064,9 +2070,11 @@ static int ubifs_get_sb(struct file_system_type *fs_type, int flags,
 	}
 
 	if (sb->s_root) {
+		struct ubifs_info *c1 = sb->s_fs_info;
+
 		/* A new mount point for already mounted UBIFS */
 		dbg_gen("this ubi volume is already mounted");
-		if ((flags ^ sb->s_flags) & MS_RDONLY) {
+		if (!!(flags & MS_RDONLY) != c1->ro_mount) {
 			err = -EBUSY;
 			goto out_deact;
 		}
|
@ -1177,6 +1177,7 @@ int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key,
|
|||
unsigned long time = get_seconds();
|
||||
|
||||
dbg_tnc("search key %s", DBGKEY(key));
|
||||
ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);
|
||||
|
||||
znode = c->zroot.znode;
|
||||
if (unlikely(!znode)) {
|
||||
|
@ -2966,7 +2967,7 @@ static struct ubifs_znode *right_znode(struct ubifs_info *c,
|
|||
*
|
||||
* This function searches an indexing node by its first key @key and its
|
||||
* address @lnum:@offs. It looks up the indexing tree by pulling all indexing
|
||||
* nodes it traverses to TNC. This function is called fro indexing nodes which
|
||||
* nodes it traverses to TNC. This function is called for indexing nodes which
|
||||
* were found on the media by scanning, for example when garbage-collecting or
|
||||
* when doing in-the-gaps commit. This means that the indexing node which is
|
||||
* looked for does not have to have exactly the same leftmost key @key, because
|
||||
|
@ -2988,6 +2989,8 @@ static struct ubifs_znode *lookup_znode(struct ubifs_info *c,
|
|||
struct ubifs_znode *znode, *zn;
|
||||
int n, nn;
|
||||
|
||||
ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY);
|
||||
|
||||
/*
|
||||
* The arguments have probably been read off flash, so don't assume
|
||||
* they are valid.
|
||||
|
|
|
@@ -119,8 +119,12 @@
 * in TNC. However, when replaying, it is handy to introduce fake "truncation"
 * keys for truncation nodes because the code becomes simpler. So we define
 * %UBIFS_TRUN_KEY type.
+ *
+ * But otherwise, out of the journal reply scope, the truncation keys are
+ * invalid.
 */
-#define UBIFS_TRUN_KEY UBIFS_KEY_TYPES_CNT
+#define UBIFS_TRUN_KEY    UBIFS_KEY_TYPES_CNT
+#define UBIFS_INVALID_KEY UBIFS_KEY_TYPES_CNT
 
 /*
 * How much a directory entry/extended attribute entry adds to the parent/host
@@ -1028,6 +1032,8 @@ struct ubifs_debug_info;
 * @max_leb_cnt: maximum count of logical eraseblocks
 * @old_leb_cnt: count of logical eraseblocks before re-size
 * @ro_media: the underlying UBI volume is read-only
+ * @ro_mount: the file-system was mounted as read-only
+ * @ro_error: UBIFS switched to R/O mode because an error happened
 *
 * @dirty_pg_cnt: number of dirty pages (not used)
 * @dirty_zn_cnt: number of dirty znodes
@@ -1168,11 +1174,14 @@ struct ubifs_debug_info;
 * @replay_sqnum: sequence number of node currently being replayed
 * @need_recovery: file-system needs recovery
 * @replaying: set to %1 during journal replay
- * @unclean_leb_list: LEBs to recover when mounting ro to rw
- * @rcvrd_mst_node: recovered master node to write when mounting ro to rw
+ * @unclean_leb_list: LEBs to recover when re-mounting R/O mounted FS to R/W
+ *                    mode
+ * @rcvrd_mst_node: recovered master node to write when re-mounting R/O mounted
+ *                  FS to R/W mode
 * @size_tree: inode size information for recovery
- * @remounting_rw: set while remounting from ro to rw (sb flags have MS_RDONLY)
- * @always_chk_crc: always check CRCs (while mounting and remounting rw)
+ * @remounting_rw: set while re-mounting from R/O mode to R/W mode
+ * @always_chk_crc: always check CRCs (while mounting and remounting to R/W
+ *                  mode)
 * @mount_opts: UBIFS-specific mount options
 *
 * @dbg: debugging-related information
@@ -1268,7 +1277,9 @@ struct ubifs_info {
 	int leb_cnt;
 	int max_leb_cnt;
 	int old_leb_cnt;
-	int ro_media;
+	unsigned int ro_media:1;
+	unsigned int ro_mount:1;
+	unsigned int ro_error:1;
 
 	atomic_long_t dirty_pg_cnt;
 	atomic_long_t dirty_zn_cnt;