[PATCH] fs: use list_move()

This patch converts the combination of list_del(A) and list_add(A, B) to
list_move(A, B) under fs/.

Cc: Ian Kent <raven@themaw.net>
Acked-by: Joel Becker <joel.becker@oracle.com>
Cc: Neil Brown <neilb@cse.unsw.edu.au>
Cc: Hans Reiser <reiserfs-dev@namesys.com>
Cc: Urban Widmark <urban@teststation.com>
Acked-by: David Howells <dhowells@redhat.com>
Acked-by: Mark Fasheh <mark.fasheh@oracle.com>
Signed-off-by: Akinobu Mita <mita@miraclelinux.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
Akinobu Mita 2006-06-26 00:24:46 -07:00 committed by Linus Torvalds
parent 179e09172a
commit f116629d03
22 changed files with 38 additions and 76 deletions

View File

@@ -413,8 +413,7 @@ int afs_server_find_by_peer(const struct rxrpc_peer *peer,
/* we found it in the graveyard - resurrect it */ /* we found it in the graveyard - resurrect it */
found_dead_server: found_dead_server:
list_del(&server->link); list_move_tail(&server->link, &cell->sv_list);
list_add_tail(&server->link, &cell->sv_list);
afs_get_server(server); afs_get_server(server);
afs_kafstimod_del_timer(&server->timeout); afs_kafstimod_del_timer(&server->timeout);
spin_unlock(&cell->sv_gylock); spin_unlock(&cell->sv_gylock);

View File

@@ -136,8 +136,7 @@ static int kafsasyncd(void *arg)
if (!list_empty(&kafsasyncd_async_attnq)) { if (!list_empty(&kafsasyncd_async_attnq)) {
op = list_entry(kafsasyncd_async_attnq.next, op = list_entry(kafsasyncd_async_attnq.next,
struct afs_async_op, link); struct afs_async_op, link);
list_del(&op->link); list_move_tail(&op->link,
list_add_tail(&op->link,
&kafsasyncd_async_busyq); &kafsasyncd_async_busyq);
} }
@@ -204,8 +203,7 @@ void afs_kafsasyncd_begin_op(struct afs_async_op *op)
init_waitqueue_entry(&op->waiter, kafsasyncd_task); init_waitqueue_entry(&op->waiter, kafsasyncd_task);
add_wait_queue(&op->call->waitq, &op->waiter); add_wait_queue(&op->call->waitq, &op->waiter);
list_del(&op->link); list_move_tail(&op->link, &kafsasyncd_async_busyq);
list_add_tail(&op->link, &kafsasyncd_async_busyq);
spin_unlock(&kafsasyncd_async_lock); spin_unlock(&kafsasyncd_async_lock);
@@ -223,8 +221,7 @@ void afs_kafsasyncd_attend_op(struct afs_async_op *op)
spin_lock(&kafsasyncd_async_lock); spin_lock(&kafsasyncd_async_lock);
list_del(&op->link); list_move_tail(&op->link, &kafsasyncd_async_attnq);
list_add_tail(&op->link, &kafsasyncd_async_attnq);
spin_unlock(&kafsasyncd_async_lock); spin_unlock(&kafsasyncd_async_lock);

View File

@@ -123,8 +123,7 @@ int afs_server_lookup(struct afs_cell *cell, const struct in_addr *addr,
resurrect_server: resurrect_server:
_debug("resurrecting server"); _debug("resurrecting server");
list_del(&zombie->link); list_move_tail(&zombie->link, &cell->sv_list);
list_add_tail(&zombie->link, &cell->sv_list);
afs_get_server(zombie); afs_get_server(zombie);
afs_kafstimod_del_timer(&zombie->timeout); afs_kafstimod_del_timer(&zombie->timeout);
spin_unlock(&cell->sv_gylock); spin_unlock(&cell->sv_gylock);
@@ -168,8 +167,7 @@ void afs_put_server(struct afs_server *server)
} }
spin_lock(&cell->sv_gylock); spin_lock(&cell->sv_gylock);
list_del(&server->link); list_move_tail(&server->link, &cell->sv_graveyard);
list_add_tail(&server->link, &cell->sv_graveyard);
/* time out in 10 secs */ /* time out in 10 secs */
afs_kafstimod_add_timer(&server->timeout, 10 * HZ); afs_kafstimod_add_timer(&server->timeout, 10 * HZ);

View File

@@ -326,8 +326,7 @@ int afs_vlocation_lookup(struct afs_cell *cell,
/* found in the graveyard - resurrect */ /* found in the graveyard - resurrect */
_debug("found in graveyard"); _debug("found in graveyard");
atomic_inc(&vlocation->usage); atomic_inc(&vlocation->usage);
list_del(&vlocation->link); list_move_tail(&vlocation->link, &cell->vl_list);
list_add_tail(&vlocation->link, &cell->vl_list);
spin_unlock(&cell->vl_gylock); spin_unlock(&cell->vl_gylock);
afs_kafstimod_del_timer(&vlocation->timeout); afs_kafstimod_del_timer(&vlocation->timeout);
@@ -478,8 +477,7 @@ static void __afs_put_vlocation(struct afs_vlocation *vlocation)
} }
/* move to graveyard queue */ /* move to graveyard queue */
list_del(&vlocation->link); list_move_tail(&vlocation->link,&cell->vl_graveyard);
list_add_tail(&vlocation->link,&cell->vl_graveyard);
/* remove from pending timeout queue (refcounted if actually being /* remove from pending timeout queue (refcounted if actually being
* updated) */ * updated) */

View File

@@ -104,8 +104,7 @@ static void afs_vnode_finalise_status_update(struct afs_vnode *vnode,
vnode->cb_expiry * HZ); vnode->cb_expiry * HZ);
spin_lock(&afs_cb_hash_lock); spin_lock(&afs_cb_hash_lock);
list_del(&vnode->cb_hash_link); list_move_tail(&vnode->cb_hash_link,
list_add_tail(&vnode->cb_hash_link,
&afs_cb_hash(server, &vnode->fid)); &afs_cb_hash(server, &vnode->fid));
spin_unlock(&afs_cb_hash_lock); spin_unlock(&afs_cb_hash_lock);

View File

@@ -376,8 +376,7 @@ next:
DPRINTK("returning %p %.*s", DPRINTK("returning %p %.*s",
expired, (int)expired->d_name.len, expired->d_name.name); expired, (int)expired->d_name.len, expired->d_name.name);
spin_lock(&dcache_lock); spin_lock(&dcache_lock);
list_del(&expired->d_parent->d_subdirs); list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
list_add(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
spin_unlock(&dcache_lock); spin_unlock(&dcache_lock);
return expired; return expired;
} }

View File

@@ -1009,8 +1009,7 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
/* fallthrough */ /* fallthrough */
default: default:
if (filp->f_pos == 2) { if (filp->f_pos == 2) {
list_del(q); list_move(q, &parent_sd->s_children);
list_add(q, &parent_sd->s_children);
} }
for (p=q->next; p!= &parent_sd->s_children; p=p->next) { for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
struct configfs_dirent *next; struct configfs_dirent *next;
@@ -1033,8 +1032,7 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
dt_type(next)) < 0) dt_type(next)) < 0)
return 0; return 0;
list_del(q); list_move(q, p);
list_add(q, p);
p = q; p = q;
filp->f_pos++; filp->f_pos++;
} }

View File

@@ -53,8 +53,7 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
if (!instr) { if (!instr) {
printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n"); printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
spin_lock(&c->erase_completion_lock); spin_lock(&c->erase_completion_lock);
list_del(&jeb->list); list_move(&jeb->list, &c->erase_pending_list);
list_add(&jeb->list, &c->erase_pending_list);
c->erasing_size -= c->sector_size; c->erasing_size -= c->sector_size;
c->dirty_size += c->sector_size; c->dirty_size += c->sector_size;
jeb->dirty_size = c->sector_size; jeb->dirty_size = c->sector_size;
@@ -86,8 +85,7 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
/* Erase failed immediately. Refile it on the list */ /* Erase failed immediately. Refile it on the list */
D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret)); D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret));
spin_lock(&c->erase_completion_lock); spin_lock(&c->erase_completion_lock);
list_del(&jeb->list); list_move(&jeb->list, &c->erase_pending_list);
list_add(&jeb->list, &c->erase_pending_list);
c->erasing_size -= c->sector_size; c->erasing_size -= c->sector_size;
c->dirty_size += c->sector_size; c->dirty_size += c->sector_size;
jeb->dirty_size = c->sector_size; jeb->dirty_size = c->sector_size;
@@ -161,8 +159,7 @@ static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblo
{ {
D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset)); D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset));
spin_lock(&c->erase_completion_lock); spin_lock(&c->erase_completion_lock);
list_del(&jeb->list); list_move_tail(&jeb->list, &c->erase_complete_list);
list_add_tail(&jeb->list, &c->erase_complete_list);
spin_unlock(&c->erase_completion_lock); spin_unlock(&c->erase_completion_lock);
/* Ensure that kupdated calls us again to mark them clean */ /* Ensure that kupdated calls us again to mark them clean */
jffs2_erase_pending_trigger(c); jffs2_erase_pending_trigger(c);
@@ -178,8 +175,7 @@ static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock
if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) { if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
/* We'd like to give this block another try. */ /* We'd like to give this block another try. */
spin_lock(&c->erase_completion_lock); spin_lock(&c->erase_completion_lock);
list_del(&jeb->list); list_move(&jeb->list, &c->erase_pending_list);
list_add(&jeb->list, &c->erase_pending_list);
c->erasing_size -= c->sector_size; c->erasing_size -= c->sector_size;
c->dirty_size += c->sector_size; c->dirty_size += c->sector_size;
jeb->dirty_size = c->sector_size; jeb->dirty_size = c->sector_size;
@@ -191,8 +187,7 @@ static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock
spin_lock(&c->erase_completion_lock); spin_lock(&c->erase_completion_lock);
c->erasing_size -= c->sector_size; c->erasing_size -= c->sector_size;
c->bad_size += c->sector_size; c->bad_size += c->sector_size;
list_del(&jeb->list); list_move(&jeb->list, &c->bad_list);
list_add(&jeb->list, &c->bad_list);
c->nr_erasing_blocks--; c->nr_erasing_blocks--;
spin_unlock(&c->erase_completion_lock); spin_unlock(&c->erase_completion_lock);
wake_up(&c->erase_wait); wake_up(&c->erase_wait);

View File

@@ -211,8 +211,7 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c)
struct jffs2_eraseblock *ejeb; struct jffs2_eraseblock *ejeb;
ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list); ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
list_del(&ejeb->list); list_move_tail(&ejeb->list, &c->erase_pending_list);
list_add_tail(&ejeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++; c->nr_erasing_blocks++;
jffs2_erase_pending_trigger(c); jffs2_erase_pending_trigger(c);
D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n", D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",

View File

@@ -495,8 +495,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
/* Fix up the original jeb now it's on the bad_list */ /* Fix up the original jeb now it's on the bad_list */
if (first_raw == jeb->first_node) { if (first_raw == jeb->first_node) {
D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset)); D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
list_del(&jeb->list); list_move(&jeb->list, &c->erase_pending_list);
list_add(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++; c->nr_erasing_blocks++;
jffs2_erase_pending_trigger(c); jffs2_erase_pending_trigger(c);
} }

View File

@@ -529,8 +529,7 @@ move_to_confirmed(struct nfs4_client *clp)
dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp); dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
list_del_init(&clp->cl_strhash); list_del_init(&clp->cl_strhash);
list_del_init(&clp->cl_idhash); list_move(&clp->cl_idhash, &conf_id_hashtbl[idhashval]);
list_add(&clp->cl_idhash, &conf_id_hashtbl[idhashval]);
strhashval = clientstr_hashval(clp->cl_recdir); strhashval = clientstr_hashval(clp->cl_recdir);
list_add(&clp->cl_strhash, &conf_str_hashtbl[strhashval]); list_add(&clp->cl_strhash, &conf_str_hashtbl[strhashval]);
renew_client(clp); renew_client(clp);

View File

@@ -103,8 +103,7 @@ nfsd_cache_shutdown(void)
static void static void
lru_put_end(struct svc_cacherep *rp) lru_put_end(struct svc_cacherep *rp)
{ {
list_del(&rp->c_lru); list_move_tail(&rp->c_lru, &lru_head);
list_add_tail(&rp->c_lru, &lru_head);
} }
/* /*

View File

@@ -381,8 +381,7 @@ do_ast:
ret = DLM_NORMAL; ret = DLM_NORMAL;
if (past->type == DLM_AST) { if (past->type == DLM_AST) {
/* do not alter lock refcount. switching lists. */ /* do not alter lock refcount. switching lists. */
list_del_init(&lock->list); list_move_tail(&lock->list, &res->granted);
list_add_tail(&lock->list, &res->granted);
mlog(0, "ast: adding to granted list... type=%d, " mlog(0, "ast: adding to granted list... type=%d, "
"convert_type=%d\n", lock->ml.type, lock->ml.convert_type); "convert_type=%d\n", lock->ml.type, lock->ml.convert_type);
if (lock->ml.convert_type != LKM_IVMODE) { if (lock->ml.convert_type != LKM_IVMODE) {

View File

@@ -231,8 +231,7 @@ switch_queues:
lock->ml.convert_type = type; lock->ml.convert_type = type;
/* do not alter lock refcount. switching lists. */ /* do not alter lock refcount. switching lists. */
list_del_init(&lock->list); list_move_tail(&lock->list, &res->converting);
list_add_tail(&lock->list, &res->converting);
unlock_exit: unlock_exit:
spin_unlock(&lock->spinlock); spin_unlock(&lock->spinlock);
@@ -248,8 +247,7 @@ void dlm_revert_pending_convert(struct dlm_lock_resource *res,
struct dlm_lock *lock) struct dlm_lock *lock)
{ {
/* do not alter lock refcount. switching lists. */ /* do not alter lock refcount. switching lists. */
list_del_init(&lock->list); list_move_tail(&lock->list, &res->granted);
list_add_tail(&lock->list, &res->granted);
lock->ml.convert_type = LKM_IVMODE; lock->ml.convert_type = LKM_IVMODE;
lock->lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB); lock->lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB);
} }
@@ -294,8 +292,7 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
res->state |= DLM_LOCK_RES_IN_PROGRESS; res->state |= DLM_LOCK_RES_IN_PROGRESS;
/* move lock to local convert queue */ /* move lock to local convert queue */
/* do not alter lock refcount. switching lists. */ /* do not alter lock refcount. switching lists. */
list_del_init(&lock->list); list_move_tail(&lock->list, &res->converting);
list_add_tail(&lock->list, &res->converting);
lock->convert_pending = 1; lock->convert_pending = 1;
lock->ml.convert_type = type; lock->ml.convert_type = type;

View File

@@ -239,8 +239,7 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
mlog(0, "%s: $RECOVERY lock for this node (%u) is " mlog(0, "%s: $RECOVERY lock for this node (%u) is "
"mastered by %u; got lock, manually granting (no ast)\n", "mastered by %u; got lock, manually granting (no ast)\n",
dlm->name, dlm->node_num, res->owner); dlm->name, dlm->node_num, res->owner);
list_del_init(&lock->list); list_move_tail(&lock->list, &res->granted);
list_add_tail(&lock->list, &res->granted);
} }
spin_unlock(&res->spinlock); spin_unlock(&res->spinlock);

View File

@@ -905,13 +905,11 @@ static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
mlog(0, "found lockres owned by dead node while " mlog(0, "found lockres owned by dead node while "
"doing recovery for node %u. sending it.\n", "doing recovery for node %u. sending it.\n",
dead_node); dead_node);
list_del_init(&res->recovering); list_move_tail(&res->recovering, list);
list_add_tail(&res->recovering, list);
} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
mlog(0, "found UNKNOWN owner while doing recovery " mlog(0, "found UNKNOWN owner while doing recovery "
"for node %u. sending it.\n", dead_node); "for node %u. sending it.\n", dead_node);
list_del_init(&res->recovering); list_move_tail(&res->recovering, list);
list_add_tail(&res->recovering, list);
} }
} }
spin_unlock(&dlm->spinlock); spin_unlock(&dlm->spinlock);
@@ -1529,8 +1527,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
/* move the lock to its proper place */ /* move the lock to its proper place */
/* do not alter lock refcount. switching lists. */ /* do not alter lock refcount. switching lists. */
list_del_init(&lock->list); list_move_tail(&lock->list, queue);
list_add_tail(&lock->list, queue);
spin_unlock(&res->spinlock); spin_unlock(&res->spinlock);
mlog(0, "just reordered a local lock!\n"); mlog(0, "just reordered a local lock!\n");

View File

@@ -318,8 +318,7 @@ converting:
target->ml.type = target->ml.convert_type; target->ml.type = target->ml.convert_type;
target->ml.convert_type = LKM_IVMODE; target->ml.convert_type = LKM_IVMODE;
list_del_init(&target->list); list_move_tail(&target->list, &res->granted);
list_add_tail(&target->list, &res->granted);
BUG_ON(!target->lksb); BUG_ON(!target->lksb);
target->lksb->status = DLM_NORMAL; target->lksb->status = DLM_NORMAL;
@@ -380,8 +379,7 @@ blocked:
target->ml.type, target->ml.node); target->ml.type, target->ml.node);
// target->ml.type is already correct // target->ml.type is already correct
list_del_init(&target->list); list_move_tail(&target->list, &res->granted);
list_add_tail(&target->list, &res->granted);
BUG_ON(!target->lksb); BUG_ON(!target->lksb);
target->lksb->status = DLM_NORMAL; target->lksb->status = DLM_NORMAL;

View File

@@ -271,8 +271,7 @@ void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
void dlm_commit_pending_cancel(struct dlm_lock_resource *res, void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
struct dlm_lock *lock) struct dlm_lock *lock)
{ {
list_del_init(&lock->list); list_move_tail(&lock->list, &res->granted);
list_add_tail(&lock->list, &res->granted);
lock->ml.convert_type = LKM_IVMODE; lock->ml.convert_type = LKM_IVMODE;
} }

View File

@@ -222,8 +222,7 @@ void ocfs2_handle_add_inode(struct ocfs2_journal_handle *handle,
BUG_ON(!list_empty(&OCFS2_I(inode)->ip_handle_list)); BUG_ON(!list_empty(&OCFS2_I(inode)->ip_handle_list));
OCFS2_I(inode)->ip_handle = handle; OCFS2_I(inode)->ip_handle = handle;
list_del(&(OCFS2_I(inode)->ip_handle_list)); list_move_tail(&(OCFS2_I(inode)->ip_handle_list), &(handle->inode_list));
list_add_tail(&(OCFS2_I(inode)->ip_handle_list), &(handle->inode_list));
} }
static void ocfs2_handle_unlock_inodes(struct ocfs2_journal_handle *handle) static void ocfs2_handle_unlock_inodes(struct ocfs2_journal_handle *handle)

View File

@@ -834,8 +834,7 @@ static int write_ordered_buffers(spinlock_t * lock,
get_bh(bh); get_bh(bh);
if (test_set_buffer_locked(bh)) { if (test_set_buffer_locked(bh)) {
if (!buffer_dirty(bh)) { if (!buffer_dirty(bh)) {
list_del_init(&jh->list); list_move(&jh->list, &tmp);
list_add(&jh->list, &tmp);
goto loop_next; goto loop_next;
} }
spin_unlock(lock); spin_unlock(lock);
@@ -855,8 +854,7 @@ static int write_ordered_buffers(spinlock_t * lock,
ret = -EIO; ret = -EIO;
} }
if (buffer_dirty(bh)) { if (buffer_dirty(bh)) {
list_del_init(&jh->list); list_move(&jh->list, &tmp);
list_add(&jh->list, &tmp);
add_to_chunk(&chunk, bh, lock, write_ordered_chunk); add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
} else { } else {
reiserfs_free_jh(bh); reiserfs_free_jh(bh);

View File

@@ -400,8 +400,7 @@ static int smb_request_send_req(struct smb_request *req)
if (!(req->rq_flags & SMB_REQ_TRANSMITTED)) if (!(req->rq_flags & SMB_REQ_TRANSMITTED))
goto out; goto out;
list_del_init(&req->rq_queue); list_move_tail(&req->rq_queue, &server->recvq);
list_add_tail(&req->rq_queue, &server->recvq);
result = 1; result = 1;
out: out:
return result; return result;
@@ -435,8 +434,7 @@ int smb_request_send_server(struct smb_sb_info *server)
result = smb_request_send_req(req); result = smb_request_send_req(req);
if (result < 0) { if (result < 0) {
server->conn_error = result; server->conn_error = result;
list_del_init(&req->rq_queue); list_move(&req->rq_queue, &server->xmitq);
list_add(&req->rq_queue, &server->xmitq);
result = -EIO; result = -EIO;
goto out; goto out;
} }

View File

@@ -193,8 +193,7 @@ int smbiod_retry(struct smb_sb_info *server)
if (req->rq_flags & SMB_REQ_RETRY) { if (req->rq_flags & SMB_REQ_RETRY) {
/* must move the request to the xmitq */ /* must move the request to the xmitq */
VERBOSE("retrying request %p on recvq\n", req); VERBOSE("retrying request %p on recvq\n", req);
list_del(&req->rq_queue); list_move(&req->rq_queue, &server->xmitq);
list_add(&req->rq_queue, &server->xmitq);
continue; continue;
} }
#endif #endif