[PATCH] fs: fix-up schedule_timeout() usage

Use schedule_timeout_{,un}interruptible() instead of
set_current_state()/schedule_timeout() to reduce kernel size.  Also use
helper functions to convert between human time units and jiffies rather
than constant HZ division, to avoid rounding errors.

Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 041e0e3b19
parent 373016e9e1
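For reference, a minimal sketch (not part of the patch) of the pattern being replaced and its one-line equivalent; the function name example_backoff() is hypothetical and exists only to illustrate the conversions the diff below performs:

/*
 * schedule_timeout_{,un}interruptible() set the task state and call
 * schedule_timeout() internally, so the two-statement form collapses
 * into one call.  msecs_to_jiffies() replaces open-coded HZ arithmetic
 * such as HZ / 2, which can round poorly depending on the HZ value.
 */
#include <linux/jiffies.h>
#include <linux/sched.h>

static void example_backoff(void)	/* hypothetical helper, for illustration */
{
	/* old pattern: two statements plus HZ division */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(HZ / 2);		/* intended ~500 ms */

	/* new pattern: one helper, explicit unit conversion */
	schedule_timeout_interruptible(msecs_to_jiffies(500));

	/* uninterruptible variant, e.g. a one-jiffy back-off */
	schedule_timeout_uninterruptible(1);
}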
@@ -3215,10 +3215,8 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
 	}
 
 	cifs_sb->tcon = NULL;
-	if (ses) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(HZ / 2);
-	}
+	if (ses)
+		schedule_timeout_interruptible(msecs_to_jiffies(500));
 	if (ses)
 		sesInfoFree(ses);
 
@@ -1340,8 +1340,7 @@ int journal_stop(handle_t *handle)
 	if (handle->h_sync) {
 		do {
 			old_handle_count = transaction->t_handle_count;
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			schedule_timeout(1);
+			schedule_timeout_uninterruptible(1);
 		} while (old_handle_count != transaction->t_handle_count);
 	}
 
@@ -299,8 +299,7 @@ nlmclnt_alloc_call(void)
 			return call;
 		}
 		printk("nlmclnt_alloc_call: failed, waiting for memory\n");
-		current->state = TASK_INTERRUPTIBLE;
-		schedule_timeout(5*HZ);
+		schedule_timeout_interruptible(5*HZ);
 	}
 	return NULL;
 }
@@ -34,8 +34,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
 		res = rpc_call_sync(clnt, msg, flags);
 		if (res != -EJUKEBOX)
 			break;
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(NFS_JUKEBOX_RETRY_TIME);
+		schedule_timeout_interruptible(NFS_JUKEBOX_RETRY_TIME);
 		res = -ERESTARTSYS;
 	} while (!signalled());
 	rpc_clnt_sigunmask(clnt, &oldset);
@@ -2418,14 +2418,11 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
 		*timeout = NFS4_POLL_RETRY_MAX;
 	rpc_clnt_sigmask(clnt, &oldset);
 	if (clnt->cl_intr) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(*timeout);
+		schedule_timeout_interruptible(*timeout);
 		if (signalled())
 			res = -ERESTARTSYS;
-	} else {
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(*timeout);
-	}
+	} else
+		schedule_timeout_uninterruptible(*timeout);
 	rpc_clnt_sigunmask(clnt, &oldset);
 	*timeout <<= 1;
 	return res;
@@ -2578,8 +2575,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
 static unsigned long
 nfs4_set_lock_task_retry(unsigned long timeout)
 {
-	current->state = TASK_INTERRUPTIBLE;
-	schedule_timeout(timeout);
+	schedule_timeout_interruptible(timeout);
 	timeout <<= 1;
 	if (timeout > NFS4_LOCK_MAXTIMEOUT)
 		return NFS4_LOCK_MAXTIMEOUT;
@@ -2868,8 +2868,7 @@ static void let_transaction_grow(struct super_block *sb, unsigned long trans_id)
 	struct reiserfs_journal *journal = SB_JOURNAL(sb);
 	unsigned long bcount = journal->j_bcount;
 	while (1) {
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 		journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
 		while ((atomic_read(&journal->j_wcount) > 0 ||
 			atomic_read(&journal->j_jlock)) &&
@@ -2397,8 +2397,7 @@ smb_proc_readdir_long(struct file *filp, void *dirent, filldir_t filldir,
 		if (req->rq_rcls == ERRSRV && req->rq_err == ERRerror) {
 			/* a damn Win95 bug - sometimes it clags if you
 			   ask it too fast */
-			current->state = TASK_INTERRUPTIBLE;
-			schedule_timeout(HZ/5);
+			schedule_timeout_interruptible(msecs_to_jiffies(200));
 			continue;
 		}
 
@@ -39,8 +39,7 @@ typedef struct timespec timespec_t;
 
 static inline void delay(long ticks)
 {
-	set_current_state(TASK_UNINTERRUPTIBLE);
-	schedule_timeout(ticks);
+	schedule_timeout_uninterruptible(ticks);
 }
 
 static inline void nanotime(struct timespec *tvp)
@@ -1780,10 +1780,10 @@ xfsbufd(
 			xfsbufd_force_sleep = 0;
 		}
 
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout((xfs_buf_timer_centisecs * HZ) / 100);
+		schedule_timeout_interruptible
+			(xfs_buf_timer_centisecs * msecs_to_jiffies(10));
 
-		age = (xfs_buf_age_centisecs * HZ) / 100;
+		age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
 		spin_lock(&pbd_delwrite_lock);
 		list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) {
 			PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb));
@@ -467,7 +467,7 @@ xfs_flush_inode(
 
 	igrab(inode);
 	xfs_syncd_queue_work(vfs, inode, xfs_flush_inode_work);
-	delay(HZ/2);
+	delay(msecs_to_jiffies(500));
 }
 
 /*
@@ -492,7 +492,7 @@ xfs_flush_device(
 
 	igrab(inode);
 	xfs_syncd_queue_work(vfs, inode, xfs_flush_device_work);
-	delay(HZ/2);
+	delay(msecs_to_jiffies(500));
 	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
 }
 
@@ -520,10 +520,9 @@ xfssyncd(
 	struct vfs_sync_work *work, *n;
 	LIST_HEAD (tmp);
 
-	timeleft = (xfs_syncd_centisecs * HZ) / 100;
+	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
 	for (;;) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		timeleft = schedule_timeout(timeleft);
+		timeleft = schedule_timeout_interruptible(timeleft);
 		/* swsusp */
 		try_to_freeze();
 		if (kthread_should_stop())
@@ -537,7 +536,8 @@ xfssyncd(
 		 */
 		if (!timeleft || list_empty(&vfsp->vfs_sync_list)) {
 			if (!timeleft)
-				timeleft = (xfs_syncd_centisecs * HZ) / 100;
+				timeleft = xfs_syncd_centisecs *
+						msecs_to_jiffies(10);
 			INIT_LIST_HEAD(&vfsp->vfs_sync_work.w_list);
 			list_add_tail(&vfsp->vfs_sync_work.w_list,
 					&vfsp->vfs_sync_list);