xfs: poll waiting for quotacheck
Create a pwork destroy function that uses polling instead of uninterruptible sleep to wait for work items to finish so that we can touch the softlockup watchdog. IOWs, gross hack.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
commit 3e5a428b26
parent 40786717c8
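The mechanism the patch adds is a counted waitqueue: every queued item bumps nr_work, each item decrements it and wakes poll_wait as it finishes, and the poller loops on wait_event_timeout() with a one-second timeout, touching the softlockup watchdog each time the wait times out. The sketch below illustrates that pattern in isolation; it is a minimal kernel-C sketch, not the XFS code itself, and the demo_ctl structure and demo_* function names are hypothetical.

/* Hypothetical, self-contained sketch of the poll-and-touch-watchdog pattern. */
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/nmi.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

struct demo_ctl {
	struct workqueue_struct	*wq;		/* workers run on this queue */
	struct wait_queue_head	poll_wait;	/* poller sleeps here */
	atomic_t		nr_work;	/* outstanding work items */
};

/* Called by each work item as it finishes: drop the count, wake the poller. */
static void demo_work_done(struct demo_ctl *ctl)
{
	if (atomic_dec_and_test(&ctl->nr_work))
		wake_up(&ctl->poll_wait);
}

/*
 * Wait for all outstanding items without an open-ended uninterruptible
 * sleep: wake up at least once per second (HZ jiffies) and pet the soft
 * lockup watchdog until nr_work drains to zero.
 */
static void demo_poll(struct demo_ctl *ctl)
{
	while (wait_event_timeout(ctl->poll_wait,
			atomic_read(&ctl->nr_work) == 0, HZ) == 0)
		touch_softlockup_watchdog();
}

wait_event_timeout() returns 0 only when the timeout elapses with the condition still false, so the loop exits as soon as the last item completes; the cost of the hack is roughly one extra wakeup per second while the walk is running.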
@@ -588,6 +588,7 @@ xfs_iwalk_threaded(
 	xfs_ino_t		startino,
 	xfs_iwalk_fn		iwalk_fn,
 	unsigned int		inode_records,
+	bool			polled,
 	void			*data)
 {
 	struct xfs_pwork_ctl	pctl;
@@ -619,6 +620,8 @@ xfs_iwalk_threaded(
 		startino = XFS_AGINO_TO_INO(mp, agno + 1, 0);
 	}
 
+	if (polled)
+		xfs_pwork_poll(&pctl);
 	return xfs_pwork_destroy(&pctl);
 }
 
@@ -16,7 +16,8 @@ typedef int (*xfs_iwalk_fn)(struct xfs_mount *mp, struct xfs_trans *tp,
 int xfs_iwalk(struct xfs_mount *mp, struct xfs_trans *tp, xfs_ino_t startino,
 		xfs_iwalk_fn iwalk_fn, unsigned int inode_records, void *data);
 int xfs_iwalk_threaded(struct xfs_mount *mp, xfs_ino_t startino,
-		xfs_iwalk_fn iwalk_fn, unsigned int inode_records, void *data);
+		xfs_iwalk_fn iwalk_fn, unsigned int inode_records, bool poll,
+		void *data);
 
 /* Walk all inode btree records in the filesystem starting from @startino. */
 typedef int (*xfs_inobt_walk_fn)(struct xfs_mount *mp, struct xfs_trans *tp,
@@ -13,6 +13,7 @@
 #include "xfs_trace.h"
 #include "xfs_sysctl.h"
 #include "xfs_pwork.h"
+#include <linux/nmi.h>
 
 /*
  * Parallel Work Queue
@@ -46,6 +47,8 @@ xfs_pwork_work(
 	error = pctl->work_fn(pctl->mp, pwork);
 	if (error && !pctl->error)
 		pctl->error = error;
+	if (atomic_dec_and_test(&pctl->nr_work))
+		wake_up(&pctl->poll_wait);
 }
 
 /*
@@ -74,6 +77,8 @@ xfs_pwork_init(
 	pctl->work_fn = work_fn;
 	pctl->error = 0;
 	pctl->mp = mp;
+	atomic_set(&pctl->nr_work, 0);
+	init_waitqueue_head(&pctl->poll_wait);
 
 	return 0;
 }
@@ -86,6 +91,7 @@ xfs_pwork_queue(
 {
 	INIT_WORK(&pwork->work, xfs_pwork_work);
 	pwork->pctl = pctl;
+	atomic_inc(&pctl->nr_work);
 	queue_work(pctl->wq, &pwork->work);
 }
 
@@ -99,6 +105,19 @@ xfs_pwork_destroy(
 	return pctl->error;
 }
 
+/*
+ * Wait for the work to finish by polling completion status and touch the soft
+ * lockup watchdog.  This is for callers such as mount which hold locks.
+ */
+void
+xfs_pwork_poll(
+	struct xfs_pwork_ctl	*pctl)
+{
+	while (wait_event_timeout(pctl->poll_wait,
+				atomic_read(&pctl->nr_work) == 0, HZ) == 0)
+		touch_softlockup_watchdog();
+}
+
 /*
  * Return the amount of parallelism that the data device can handle, or 0 for
  * no limit.
@@ -18,6 +18,8 @@ struct xfs_pwork_ctl {
 	struct workqueue_struct	*wq;
 	struct xfs_mount	*mp;
 	xfs_pwork_work_fn	work_fn;
+	struct wait_queue_head	poll_wait;
+	atomic_t		nr_work;
 	int			error;
 };
 
@@ -53,6 +55,7 @@ int xfs_pwork_init(struct xfs_mount *mp, struct xfs_pwork_ctl *pctl,
 		unsigned int nr_threads);
 void xfs_pwork_queue(struct xfs_pwork_ctl *pctl, struct xfs_pwork *pwork);
 int xfs_pwork_destroy(struct xfs_pwork_ctl *pctl);
+void xfs_pwork_poll(struct xfs_pwork_ctl *pctl);
 unsigned int xfs_pwork_guess_datadev_parallelism(struct xfs_mount *mp);
 
 #endif	/* __XFS_PWORK_H__ */
@@ -1300,7 +1300,7 @@ xfs_qm_quotacheck(
 		flags |= XFS_PQUOTA_CHKD;
 	}
 
-	error = xfs_iwalk_threaded(mp, 0, xfs_qm_dqusage_adjust, 0, NULL);
+	error = xfs_iwalk_threaded(mp, 0, xfs_qm_dqusage_adjust, 0, true, NULL);
 	if (error)
 		goto error_return;
 