Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

* 'for-linus' of git://oss.sgi.com/xfs/xfs:
  xfs: fix race condition in AIL push trigger
  xfs: make AIL target updates and compares 32bit safe.
  xfs: always push the AIL to the target
  xfs: exit AIL push work correctly when AIL is empty
  xfs: ensure reclaim cursor is reset correctly at end of AG
Author: Linus Torvalds
Date:   2011-05-10 11:56:35 -07:00
Commit: 675badfc48

2 changed files with 27 additions and 21 deletions

--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c

@@ -926,6 +926,7 @@ restart:
 				XFS_LOOKUP_BATCH,
 				XFS_ICI_RECLAIM_TAG);
 		if (!nr_found) {
+			done = 1;
 			rcu_read_unlock();
 			break;
 		}
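The one-line fix above matters because the reclaim walk saves its batch-lookup position in a per-AG cursor: when the gang lookup comes back empty, the walk has reached the end of the AG and the cursor must be reset rather than left pointing past the last inode. Below is a minimal userspace sketch of that pattern (hypothetical names and ranges, not the kernel code): remove the `done = 1;` line and the second walk strands the cursor past the end, so every later walk finds nothing.

#include <stdio.h>

#define BATCH		4
#define LAST_INDEX	10	/* items live at indices 0..9 */

static int cursor;	/* persists across walks, like the per-AG reclaim cursor */

/* stand-in for radix_tree_gang_lookup_tag() */
static int gang_lookup(int first, int *batch, int max)
{
	int nr = 0;

	while (first < LAST_INDEX && nr < max)
		batch[nr++] = first++;
	return nr;
}

static void walk(void)
{
	int batch[BATCH];
	int first = cursor;
	int done = 0;
	int nr, scanned = 0;

	do {
		nr = gang_lookup(first, batch, BATCH);
		if (!nr) {
			done = 1;	/* the fix: mark that we hit the end */
			break;
		}
		first = batch[nr - 1] + 1;
		scanned += nr;
	} while (scanned < 6);		/* give up after a few, like trylock mode */

	if (!done)
		cursor = first;		/* stopped early: resume here next time */
	else
		cursor = 0;		/* hit the end: next walk restarts from 0 */
	printf("scanned %d, cursor now %d\n", scanned, cursor);
}

int main(void)
{
	walk();		/* scans 0..7, saves cursor = 8 */
	walk();		/* scans 8..9, hits the end, resets cursor to 0 */
	return 0;
}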

--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c

@@ -350,16 +350,19 @@ xfs_ail_worker(
 {
 	struct xfs_ail	*ailp = container_of(to_delayed_work(work),
 					struct xfs_ail, xa_work);
-	long		tout;
-	xfs_lsn_t	target = ailp->xa_target;
-	xfs_lsn_t	lsn;
-	xfs_log_item_t	*lip;
-	int		flush_log, count, stuck;
 	xfs_mount_t	*mp = ailp->xa_mount;
 	struct xfs_ail_cursor	*cur = &ailp->xa_cursors;
+	xfs_log_item_t	*lip;
+	xfs_lsn_t	lsn;
+	xfs_lsn_t	target;
+	long		tout = 10;
+	int		flush_log = 0;
+	int		stuck = 0;
+	int		count = 0;
 	int		push_xfsbufd = 0;

 	spin_lock(&ailp->xa_lock);
+	target = ailp->xa_target;
 	xfs_trans_ail_cursor_init(ailp, cur);
 	lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn);
 	if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
@@ -368,8 +371,7 @@ xfs_ail_worker(
 		 */
 		xfs_trans_ail_cursor_done(ailp, cur);
 		spin_unlock(&ailp->xa_lock);
-		ailp->xa_last_pushed_lsn = 0;
-		return;
+		goto out_done;
 	}

 	XFS_STATS_INC(xs_push_ail);
@@ -386,8 +388,7 @@ xfs_ail_worker(
 	 * lots of contention on the AIL lists.
 	 */
 	lsn = lip->li_lsn;
-	flush_log = stuck = count = 0;
-	while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) {
+	while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
 		int	lock_result;
 		/*
 		 * If we can lock the item without sleeping, unlock the AIL
@@ -480,21 +481,25 @@ xfs_ail_worker(
 	}

 	/* assume we have more work to do in a short while */
-	tout = 10;
+out_done:
 	if (!count) {
 		/* We're past our target or empty, so idle */
 		ailp->xa_last_pushed_lsn = 0;

 		/*
-		 * Check for an updated push target before clearing the
-		 * XFS_AIL_PUSHING_BIT. If the target changed, we've got more
-		 * work to do. Wait a bit longer before starting that work.
+		 * We clear the XFS_AIL_PUSHING_BIT first before checking
+		 * whether the target has changed. If the target has changed,
+		 * this pushes the requeue race directly onto the result of the
+		 * atomic test/set bit, so we are guaranteed that either the
+		 * pusher that changed the target or ourselves will requeue
+		 * the work (but not both).
 		 */
-		smp_rmb();
-		if (ailp->xa_target == target) {
-			clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
-			return;
-		}
+		clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
+		smp_rmb();
+		if (XFS_LSN_CMP(ailp->xa_target, target) == 0 ||
+		    test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
+			return;
+
 		tout = 50;
 	} else if (XFS_LSN_CMP(lsn, target) >= 0) {
 		/*
@@ -553,7 +558,7 @@ xfs_ail_push(
 	 * the XFS_AIL_PUSHING_BIT.
 	 */
 	smp_wmb();
-	ailp->xa_target = threshold_lsn;
+	xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
 	if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
 		queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
 }
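The ordering change in the out_done path is the heart of the race fix: the worker must drop XFS_AIL_PUSHING_BIT before re-reading the target, so a concurrent xfs_ail_push() that moves the target either sees the bit already clear (and requeues the work itself) or loses the worker's test_and_set_bit() (in which case the worker requeues). A compressed userspace sketch of that handshake follows, using C11 atomics and hypothetical names rather than the kernel's bitops; sequential consistency stands in for the smp_rmb()/smp_wmb() pairing.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t	xa_target;	/* the push target (an LSN in XFS) */
static atomic_uint	xa_flags;	/* bit 0 plays XFS_AIL_PUSHING_BIT */
#define PUSHING		1u

static void queue_work(const char *who)
{
	printf("%s requeues the AIL worker\n", who);
}

/* pusher side, shaped like xfs_ail_push(): publish target, then try the bit */
static void ail_push(uint64_t threshold)
{
	atomic_store(&xa_target, threshold);
	if (!(atomic_fetch_or(&xa_flags, PUSHING) & PUSHING))
		queue_work("pusher");	/* we owned the bit: our job to queue */
}

/* worker side, shaped like the out_done path above */
static void ail_worker_idle(uint64_t target_seen)
{
	/* clear the bit FIRST, then re-read the target */
	atomic_fetch_and(&xa_flags, ~PUSHING);
	if (atomic_load(&xa_target) == target_seen)
		return;			/* nothing new: go idle */
	/*
	 * The target moved while we ran. Race any pusher on the atomic
	 * test-and-set: exactly one side sees the bit clear, so the work
	 * is requeued once, never twice and never zero times.
	 */
	if (!(atomic_fetch_or(&xa_flags, PUSHING) & PUSHING))
		queue_work("worker");
}

int main(void)
{
	ail_push(100);		/* first push: pusher wins the bit and queues */
	ail_worker_idle(50);	/* worker saw stale target 50: it requeues */
	return 0;
}

The xfs_trans_ail_copy_lsn() change in the last hunk closes the related 32-bit hole: an xfs_lsn_t is 64 bits wide, so on 32-bit machines a plain store to xa_target can tear, and the helper therefore updates it under the AIL lock there; likewise XFS_LSN_CMP() replaces the raw == compare of targets. The sketch's == is only safe because it uses a genuinely atomic 64-bit load.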