xfs: reduce the number of pagb_lock roundtrips in xfs_alloc_clear_busy
Instead of finding the per-ag and then taking and releasing the pagb_lock for every single busy extent completed, sort the list of busy extents and only switch between AGs where necessary. This becomes especially important with the online discard support, which will hit this lock more often. Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Alex Elder <aelder@sgi.com>
This commit is contained in:
parent
97d3ac75e5
commit
8a072a4d4c
|
@ -33,7 +33,6 @@
|
|||
#include <linux/migrate.h>
|
||||
#include <linux/backing-dev.h>
|
||||
#include <linux/freezer.h>
|
||||
#include <linux/list_sort.h>
|
||||
|
||||
#include "xfs_sb.h"
|
||||
#include "xfs_inum.h"
|
||||
|
|
|
@ -70,6 +70,7 @@
|
|||
#include <linux/ctype.h>
|
||||
#include <linux/writeback.h>
|
||||
#include <linux/capability.h>
|
||||
#include <linux/list_sort.h>
|
||||
|
||||
#include <asm/page.h>
|
||||
#include <asm/div64.h>
|
||||
|
|
|
@ -2964,24 +2964,60 @@ fail:
|
|||
*rlen = 0;
|
||||
}
|
||||
|
||||
/*
 * Remove one busy extent and free it.
 *
 * The caller must already hold pag->pagb_lock for the AG the extent
 * belongs to; this helper takes no locks itself, which is what lets
 * xfs_alloc_busy_clear() batch a whole run of same-AG extents under a
 * single lock round trip.
 */
static void
xfs_alloc_busy_clear_one(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_busy_extent	*busyp)
{
	/*
	 * Only extents with a non-zero length live in the per-AG rb-tree,
	 * so only those are traced and erased here.  NOTE(review): a zero
	 * length presumably marks an extent already removed from the tree
	 * elsewhere — confirm against the insert/trim paths.
	 */
	if (busyp->length) {
		trace_xfs_alloc_busy_clear(mp, busyp->agno, busyp->bno,
		    busyp->length);
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
	}

	/* Unlink from the caller's busy list and release the memory. */
	list_del_init(&busyp->list);
	kmem_free(busyp);
}
|
||||
|
||||
/*
 * Clear out all busy extents on @list.
 *
 * The list is expected to be sorted by AG (see xfs_alloc_busy_sort())
 * so that consecutive entries share an AG; the perag reference and
 * pagb_lock are then acquired once per run of same-AG extents rather
 * than once per extent.
 */
void
xfs_alloc_busy_clear(
	struct xfs_mount	*mp,
	struct list_head	*list)
{
	struct xfs_busy_extent	*busyp, *n;
	struct xfs_perag	*pag = NULL;
	xfs_agnumber_t		agno = NULLAGNUMBER;	/* no AG locked yet */

	list_for_each_entry_safe(busyp, n, list, list) {
		/*
		 * Crossing into a different AG: drop the previous AG's
		 * lock and reference (if any), then lock the new one.
		 */
		if (busyp->agno != agno) {
			if (pag) {
				spin_unlock(&pag->pagb_lock);
				xfs_perag_put(pag);
			}
			pag = xfs_perag_get(mp, busyp->agno);
			spin_lock(&pag->pagb_lock);
			agno = busyp->agno;
		}

		/* pagb_lock for busyp's AG is held across this call. */
		xfs_alloc_busy_clear_one(mp, pag, busyp);
	}

	/* Release the lock and reference for the final AG, if any. */
	if (pag) {
		spin_unlock(&pag->pagb_lock);
		xfs_perag_put(pag);
	}
}
|
||||
|
||||
/*
|
||||
* Callback for list_sort to sort busy extents by the AG they reside in.
|
||||
*/
|
||||
int
|
||||
xfs_busy_extent_ag_cmp(
|
||||
void *priv,
|
||||
struct list_head *a,
|
||||
struct list_head *b)
|
||||
{
|
||||
return container_of(a, struct xfs_busy_extent, list)->agno -
|
||||
container_of(b, struct xfs_busy_extent, list)->agno;
|
||||
}
|
||||
|
|
|
@ -140,7 +140,7 @@ xfs_alloc_busy_insert(struct xfs_trans *tp, xfs_agnumber_t agno,
|
|||
xfs_agblock_t bno, xfs_extlen_t len);
|
||||
|
||||
void
|
||||
xfs_alloc_busy_clear(struct xfs_mount *mp, struct xfs_busy_extent *busyp);
|
||||
xfs_alloc_busy_clear(struct xfs_mount *mp, struct list_head *list);
|
||||
|
||||
int
|
||||
xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
|
||||
|
@ -149,6 +149,15 @@ xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
|
|||
void
|
||||
xfs_alloc_busy_reuse(struct xfs_mount *mp, xfs_agnumber_t agno,
|
||||
xfs_agblock_t fbno, xfs_extlen_t flen, bool userdata);
|
||||
|
||||
int
|
||||
xfs_busy_extent_ag_cmp(void *priv, struct list_head *a, struct list_head *b);
|
||||
|
||||
/*
 * Sort busy extents by the AG they reside in, so that
 * xfs_alloc_busy_clear() can process same-AG extents under a single
 * pagb_lock acquisition.
 */
static inline void xfs_alloc_busy_sort(struct list_head *list)
{
	list_sort(NULL, list, xfs_busy_extent_ag_cmp);
}
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
/*
|
||||
|
|
|
@ -361,13 +361,12 @@ xlog_cil_committed(
|
|||
int abort)
|
||||
{
|
||||
struct xfs_cil_ctx *ctx = args;
|
||||
struct xfs_busy_extent *busyp, *n;
|
||||
|
||||
xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
|
||||
ctx->start_lsn, abort);
|
||||
|
||||
list_for_each_entry_safe(busyp, n, &ctx->busy_extents, list)
|
||||
xfs_alloc_busy_clear(ctx->cil->xc_log->l_mp, busyp);
|
||||
xfs_alloc_busy_sort(&ctx->busy_extents);
|
||||
xfs_alloc_busy_clear(ctx->cil->xc_log->l_mp, &ctx->busy_extents);
|
||||
|
||||
spin_lock(&ctx->cil->xc_cil_lock);
|
||||
list_del(&ctx->committing);
|
||||
|
|
|
@ -608,10 +608,8 @@ STATIC void
|
|||
xfs_trans_free(
|
||||
struct xfs_trans *tp)
|
||||
{
|
||||
struct xfs_busy_extent *busyp, *n;
|
||||
|
||||
list_for_each_entry_safe(busyp, n, &tp->t_busy, list)
|
||||
xfs_alloc_busy_clear(tp->t_mountp, busyp);
|
||||
xfs_alloc_busy_sort(&tp->t_busy);
|
||||
xfs_alloc_busy_clear(tp->t_mountp, &tp->t_busy);
|
||||
|
||||
atomic_dec(&tp->t_mountp->m_active_trans);
|
||||
xfs_trans_free_dqinfo(tp);
|
||||
|
|
Loading…
Reference in New Issue