xfs: calculate inode walk prefetch more carefully
The existing inode walk prefetch is based on the old bulkstat code, which simply allocated 4 pages worth of memory and prefetched that many inobt records, regardless of how many inodes the caller requested. 65536 inodes is a lot to prefetch (~32M on x64, ~512M on arm64), so let's scale things down a little more intelligently based on the number of inodes requested.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
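For a rough feel for where 65536 comes from: with 4 KiB pages and a 16-byte incore inobt record (both plausible x64 values, assumed here rather than stated in the patch), the old formula yields 1024 records, i.e. 65536 inodes at 64 inodes per chunk. A minimal back-of-the-envelope sketch:

	/*
	 * Back-of-the-envelope check of the old fixed prefetch. The
	 * 4096-byte page and 16-byte record size are x64 assumptions,
	 * not values taken from the patch itself.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned int page_size = 4096;		/* PAGE_SIZE on x64 */
		unsigned int rec_size = 16;		/* approx. sizeof(struct xfs_inobt_rec_incore) */
		unsigned int inodes_per_chunk = 64;	/* XFS_INODES_PER_CHUNK */
		unsigned int records = page_size * 4 / rec_size;

		/* prints: 1024 inobt records -> 65536 inodes prefetched */
		printf("%u inobt records -> %u inodes prefetched\n",
		       records, records * inodes_per_chunk);
		return 0;
	}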
parent 2810bd6840
commit 938c710d99
@@ -333,16 +333,58 @@ out:
 	return error;
 }
 
+/*
+ * We experimentally determined that the reduction in ioctl call overhead
+ * diminishes when userspace asks for more than 2048 inodes, so we'll cap
+ * prefetch at this point.
+ */
+#define IWALK_MAX_INODE_PREFETCH	(2048U)
+
 /*
  * Given the number of inodes to prefetch, set the number of inobt records that
  * we cache in memory, which controls the number of inodes we try to read
- * ahead.
+ * ahead.  Set the maximum if @inodes == 0.
  */
 static inline unsigned int
 xfs_iwalk_prefetch(
-	unsigned int		inode_records)
+	unsigned int		inodes)
 {
-	return PAGE_SIZE * 4 / sizeof(struct xfs_inobt_rec_incore);
+	unsigned int		inobt_records;
+
+	/*
+	 * If the caller didn't tell us the number of inodes they wanted,
+	 * assume the maximum prefetch possible for best performance.
+	 * Otherwise, cap prefetch at that maximum so that we don't start an
+	 * absurd amount of prefetch.
+	 */
+	if (inodes == 0)
+		inodes = IWALK_MAX_INODE_PREFETCH;
+	inodes = min(inodes, IWALK_MAX_INODE_PREFETCH);
+
+	/* Round the inode count up to a full chunk. */
+	inodes = round_up(inodes, XFS_INODES_PER_CHUNK);
+
+	/*
+	 * In order to convert the number of inodes to prefetch into an
+	 * estimate of the number of inobt records to cache, we require a
+	 * conversion factor that reflects our expectations of the average
+	 * loading factor of an inode chunk.  Based on data gathered, most
+	 * (but not all) filesystems manage to keep the inode chunks totally
+	 * full, so we'll underestimate slightly so that our readahead will
+	 * still deliver the performance we want on aging filesystems:
+	 *
+	 * inobt = inodes / (INODES_PER_CHUNK * (4 / 5));
+	 *
+	 * The funny math is to avoid integer division.
+	 */
+	inobt_records = (inodes * 5) / (4 * XFS_INODES_PER_CHUNK);
+
+	/*
+	 * Allocate enough space to prefetch at least two inobt records so that
+	 * we can cache both the record where the iwalk started and the next
+	 * record.  This simplifies the AG inode walk loop setup code.
+	 */
+	return max(inobt_records, 2U);
 }
 
 /*
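The "funny math" in the patch is easy to sanity-check in userspace. The sketch below mirrors the new sizing arithmetic as an illustration, not the kernel code itself: XFS_INODES_PER_CHUNK is 64 in the on-disk format headers, and the ternary/bitmask expressions stand in for the kernel's min(), max() and round_up() helpers.

	/*
	 * Userspace sketch of the new prefetch sizing logic.
	 */
	#include <stdio.h>

	#define XFS_INODES_PER_CHUNK		64U
	#define IWALK_MAX_INODE_PREFETCH	2048U

	static unsigned int iwalk_prefetch(unsigned int inodes)
	{
		unsigned int inobt_records;

		/* 0 means "no preference": assume the maximum; otherwise cap. */
		if (inodes == 0)
			inodes = IWALK_MAX_INODE_PREFETCH;
		if (inodes > IWALK_MAX_INODE_PREFETCH)
			inodes = IWALK_MAX_INODE_PREFETCH;

		/* round_up() to a full chunk; works because 64 is a power of two */
		inodes = (inodes + XFS_INODES_PER_CHUNK - 1) &
			 ~(XFS_INODES_PER_CHUNK - 1);

		/* inodes / (XFS_INODES_PER_CHUNK * 4/5) without integer-division loss */
		inobt_records = (inodes * 5) / (4 * XFS_INODES_PER_CHUNK);

		/* floor of two records: the start record plus the next one */
		return inobt_records > 2 ? inobt_records : 2;
	}

	int main(void)
	{
		unsigned int req[] = { 0, 1, 1000, 2048, 65536 };

		for (unsigned int i = 0; i < sizeof(req) / sizeof(req[0]); i++)
			printf("%5u inodes requested -> %2u inobt records cached\n",
			       req[i], iwalk_prefetch(req[i]));
		return 0;
	}

Running it shows that a request of 0 ("no preference") and anything above the cap both settle at 40 cached records, a 1000-inode request gets 20, and tiny requests keep the two-record floor the walk setup relies on; compare that with the old unconditional 1024 records.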