/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_qm.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"

/*
 * A buffer has a format structure overhead in the log in addition
 * to the data, so we need to take this into account when reserving
 * space in a transaction for a buffer. Round the space required up
 * to a multiple of 128 bytes so that we don't change the historical
 * reservation that has been used for this overhead.
 */
STATIC uint
xfs_buf_log_overhead(void)
{
        return round_up(sizeof(struct xlog_op_header) +
                        sizeof(struct xfs_buf_log_format), 128);
}

/*
 * Calculate the transaction log reservation per item in bytes.
 *
 * The nbufs argument is used to indicate the number of items that
 * will be changed in a transaction. size is used to tell how many
 * bytes should be reserved per item.
 */
STATIC uint
xfs_calc_buf_res(
        uint            nbufs,
        uint            size)
{
        return nbufs * (size + xfs_buf_log_overhead());
}

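/*
 * A worked example of the two helpers above (a sketch, not an exact figure:
 * the overhead term depends on the current sizes of struct xlog_op_header
 * and struct xfs_buf_log_format, which round up to 128 bytes here):
 *
 *      xfs_calc_buf_res(3, mp->m_sb.sb_sectsize)
 *              => 3 * (sector size + 128) bytes of log space reserved.
 */
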
/*
 * Logging inodes is really tricksy. They are logged in memory format,
 * which means that what we write into the log doesn't directly translate into
 * the amount of space they use on disk.
 *
 * Case in point - btree format forks in memory format use more space than the
 * on-disk format. In memory, the buffer contains a normal btree block header so
 * the btree code can treat it as though it is just another generic buffer.
 * However, when we write it to the inode fork, we don't write all of this
 * header as it isn't needed. e.g. the root is only ever in the inode, so
 * there's no need for sibling pointers which would waste 16 bytes of space.
 *
 * Hence when we have an inode with a maximally sized btree format fork, the
 * amount of information we actually log is greater than the size of the inode
 * on disk. Hence we need an inode reservation function that calculates all this
 * correctly. So, we log:
 *
 * - log op headers for object
 * - inode log format object
 * - the entire inode contents (core + 2 forks)
 * - two bmap btree block headers
 */
STATIC uint
xfs_calc_inode_res(
        struct xfs_mount        *mp,
        uint                    ninodes)
{
        return ninodes * (sizeof(struct xlog_op_header) +
                          sizeof(struct xfs_inode_log_format) +
                          mp->m_sb.sb_inodesize +
                          2 * XFS_BMBT_BLOCK_LEN(mp));
}

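/*
 * A worked example for a 512 byte inode (numbers from the original analysis
 * behind this function; the bmbt block header is 24 bytes on v4 filesystems
 * and 72 bytes on v5 filesystems):
 *
 *      inode core, data and attr fork          = 512 bytes
 *      inode log format + log op header        = 56 + 12 = 68 bytes
 *      data fork bmbt header                   = 24/72 bytes
 *      attr fork bmbt header                   = 24/72 bytes
 *
 * giving roughly 628 bytes for a v2 inode and up to 724 bytes for a v3
 * inode, which is what xfs_calc_inode_res(mp, 1) must cover.
 */
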
/*
 * Various log reservation values.
 *
 * These are based on the size of the file system block because that is what
 * most transactions manipulate. Each adds in an additional 128 bytes per
 * item logged to try to account for the overhead of the transaction mechanism.
 *
 * Note: Most of the reservations underestimate the number of allocation
 * groups into which they could free extents in the xfs_bmap_finish() call.
 * This is because the number in the worst case is quite high and quite
 * unusual. In order to fix this we need to change xfs_bmap_finish() to free
 * extents in only a single AG at a time. This will require changes to the
 * EFI code as well, however, so that the EFI for the extents not freed is
 * logged again in each transaction. See SGI PV #261917.
 *
 * Reservation functions here avoid a huge stack in xfs_trans_init due to
 * register overflow from temporaries in the calculations.
 */

/*
 * In a write transaction we can allocate a maximum of 2
 * extents. This gives:
 *    the inode getting the new extents: inode size
 *    the inode's bmap btree: max depth * block size
 *    the agfs of the ags from which the extents are allocated: 2 * sector size
 *    the superblock free block counter: sector size
 *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
 * And the bmap_finish transaction can free bmap blocks in a join:
 *    the agfs of the ags containing the blocks: 2 * sector size
 *    the agfls of the ags containing the blocks: 2 * sector size
 *    the super block free block counter: sector size
 *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_write_reservation(
        struct xfs_mount        *mp)
{
        return XFS_DQUOT_LOGRES(mp) +
                MAX((xfs_calc_inode_res(mp, 1) +
                     xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
                                      XFS_FSB_TO_B(mp, 1)) +
                     xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
                     xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
                                      XFS_FSB_TO_B(mp, 1))),
                    (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
                     xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
                                      XFS_FSB_TO_B(mp, 1))));
}

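/*
 * Note on the structure of xfs_calc_write_reservation() and the similar
 * functions below: the two arms of the MAX() correspond to the initial
 * allocation/modification transaction and the later xfs_bmap_finish()
 * transaction that frees blocks. As these run as separate rolls of a
 * permanent transaction, only the larger of the two has to fit within a
 * single log reservation unit.
 *
 * Usage sketch (assuming the usual consumer, xfs_trans_resv_calc(), and the
 * field and macro names from xfs_trans_resv.h):
 *
 *      resp->tr_write.tr_logres = xfs_calc_write_reservation(mp);
 *      resp->tr_write.tr_logcount = XFS_WRITE_LOG_COUNT;
 *      resp->tr_write.tr_logflags = XFS_TRANS_PERM_LOG_RES;
 */
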
/*
 * In truncating a file we free up to two extents at once. We can modify:
 *    the inode being truncated: inode size
 *    the inode's bmap btree: (max depth + 1) * block size
 * And the bmap_finish transaction can free the blocks and bmap blocks:
 *    the agf for each of the ags: 4 * sector size
 *    the agfl for each of the ags: 4 * sector size
 *    the super block to reflect the freed blocks: sector size
 *    worst case split in allocation btrees per extent assuming 4 extents:
 *              4 exts * 2 trees * (2 * max depth - 1) * block size
 *    the inode btree: max depth * blocksize
 *    the allocation btrees: 2 trees * (max depth - 1) * block size
 */
STATIC uint
xfs_calc_itruncate_reservation(
        struct xfs_mount        *mp)
{
        return XFS_DQUOT_LOGRES(mp) +
                MAX((xfs_calc_inode_res(mp, 1) +
                     xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1,
                                      XFS_FSB_TO_B(mp, 1))),
                    (xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
                     xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 4),
                                      XFS_FSB_TO_B(mp, 1)) +
                     xfs_calc_buf_res(5, 0) +
                     xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
                                      XFS_FSB_TO_B(mp, 1)) +
                     xfs_calc_buf_res(2 + mp->m_ialloc_blks +
                                      mp->m_in_maxlevels, 0)));
}

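/*
 * Note: several reservations (xfs_calc_itruncate_reservation() above and
 * xfs_calc_ifree_reservation() below) call xfs_calc_buf_res() with a size
 * of 0, e.g. xfs_calc_buf_res(5, 0). Per the helper's definition this
 * reserves only the rounded per-buffer log overhead with no payload, which
 * (as a rough reading, not spelled out in this file) covers buffers whose
 * contents do not need to be logged, such as blocks that are only being
 * invalidated by the transaction.
 */
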
/*
 * In renaming files we can modify:
 *    the four inodes involved: 4 * inode size
 *    the two directory btrees: 2 * (max depth + v2) * dir block size
 *    the two directory bmap btrees: 2 * max depth * block size
 * And the bmap_finish transaction can free dir and bmap blocks (two sets
 *      of bmap blocks) giving:
 *    the agf for the ags in which the blocks live: 3 * sector size
 *    the agfl for the ags in which the blocks live: 3 * sector size
 *    the superblock for the free block count: sector size
 *    the allocation btrees: 3 exts * 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_rename_reservation(
        struct xfs_mount        *mp)
{
        return XFS_DQUOT_LOGRES(mp) +
                MAX((xfs_calc_inode_res(mp, 4) +
                     xfs_calc_buf_res(2 * XFS_DIROP_LOG_COUNT(mp),
                                      XFS_FSB_TO_B(mp, 1))),
                    (xfs_calc_buf_res(7, mp->m_sb.sb_sectsize) +
                     xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 3),
                                      XFS_FSB_TO_B(mp, 1))));
}

/*
 * For creating a link to an inode:
 *    the parent directory inode: inode size
 *    the linked inode: inode size
 *    the directory btree could split: (max depth + v2) * dir block size
 *    the directory bmap btree could join or split: (max depth + v2) * blocksize
 * And the bmap_finish transaction can free some bmap blocks giving:
 *    the agf for the ag in which the blocks live: sector size
 *    the agfl for the ag in which the blocks live: sector size
 *    the superblock for the free block count: sector size
 *    the allocation btrees: 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_link_reservation(
        struct xfs_mount        *mp)
{
        return XFS_DQUOT_LOGRES(mp) +
                MAX((xfs_calc_inode_res(mp, 2) +
                     xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp),
                                      XFS_FSB_TO_B(mp, 1))),
                    (xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
                     xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
                                      XFS_FSB_TO_B(mp, 1))));
}

/*
 * For removing a directory entry we can modify:
 *    the parent directory inode: inode size
 *    the removed inode: inode size
 *    the directory btree could join: (max depth + v2) * dir block size
 *    the directory bmap btree could join or split: (max depth + v2) * blocksize
 * And the bmap_finish transaction can free the dir and bmap blocks giving:
 *    the agf for the ag in which the blocks live: 2 * sector size
 *    the agfl for the ag in which the blocks live: 2 * sector size
 *    the superblock for the free block count: sector size
 *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_remove_reservation(
        struct xfs_mount        *mp)
{
        return XFS_DQUOT_LOGRES(mp) +
                MAX((xfs_calc_inode_res(mp, 2) +
                     xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp),
                                      XFS_FSB_TO_B(mp, 1))),
                    (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
                     xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
                                      XFS_FSB_TO_B(mp, 1))));
}

/*
 * For create, break it into the two cases that the transaction
 * covers. We start with the modify case - allocation done by modification
 * of the state of existing inodes - and the allocation case.
 */

/*
 * For create we can modify:
 *    the parent directory inode: inode size
 *    the new inode: inode size
 *    the inode btree entry: block size
 *    the superblock for the nlink flag: sector size
 *    the directory btree: (max depth + v2) * dir block size
 *    the directory inode's bmap btree: (max depth + v2) * block size
 */
STATIC uint
xfs_calc_create_resv_modify(
        struct xfs_mount        *mp)
{
        return xfs_calc_inode_res(mp, 2) +
                xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
                (uint)XFS_FSB_TO_B(mp, 1) +
                xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp), XFS_FSB_TO_B(mp, 1));
}

/*
 * For create we can allocate some inodes giving:
 *    the agi and agf of the ag getting the new inodes: 2 * sectorsize
 *    the superblock for the nlink flag: sector size
 *    the inode blocks allocated: mp->m_ialloc_blks * blocksize
 *    the inode btree: max depth * blocksize
 *    the allocation btrees: 2 trees * (max depth - 1) * block size
 */
STATIC uint
xfs_calc_create_resv_alloc(
        struct xfs_mount        *mp)
{
        return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
                mp->m_sb.sb_sectsize +
                xfs_calc_buf_res(mp->m_ialloc_blks, XFS_FSB_TO_B(mp, 1)) +
                xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) +
                xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
                                 XFS_FSB_TO_B(mp, 1));
}

STATIC uint
__xfs_calc_create_reservation(
        struct xfs_mount        *mp)
{
        return XFS_DQUOT_LOGRES(mp) +
                MAX(xfs_calc_create_resv_alloc(mp),
                    xfs_calc_create_resv_modify(mp));
}

/*
 * For icreate we can allocate some inodes giving:
 *    the agi and agf of the ag getting the new inodes: 2 * sectorsize
 *    the superblock for the nlink flag: sector size
 *    the inode btree: max depth * blocksize
 *    the allocation btrees: 2 trees * (max depth - 1) * block size
 */
STATIC uint
xfs_calc_icreate_resv_alloc(
        struct xfs_mount        *mp)
{
        return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
                mp->m_sb.sb_sectsize +
                xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) +
                xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
                                 XFS_FSB_TO_B(mp, 1));
}

STATIC uint
xfs_calc_icreate_reservation(
        struct xfs_mount        *mp)
{
        return XFS_DQUOT_LOGRES(mp) +
                MAX(xfs_calc_icreate_resv_alloc(mp),
                    xfs_calc_create_resv_modify(mp));
}

STATIC uint
xfs_calc_create_reservation(
        struct xfs_mount        *mp)
{
        if (xfs_sb_version_hascrc(&mp->m_sb))
                return xfs_calc_icreate_reservation(mp);
        return __xfs_calc_create_reservation(mp);
}

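/*
 * Note the difference between xfs_calc_create_resv_alloc() and
 * xfs_calc_icreate_resv_alloc() above: the icreate variant, used on v5
 * (CRC-enabled) filesystems, omits the xfs_calc_buf_res(mp->m_ialloc_blks,
 * ...) term for the newly allocated inode cluster buffers. The omission is
 * directly visible in the code; the reason (a sketch of the icreate design,
 * not spelled out here) is that inode chunk initialisation is logged via
 * the icreate log item rather than by logging every cluster buffer.
 */
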
/*
 * Making a new directory is the same as creating a new file.
 */
STATIC uint
xfs_calc_mkdir_reservation(
        struct xfs_mount        *mp)
{
        return xfs_calc_create_reservation(mp);
}

/*
 * Making a new symlink is the same as creating a new file, but
 * with the added blocks for remote symlink data which can be up to 1kB in
 * length (MAXPATHLEN).
 */
STATIC uint
xfs_calc_symlink_reservation(
        struct xfs_mount        *mp)
{
        return xfs_calc_create_reservation(mp) +
               xfs_calc_buf_res(1, MAXPATHLEN);
}

/*
 * In freeing an inode we can modify:
 *    the inode being freed: inode size
 *    the super block free inode counter: sector size
 *    the agi hash list and counters: sector size
 *    the inode btree entry: block size
 *    the on disk inode before ours in the agi hash list: inode cluster size
 *    the inode btree: max depth * blocksize
 *    the allocation btrees: 2 trees * (max depth - 1) * block size
 */
STATIC uint
xfs_calc_ifree_reservation(
        struct xfs_mount        *mp)
{
        return XFS_DQUOT_LOGRES(mp) +
                xfs_calc_inode_res(mp, 1) +
                xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
                xfs_calc_buf_res(1, XFS_FSB_TO_B(mp, 1)) +
                max_t(uint, XFS_FSB_TO_B(mp, 1), mp->m_inode_cluster_size) +
                xfs_calc_buf_res(1, 0) +
                xfs_calc_buf_res(2 + mp->m_ialloc_blks +
                                 mp->m_in_maxlevels, 0) +
                xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
                                 XFS_FSB_TO_B(mp, 1));
}

/*
 * When only changing the inode we log the inode and possibly the superblock.
 * We also add a bit of slop for the transaction stuff.
 */
STATIC uint
xfs_calc_ichange_reservation(
        struct xfs_mount        *mp)
{
        return XFS_DQUOT_LOGRES(mp) +
                xfs_calc_inode_res(mp, 1) +
                xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
}

/*
 * Growing the data section of the filesystem.
 *      superblock
 *      agi and agf
 *      allocation btrees
 */
STATIC uint
xfs_calc_growdata_reservation(
        struct xfs_mount        *mp)
{
        return xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
                xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
                                 XFS_FSB_TO_B(mp, 1));
}

/*
 * Growing the rt section of the filesystem.
 * In the first set of transactions (ALLOC) we allocate space to the
 * bitmap or summary files.
 *      superblock: sector size
 *      agf of the ag from which the extent is allocated: sector size
 *      bmap btree for bitmap/summary inode: max depth * blocksize
 *      bitmap/summary inode: inode size
 *      allocation btrees for 1 block alloc: 2 * (2 * maxdepth - 1) * blocksize
 */
STATIC uint
xfs_calc_growrtalloc_reservation(
        struct xfs_mount        *mp)
{
        return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
                xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
                                 XFS_FSB_TO_B(mp, 1)) +
                xfs_calc_inode_res(mp, 1) +
                xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
                                 XFS_FSB_TO_B(mp, 1));
}

/*
 * Growing the rt section of the filesystem.
 * In the second set of transactions (ZERO) we zero the new metadata blocks.
 *      one bitmap/summary block: blocksize
 */
STATIC uint
xfs_calc_growrtzero_reservation(
        struct xfs_mount        *mp)
{
        return xfs_calc_buf_res(1, mp->m_sb.sb_blocksize);
}

/*
 * Growing the rt section of the filesystem.
 * In the third set of transactions (FREE) we update metadata without
 * allocating any new blocks.
 *      superblock: sector size
 *      bitmap inode: inode size
 *      summary inode: inode size
 *      one bitmap block: blocksize
 *      summary blocks: new summary size
 */
STATIC uint
xfs_calc_growrtfree_reservation(
        struct xfs_mount        *mp)
{
        return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
                xfs_calc_inode_res(mp, 2) +
                xfs_calc_buf_res(1, mp->m_sb.sb_blocksize) +
                xfs_calc_buf_res(1, mp->m_rsumsize);
}

/*
 * Logging the inode modification timestamp on a synchronous write.
 *      inode
 */
STATIC uint
xfs_calc_swrite_reservation(
        struct xfs_mount        *mp)
{
        return xfs_calc_inode_res(mp, 1);
}

/*
|
|
|
|
* Logging the inode mode bits when writing a setuid/setgid file
|
|
|
|
* inode
|
|
|
|
*/
|
|
|
|
STATIC uint
|
xfs: inode log reservations are too small
We've been seeing occasional problems with log space leaks and
transaction underruns such as this for some time:
XFS (dm-0): xlog_write: reservation summary:
trans type = FSYNC_TS (36)
unit res = 2740 bytes
current res = -4 bytes
total reg = 0 bytes (o/flow = 0 bytes)
ophdrs = 0 (ophdr space = 0 bytes)
ophdr + reg = 0 bytes
num regions = 0
Turns out that xfstests generic/311 is reliably reproducing this
problem with the test it runs at sequence 16 of it execution. It is
a 100% reliable reproducer with the mkfs configuration of "-b
size=1024 -m crc=1" on a 10GB scratch device.
The problem? Inode forks in btree format are logged in memory
format, not disk format (i.e. bmbt format, not bmdr format). That
means there is a btree block header being logged, when such a
structure is never written to the inode fork in bmdr format. The
bmdr header in the inode is only 4 bytes, while the bmbt header is
24 bytes for v4 filesystems and 72 bytes for v5 filesystems.
We currently reserve the inode size plus the rounded up overhead of
a logging a buffer, which is 128 bytes. That means the reservation
for a 512 byte inode is 640 bytes. What we can actually log is:
inode core, data and attr fork = 512 bytes
inode log format + log op header = 56 + 12 = 68 bytes
data fork bmbt hdr = 24/72 bytes
attr fork bmbt hdr = 24/72 bytes
So, for a v2 inodes we can log at least 628 bytes, but if we split that
inode over the end of the log across log buffers, we need to also
another log op header, which takes us to 640 bytes. If there's
another reservation taken out of this that I haven't taken into
account (perhaps multiple iclog splits?) or I haven't corectly
calculated the bmbt format space used (entirely possible), then
we will overun it.
For v3 inodes the maximum is actually 724 bytes, and even a
single maximally sized btree format fork can blow it (652 bytes).
And that's exactly what is happening with the FSYNC_TS transaction
in the above output - it's consumed 644 bytes of space after the CIL
context took the space reserved for it (2100 bytes).
This problem has always been present in the XFS code - the btree
format inode forks have always been logged in this manner. Hence
there has always been the possibility of an overrun with such a
transaction. The CRC code has just exposed it frequently enough to
be able to debug and understand the root cause....
So, let's fix all the inode log space reservations.
[ I'm so glad we spent the effort to clean up the transaction
reservation code. This is an easy fix now. ]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
2013-08-28 14:10:35 +08:00
|
|
|
xfs_calc_writeid_reservation(
|
|
|
|
struct xfs_mount *mp)
|
2013-08-12 18:49:32 +08:00
|
|
|
{
|
        return xfs_calc_inode_res(mp, 1);
}

/*
 * Converting the inode from non-attributed to attributed.
 *    the inode being converted: inode size
 *    agf block and superblock (for block allocation)
 *    the new block (directory sized)
 *    bmap blocks for the new directory block
 *    allocation btrees
 */
STATIC uint
xfs_calc_addafork_reservation(
        struct xfs_mount        *mp)
{
        return XFS_DQUOT_LOGRES(mp) +
                xfs_calc_inode_res(mp, 1) +
                xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
                xfs_calc_buf_res(1, mp->m_dirblksize) +
                xfs_calc_buf_res(XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1,
                                 XFS_FSB_TO_B(mp, 1)) +
                xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
                                 XFS_FSB_TO_B(mp, 1));
}
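
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * the add-attrfork reservation above is typically consumed by its caller
 * roughly along these lines, assuming the struct xfs_trans_res based
 * xfs_trans_reserve() interface and the XFS_ADDAFORK_SPACE_RES() block
 * estimate:
 *
 *      tp = xfs_trans_alloc(mp, XFS_TRANS_ADDAFORK);
 *      blks = XFS_ADDAFORK_SPACE_RES(mp);
 *      error = xfs_trans_reserve(tp, &M_RES(mp)->tr_addafork, blks, 0);
 */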

/*
 * Removing the attribute fork of a file
 *    the inode being truncated: inode size
 *    the inode's bmap btree: max depth * block size
 * And the bmap_finish transaction can free the blocks and bmap blocks:
 *    the agf for each of the ags: 4 * sector size
 *    the agfl for each of the ags: 4 * sector size
 *    the super block to reflect the freed blocks: sector size
 *    worst case split in allocation btrees per extent assuming 4 extents:
 *        4 exts * 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_attrinval_reservation(
        struct xfs_mount        *mp)
{
        return MAX((xfs_calc_inode_res(mp, 1) +
                    xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK),
                                     XFS_FSB_TO_B(mp, 1))),
                   (xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
                    xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 4),
                                     XFS_FSB_TO_B(mp, 1))));
}
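
/*
 * Added note, not in the original file: the two legs of the MAX() above
 * cover the two separate transactions involved -- the truncation of the
 * attr fork itself, and the bmap_finish transaction that frees the blocks
 * afterwards. They never log their items in the same transaction, so only
 * the larger of the two needs to be reserved.
 */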

/*
 * Setting an attribute at mount time.
 *    the inode getting the attribute
 *    the superblock for allocations
 *    the agfs extents are allocated from
 *    the attribute btree * max depth
 *    the inode allocation btree
 * Since attribute transaction space is dependent on the size of the attribute,
 * the calculation is done partially at mount time and partially at runtime (see
 * below).
 */
STATIC uint
xfs_calc_attrsetm_reservation(
        struct xfs_mount        *mp)
{
        return XFS_DQUOT_LOGRES(mp) +
                xfs_calc_inode_res(mp, 1) +
                xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
                xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH, XFS_FSB_TO_B(mp, 1));
}

/*
 * Setting an attribute at runtime, transaction space unit per block.
 *    the superblock for allocations: sector size
 *    the inode bmap btree could join or split: max depth * block size
 * Since the runtime attribute transaction space is dependent on the total
 * blocks needed for the 1st bmap, here we calculate out the space unit for
 * one block so that the caller could figure out the total space according
 * to the attribute extent length in blocks by:
 *    ext * M_RES(mp)->tr_attrsetrt.tr_logres
 */
STATIC uint
xfs_calc_attrsetrt_reservation(
        struct xfs_mount        *mp)
{
        return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
                xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK),
                                 XFS_FSB_TO_B(mp, 1));
}
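
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * a runtime attribute set would combine the mount-time unit (tr_attrsetm)
 * with this per-block unit (tr_attrsetrt), scaled by the number of blocks
 * the attribute needs, roughly like this (local names are hypothetical):
 *
 *      struct xfs_trans_res    tres;
 *
 *      tres.tr_logres = M_RES(mp)->tr_attrsetm.tr_logres +
 *                       M_RES(mp)->tr_attrsetrt.tr_logres * args.total;
 *      tres.tr_logcount = XFS_ATTRSET_LOG_COUNT;
 *      tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
 *      error = xfs_trans_reserve(args.trans, &tres, args.total, 0);
 */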

/*
 * Removing an attribute.
 *    the inode: inode size
 *    the attribute btree could join: max depth * block size
 *    the inode bmap btree could join or split: max depth * block size
 * And the bmap_finish transaction can free the attr blocks freed giving:
 *    the agf for the ag in which the blocks live: 2 * sector size
 *    the agfl for the ag in which the blocks live: 2 * sector size
 *    the superblock for the free block count: sector size
 *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_attrrm_reservation(
        struct xfs_mount        *mp)
{
        return XFS_DQUOT_LOGRES(mp) +
                MAX((xfs_calc_inode_res(mp, 1) +
                     xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH,
                                      XFS_FSB_TO_B(mp, 1)) +
                     (uint)XFS_FSB_TO_B(mp,
                                        XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) +
                     xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), 0)),
                    (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
                     xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
                                      XFS_FSB_TO_B(mp, 1))));
}

/*
 * Clearing a bad agino number in an agi hash bucket.
 */
STATIC uint
xfs_calc_clear_agi_bucket_reservation(
        struct xfs_mount        *mp)
{
        return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
}

/*
 * Clearing the quotaflags in the superblock.
 *    the super block for changing quota flags: sector size
 */
STATIC uint
xfs_calc_qm_sbchange_reservation(
        struct xfs_mount        *mp)
{
        return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
}

/*
 * Adjusting quota limits.
 *    the xfs_disk_dquot_t: sizeof(struct xfs_disk_dquot)
 */
STATIC uint
xfs_calc_qm_setqlim_reservation(
        struct xfs_mount        *mp)
{
        return xfs_calc_buf_res(1, sizeof(struct xfs_disk_dquot));
}

/*
 * Allocating quota on disk if needed.
 *    the write transaction log space: M_RES(mp)->tr_write.tr_logres
 *    the unit of quota allocation: one system block size
 */
STATIC uint
xfs_calc_qm_dqalloc_reservation(
        struct xfs_mount        *mp)
{
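        /*
         * Added note, not in the original file: the write reservation must
         * already have been calculated here -- xfs_trans_resv_calc() below
         * fills in tr_write before tr_qm_dqalloc -- because the dquot
         * allocation reservation builds on top of it.
         */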
        ASSERT(M_RES(mp)->tr_write.tr_logres);
        return M_RES(mp)->tr_write.tr_logres +
                xfs_calc_buf_res(1,
                        XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) - 1);
}

/*
 * Turning off quotas.
 *    the xfs_qoff_logitem_t: sizeof(struct xfs_qoff_logitem) * 2
 *    the superblock for the quota flags: sector size
 */
STATIC uint
xfs_calc_qm_quotaoff_reservation(
        struct xfs_mount        *mp)
{
        return sizeof(struct xfs_qoff_logitem) * 2 +
                xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
}

/*
 * End of turning off quotas.
 *    the xfs_qoff_logitem_t: sizeof(struct xfs_qoff_logitem) * 2
 */
STATIC uint
xfs_calc_qm_quotaoff_end_reservation(
        struct xfs_mount        *mp)
{
        return sizeof(struct xfs_qoff_logitem) * 2;
}

/*
 * Syncing the incore super block changes to disk.
 *    the super block to reflect the changes: sector size
 */
STATIC uint
xfs_calc_sb_reservation(
        struct xfs_mount        *mp)
{
        return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
}

void
xfs_trans_resv_calc(
        struct xfs_mount        *mp,
        struct xfs_trans_resv   *resp)
{
        /*
         * The following transactions are logged in physical format and
         * require a permanent reservation on space.
         */
        resp->tr_write.tr_logres = xfs_calc_write_reservation(mp);
        resp->tr_write.tr_logcount = XFS_WRITE_LOG_COUNT;
        resp->tr_write.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

        resp->tr_itruncate.tr_logres = xfs_calc_itruncate_reservation(mp);
        resp->tr_itruncate.tr_logcount = XFS_ITRUNCATE_LOG_COUNT;
        resp->tr_itruncate.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

        resp->tr_rename.tr_logres = xfs_calc_rename_reservation(mp);
        resp->tr_rename.tr_logcount = XFS_RENAME_LOG_COUNT;
        resp->tr_rename.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

        resp->tr_link.tr_logres = xfs_calc_link_reservation(mp);
        resp->tr_link.tr_logcount = XFS_LINK_LOG_COUNT;
        resp->tr_link.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

        resp->tr_remove.tr_logres = xfs_calc_remove_reservation(mp);
        resp->tr_remove.tr_logcount = XFS_REMOVE_LOG_COUNT;
        resp->tr_remove.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

        resp->tr_symlink.tr_logres = xfs_calc_symlink_reservation(mp);
        resp->tr_symlink.tr_logcount = XFS_SYMLINK_LOG_COUNT;
        resp->tr_symlink.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

        resp->tr_create.tr_logres = xfs_calc_create_reservation(mp);
        resp->tr_create.tr_logcount = XFS_CREATE_LOG_COUNT;
        resp->tr_create.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

        resp->tr_mkdir.tr_logres = xfs_calc_mkdir_reservation(mp);
        resp->tr_mkdir.tr_logcount = XFS_MKDIR_LOG_COUNT;
        resp->tr_mkdir.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

        resp->tr_ifree.tr_logres = xfs_calc_ifree_reservation(mp);
        resp->tr_ifree.tr_logcount = XFS_INACTIVE_LOG_COUNT;
        resp->tr_ifree.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

        resp->tr_addafork.tr_logres = xfs_calc_addafork_reservation(mp);
        resp->tr_addafork.tr_logcount = XFS_ADDAFORK_LOG_COUNT;
        resp->tr_addafork.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

        resp->tr_attrinval.tr_logres = xfs_calc_attrinval_reservation(mp);
        resp->tr_attrinval.tr_logcount = XFS_ATTRINVAL_LOG_COUNT;
        resp->tr_attrinval.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

        resp->tr_attrsetm.tr_logres = xfs_calc_attrsetm_reservation(mp);
        resp->tr_attrsetm.tr_logcount = XFS_ATTRSET_LOG_COUNT;
        resp->tr_attrsetm.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

        resp->tr_attrrm.tr_logres = xfs_calc_attrrm_reservation(mp);
        resp->tr_attrrm.tr_logcount = XFS_ATTRRM_LOG_COUNT;
        resp->tr_attrrm.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

        resp->tr_growrtalloc.tr_logres = xfs_calc_growrtalloc_reservation(mp);
        resp->tr_growrtalloc.tr_logcount = XFS_DEFAULT_PERM_LOG_COUNT;
        resp->tr_growrtalloc.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

        resp->tr_qm_dqalloc.tr_logres = xfs_calc_qm_dqalloc_reservation(mp);
        resp->tr_qm_dqalloc.tr_logcount = XFS_WRITE_LOG_COUNT;
        resp->tr_qm_dqalloc.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

        /*
         * The following transactions are logged in logical format with
         * a default log count.
         */
        resp->tr_qm_sbchange.tr_logres = xfs_calc_qm_sbchange_reservation(mp);
        resp->tr_qm_sbchange.tr_logcount = XFS_DEFAULT_LOG_COUNT;

        resp->tr_qm_setqlim.tr_logres = xfs_calc_qm_setqlim_reservation(mp);
        resp->tr_qm_setqlim.tr_logcount = XFS_DEFAULT_LOG_COUNT;

        resp->tr_qm_quotaoff.tr_logres = xfs_calc_qm_quotaoff_reservation(mp);
        resp->tr_qm_quotaoff.tr_logcount = XFS_DEFAULT_LOG_COUNT;

        resp->tr_qm_equotaoff.tr_logres =
                xfs_calc_qm_quotaoff_end_reservation(mp);
        resp->tr_qm_equotaoff.tr_logcount = XFS_DEFAULT_LOG_COUNT;

        resp->tr_sb.tr_logres = xfs_calc_sb_reservation(mp);
        resp->tr_sb.tr_logcount = XFS_DEFAULT_LOG_COUNT;

        /* The following transactions are logged in logical format */
        resp->tr_ichange.tr_logres = xfs_calc_ichange_reservation(mp);
        resp->tr_growdata.tr_logres = xfs_calc_growdata_reservation(mp);
        resp->tr_swrite.tr_logres = xfs_calc_swrite_reservation(mp);
        resp->tr_fsyncts.tr_logres = xfs_calc_swrite_reservation(mp);
        resp->tr_writeid.tr_logres = xfs_calc_writeid_reservation(mp);
        resp->tr_attrsetrt.tr_logres = xfs_calc_attrsetrt_reservation(mp);
        resp->tr_clearagi.tr_logres = xfs_calc_clear_agi_bucket_reservation(mp);
        resp->tr_growrtzero.tr_logres = xfs_calc_growrtzero_reservation(mp);
        resp->tr_growrtfree.tr_logres = xfs_calc_growrtfree_reservation(mp);
}
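
/*
 * Illustrative note (added commentary, not part of the original file):
 * this per-mount reservation table is normally computed once at mount
 * time, e.g.
 *
 *      xfs_trans_resv_calc(mp, M_RES(mp));
 *
 * after which individual transactions hand the relevant entry, such as
 * &M_RES(mp)->tr_write, to xfs_trans_reserve().
 */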