/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2018 Red Hat, Inc.
 * All rights reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_rmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_health.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_defer.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_trace.h"
#include "xfs_inode.h"
#include "xfs_icache.h"

/*
 * Passive reference counting access wrappers to the perag structures. If the
 * per-ag structure is to be freed, the freeing code is responsible for
 * cleaning up objects with passive references before freeing the structure.
 * This includes things like cached buffers.
 */
struct xfs_perag *
xfs_perag_get(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;

	rcu_read_lock();
	pag = radix_tree_lookup(&mp->m_perag_tree, agno);
	if (pag) {
		trace_xfs_perag_get(pag, _RET_IP_);
		ASSERT(atomic_read(&pag->pag_ref) >= 0);
		atomic_inc(&pag->pag_ref);
	}
	rcu_read_unlock();
	return pag;
}

/*
 * search from @first to find the next perag with the given tag set.
 */
struct xfs_perag *
xfs_perag_get_tag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		first,
	unsigned int		tag)
{
	struct xfs_perag	*pag;
	int			found;

	rcu_read_lock();
	found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
					(void **)&pag, first, 1, tag);
	if (found <= 0) {
		rcu_read_unlock();
		return NULL;
	}
	trace_xfs_perag_get_tag(pag, _RET_IP_);
	atomic_inc(&pag->pag_ref);
	rcu_read_unlock();
	return pag;
}

/* Get a passive reference to the given perag. */
struct xfs_perag *
xfs_perag_hold(
	struct xfs_perag	*pag)
{
	ASSERT(atomic_read(&pag->pag_ref) > 0 ||
	       atomic_read(&pag->pag_active_ref) > 0);

	trace_xfs_perag_hold(pag, _RET_IP_);
	atomic_inc(&pag->pag_ref);
	return pag;
}

void
xfs_perag_put(
	struct xfs_perag	*pag)
{
	trace_xfs_perag_put(pag, _RET_IP_);
	ASSERT(atomic_read(&pag->pag_ref) > 0);
	atomic_dec(&pag->pag_ref);
}

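/*
 * Illustrative usage of the passive reference wrappers above (a sketch, not
 * code from this file): look the perag up, use it, drop the reference.
 *
 *	pag = xfs_perag_get(mp, agno);
 *	if (pag) {
 *		... access pag->pagf_freeblks, cached per-AG state, etc. ...
 *		xfs_perag_put(pag);
 *	}
 */
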
/*
 * Active references for perag structures. This is for short term access to the
 * per ag structures for walking trees or accessing state. If an AG is being
 * shrunk or is offline, then this will fail to find that AG and return NULL
 * instead.
 */
struct xfs_perag *
xfs_perag_grab(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;

	rcu_read_lock();
	pag = radix_tree_lookup(&mp->m_perag_tree, agno);
	if (pag) {
		trace_xfs_perag_grab(pag, _RET_IP_);
		if (!atomic_inc_not_zero(&pag->pag_active_ref))
			pag = NULL;
	}
	rcu_read_unlock();
	return pag;
}

/*
 * search from @first to find the next perag with the given tag set.
 */
struct xfs_perag *
xfs_perag_grab_tag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		first,
	int			tag)
{
	struct xfs_perag	*pag;
	int			found;

	rcu_read_lock();
	found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
					(void **)&pag, first, 1, tag);
	if (found <= 0) {
		rcu_read_unlock();
		return NULL;
	}
	trace_xfs_perag_grab_tag(pag, _RET_IP_);
	if (!atomic_inc_not_zero(&pag->pag_active_ref))
		pag = NULL;
	rcu_read_unlock();
	return pag;
}

void
xfs_perag_rele(
	struct xfs_perag	*pag)
{
	trace_xfs_perag_rele(pag, _RET_IP_);
	if (atomic_dec_and_test(&pag->pag_active_ref))
		wake_up(&pag->pag_active_wq);
}

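/*
 * Sketch of the active reference pattern (illustrative, not original code):
 * unlike the passive get/put pair, a grab fails once the AG has been taken
 * offline or shrunk away, so callers must check for NULL and balance each
 * successful grab with xfs_perag_rele().
 *
 *	pag = xfs_perag_grab(mp, agno);
 *	if (!pag)
 *		return;		... AG is gone, nothing to walk ...
 *	... walk btrees or per-AG state ...
 *	xfs_perag_rele(pag);
 */
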
/*
 * xfs_initialize_perag_data
 *
 * Read in each per-ag structure so we can count up the number of
 * allocated inodes, free inodes and used filesystem blocks as this
 * information is no longer persistent in the superblock. Once we have
 * this information, write it into the in-core superblock structure.
 */
int
xfs_initialize_perag_data(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agcount)
{
	xfs_agnumber_t		index;
	struct xfs_perag	*pag;
	struct xfs_sb		*sbp = &mp->m_sb;
	uint64_t		ifree = 0;
	uint64_t		ialloc = 0;
	uint64_t		bfree = 0;
	uint64_t		bfreelst = 0;
	uint64_t		btree = 0;
	uint64_t		fdblocks;
	int			error = 0;

	for (index = 0; index < agcount; index++) {
		/*
		 * Read the AGF and AGI buffers to populate the per-ag
		 * structures for us.
		 */
		pag = xfs_perag_get(mp, index);
		error = xfs_alloc_read_agf(pag, NULL, 0, NULL);
		if (!error)
			error = xfs_ialloc_read_agi(pag, NULL, NULL);
		if (error) {
			xfs_perag_put(pag);
			return error;
		}

		ifree += pag->pagi_freecount;
		ialloc += pag->pagi_count;
		bfree += pag->pagf_freeblks;
		bfreelst += pag->pagf_flcount;
		btree += pag->pagf_btreeblks;
		xfs_perag_put(pag);
	}
	fdblocks = bfree + bfreelst + btree;

	/*
	 * If the new summary counts are obviously incorrect, fail the
	 * mount operation because that implies the AGFs are also corrupt.
	 * Clear FS_COUNTERS so that we don't unmount with a dirty log, which
	 * will prevent xfs_repair from fixing anything.
	 */
	if (fdblocks > sbp->sb_dblocks || ifree > ialloc) {
		xfs_alert(mp, "AGF corruption. Please run xfs_repair.");
		error = -EFSCORRUPTED;
		goto out;
	}

	/* Overwrite incore superblock counters with just-read data */
	spin_lock(&mp->m_sb_lock);
	sbp->sb_ifree = ifree;
	sbp->sb_icount = ialloc;
	sbp->sb_fdblocks = fdblocks;
	spin_unlock(&mp->m_sb_lock);

	xfs_reinit_percpu_counters(mp);
out:
	xfs_fs_mark_healthy(mp, XFS_SICK_FS_COUNTERS);
	return error;
}

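/*
 * Note (added summary, not original text): this rebuild is the fallback for
 * when the lazily maintained superblock counters cannot be trusted; the free
 * block total is simply the sum of free extents (pagf_freeblks), AGFL slots
 * (pagf_flcount) and free space btree blocks (pagf_btreeblks) across all AGs.
 */
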
STATIC void
__xfs_free_perag(
	struct rcu_head	*head)
{
	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);

	ASSERT(!delayed_work_pending(&pag->pag_blockgc_work));
	kmem_free(pag);
}

/*
 * Free up the per-ag resources associated with the mount structure.
 */
void
xfs_free_perag(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		spin_lock(&mp->m_perag_lock);
		pag = radix_tree_delete(&mp->m_perag_tree, agno);
		spin_unlock(&mp->m_perag_lock);
		ASSERT(pag);
		XFS_IS_CORRUPT(pag->pag_mount, atomic_read(&pag->pag_ref) != 0);
		xfs_defer_drain_free(&pag->pag_intents_drain);

		cancel_delayed_work_sync(&pag->pag_blockgc_work);
		xfs_buf_hash_destroy(pag);

		/* drop the mount's active reference */
		xfs_perag_rele(pag);
		XFS_IS_CORRUPT(pag->pag_mount,
				atomic_read(&pag->pag_active_ref) != 0);
		call_rcu(&pag->rcu_head, __xfs_free_perag);
	}
}

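/*
 * Teardown ordering note (added summary, not original text): each perag is
 * removed from the radix tree first, the mount's active reference is dropped
 * next, and the structure itself is only freed after an RCU grace period via
 * __xfs_free_perag(), so concurrent RCU-protected lookups never touch freed
 * memory.
 */
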
/* Find the size of the AG, in blocks. */
static xfs_agblock_t
__xfs_ag_block_count(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agnumber_t		agcount,
	xfs_rfsblock_t		dblocks)
{
	ASSERT(agno < agcount);

	if (agno < agcount - 1)
		return mp->m_sb.sb_agblocks;
	return dblocks - (agno * mp->m_sb.sb_agblocks);
}

xfs_agblock_t
xfs_ag_block_count(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	return __xfs_ag_block_count(mp, agno, mp->m_sb.sb_agcount,
			mp->m_sb.sb_dblocks);
}

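/*
 * Worked example (illustrative numbers, not from the source): with
 * sb_agblocks = 1000 and sb_dblocks = 3500, AGs 0-2 each report 1000 blocks
 * while the last AG (agno 3) reports 3500 - 3 * 1000 = 500 blocks, i.e. the
 * shorter runt AG at the end of the device.
 */
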
/* Calculate the first and last possible inode number in an AG. */
static void
__xfs_agino_range(
	struct xfs_mount	*mp,
	xfs_agblock_t		eoag,
	xfs_agino_t		*first,
	xfs_agino_t		*last)
{
	xfs_agblock_t		bno;

	/*
	 * Calculate the first inode, which will be in the first
	 * cluster-aligned block after the AGFL.
	 */
	bno = round_up(XFS_AGFL_BLOCK(mp) + 1, M_IGEO(mp)->cluster_align);
	*first = XFS_AGB_TO_AGINO(mp, bno);

	/*
	 * Calculate the last inode, which will be at the end of the
	 * last (aligned) cluster that can be allocated in the AG.
	 */
	bno = round_down(eoag, M_IGEO(mp)->cluster_align);
	*last = XFS_AGB_TO_AGINO(mp, bno) - 1;
}

void
xfs_agino_range(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agino_t		*first,
	xfs_agino_t		*last)
{
	return __xfs_agino_range(mp, xfs_ag_block_count(mp, agno), first, last);
}

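/*
 * Note (added, not original text): these are the same bounds cached as
 * pag->agino_min and pag->agino_max in xfs_initialize_perag() below, which
 * per-AG inode number validity checks key off.
 */
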
int
xfs_initialize_perag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agcount,
	xfs_rfsblock_t		dblocks,
	xfs_agnumber_t		*maxagi)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		index;
	xfs_agnumber_t		first_initialised = NULLAGNUMBER;
	int			error;

	/*
	 * Walk the current per-ag tree so we don't try to initialise AGs
	 * that already exist (growfs case). Allocate and insert all the
	 * AGs we don't find ready for initialisation.
	 */
	for (index = 0; index < agcount; index++) {
		pag = xfs_perag_get(mp, index);
		if (pag) {
			xfs_perag_put(pag);
			continue;
		}

		pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
		if (!pag) {
			error = -ENOMEM;
			goto out_unwind_new_pags;
		}
		pag->pag_agno = index;
		pag->pag_mount = mp;

		error = radix_tree_preload(GFP_NOFS);
		if (error)
			goto out_free_pag;

		spin_lock(&mp->m_perag_lock);
		if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
			WARN_ON_ONCE(1);
			spin_unlock(&mp->m_perag_lock);
			radix_tree_preload_end();
			error = -EEXIST;
			goto out_free_pag;
		}
		spin_unlock(&mp->m_perag_lock);
		radix_tree_preload_end();

#ifdef __KERNEL__
		/* Place kernel structure only init below this point. */
		spin_lock_init(&pag->pag_ici_lock);
		spin_lock_init(&pag->pagb_lock);
		spin_lock_init(&pag->pag_state_lock);
		INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
		xfs_defer_drain_init(&pag->pag_intents_drain);
		init_waitqueue_head(&pag->pagb_wait);
		init_waitqueue_head(&pag->pag_active_wq);
		pag->pagb_count = 0;
		pag->pagb_tree = RB_ROOT;
#endif /* __KERNEL__ */

		error = xfs_buf_hash_init(pag);
		if (error)
			goto out_remove_pag;

		/* Active ref owned by mount indicates AG is online. */
		atomic_set(&pag->pag_active_ref, 1);

		/* first new pag is fully initialized */
		if (first_initialised == NULLAGNUMBER)
			first_initialised = index;

		/*
		 * Pre-calculated geometry
		 */
		pag->block_count = __xfs_ag_block_count(mp, index, agcount,
				dblocks);
		pag->min_block = XFS_AGFL_BLOCK(mp);
		__xfs_agino_range(mp, pag->block_count, &pag->agino_min,
				&pag->agino_max);
	}

	index = xfs_set_inode_alloc(mp, agcount);

	if (maxagi)
		*maxagi = index;

	mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
	return 0;

out_remove_pag:
	xfs_defer_drain_free(&pag->pag_intents_drain);
	radix_tree_delete(&mp->m_perag_tree, index);
out_free_pag:
	kmem_free(pag);
out_unwind_new_pags:
	/* unwind any prior newly initialized pags */
	for (index = first_initialised; index < agcount; index++) {
		pag = radix_tree_delete(&mp->m_perag_tree, index);
		if (!pag)
			break;
		xfs_buf_hash_destroy(pag);
		xfs_defer_drain_free(&pag->pag_intents_drain);
		kmem_free(pag);
	}
	return error;
}

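/*
 * Usage note (added, not original text): xfs_initialize_perag() runs both at
 * mount time and when growfs adds AGs, which is why it skips indices that
 * already have a perag in the radix tree and, on failure, only unwinds the
 * structures it created itself.
 */
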
static int
xfs_get_aghdr_buf(
	struct xfs_mount	*mp,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0, &bp);
	if (error)
		return error;

	bp->b_maps[0].bm_bn = blkno;
	bp->b_ops = ops;

	*bpp = bp;
	return 0;
}

/*
 * Generic btree root block init function
 */
static void
xfs_btroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, id->type, 0, 0, id->agno);
}

/* Finish initializing a free space btree. */
static void
xfs_freesp_init_recs(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_alloc_rec	*arec;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);

	arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
	arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);

	if (xfs_ag_contains_log(mp, id->agno)) {
		struct xfs_alloc_rec	*nrec;
		xfs_agblock_t		start = XFS_FSB_TO_AGBNO(mp,
							mp->m_sb.sb_logstart);

		ASSERT(start >= mp->m_ag_prealloc_blocks);
		if (start != mp->m_ag_prealloc_blocks) {
			/*
			 * Modify first record to pad stripe align of log
			 */
			arec->ar_blockcount = cpu_to_be32(start -
					mp->m_ag_prealloc_blocks);
			nrec = arec + 1;

			/*
			 * Insert second record at start of internal log
			 * which then gets trimmed.
			 */
			nrec->ar_startblock = cpu_to_be32(
					be32_to_cpu(arec->ar_startblock) +
					be32_to_cpu(arec->ar_blockcount));
			arec = nrec;
			be16_add_cpu(&block->bb_numrecs, 1);
		}
		/*
		 * Change record start to after the internal log
		 */
		be32_add_cpu(&arec->ar_startblock, mp->m_sb.sb_logblocks);
	}

	/*
	 * Calculate the record block count and check for the case where
	 * the log might have consumed all available space in the AG. If
	 * so, reset the record count to 0 to avoid exposure of an invalid
	 * record start block.
	 */
	arec->ar_blockcount = cpu_to_be32(id->agsize -
			be32_to_cpu(arec->ar_startblock));
	if (!arec->ar_blockcount)
		block->bb_numrecs = 0;
}

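/*
 * Summary (added, not original text): the initial free space record covers
 * everything from the end of the statically preallocated AG header blocks to
 * the end of the AG; when this AG hosts the internal log the record is split
 * and shifted so that the log range is never advertised as free space.
 */
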
/*
 * Alloc btree root block init functions
 */
static void
xfs_bnoroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, XFS_BTNUM_BNO, 0, 1, id->agno);
	xfs_freesp_init_recs(mp, bp, id);
}

static void
xfs_cntroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, XFS_BTNUM_CNT, 0, 1, id->agno);
	xfs_freesp_init_recs(mp, bp, id);
}

/*
 * Reverse map root block init
 */
static void
xfs_rmaproot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_rmap_rec	*rrec;

	xfs_btree_init_block(mp, bp, XFS_BTNUM_RMAP, 0, 4, id->agno);

	/*
	 * Mark the AG header regions as static metadata. The BNO
	 * btree block is the first block after the headers, so
	 * its location defines the size of the region the static
	 * metadata consumes.
	 *
	 * Note: unlike mkfs, we never have to account for log
	 * space when growing the data regions
	 */
	rrec = XFS_RMAP_REC_ADDR(block, 1);
	rrec->rm_startblock = 0;
	rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS);
	rrec->rm_offset = 0;

	/* account freespace btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 2);
	rrec->rm_startblock = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(2);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account inode btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 3);
	rrec->rm_startblock = cpu_to_be32(XFS_IBT_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(XFS_RMAP_BLOCK(mp) -
					XFS_IBT_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_INOBT);
	rrec->rm_offset = 0;

	/* account for rmap btree root */
	rrec = XFS_RMAP_REC_ADDR(block, 4);
	rrec->rm_startblock = cpu_to_be32(XFS_RMAP_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(1);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account for refc btree root */
	if (xfs_has_reflink(mp)) {
		rrec = XFS_RMAP_REC_ADDR(block, 5);
		rrec->rm_startblock = cpu_to_be32(xfs_refc_block(mp));
		rrec->rm_blockcount = cpu_to_be32(1);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_REFC);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);
	}

	/* account for the log space */
	if (xfs_ag_contains_log(mp, id->agno)) {
		rrec = XFS_RMAP_REC_ADDR(block,
				be16_to_cpu(block->bb_numrecs) + 1);
		rrec->rm_startblock = cpu_to_be32(
				XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart));
		rrec->rm_blockcount = cpu_to_be32(mp->m_sb.sb_logblocks);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_LOG);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);
	}
}

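/*
 * Summary (added, not original text): the new rmap root always starts with
 * four records (AG header area, free space btree roots, inode btree roots,
 * and the rmap root itself) and gains one more record each for the refcount
 * btree root and the internal log when those exist in this AG.
 */
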
/*
 * Initialise new secondary superblocks with the pre-grow geometry, but mark
 * them as "in progress" so we know they haven't yet been activated. This will
 * get cleared when the update with the new geometry information is done after
 * changes to the primary are committed. This isn't strictly necessary, but we
 * get it for free with the delayed buffer write lists and it means we can tell
 * if a grow operation didn't complete properly after the fact.
 */
static void
xfs_sbblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_dsb		*dsb = bp->b_addr;

	xfs_sb_to_disk(dsb, &mp->m_sb);
	dsb->sb_inprogress = 1;
}

static void
xfs_agfblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agf		*agf = bp->b_addr;
	xfs_extlen_t		tmpsize;

	agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
	agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
	agf->agf_seqno = cpu_to_be32(id->agno);
	agf->agf_length = cpu_to_be32(id->agsize);
	agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
	agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
	agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
	agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
	if (xfs_has_rmapbt(mp)) {
		agf->agf_roots[XFS_BTNUM_RMAPi] =
					cpu_to_be32(XFS_RMAP_BLOCK(mp));
		agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
		agf->agf_rmap_blocks = cpu_to_be32(1);
	}

	agf->agf_flfirst = cpu_to_be32(1);
	agf->agf_fllast = 0;
	agf->agf_flcount = 0;
	tmpsize = id->agsize - mp->m_ag_prealloc_blocks;
	agf->agf_freeblks = cpu_to_be32(tmpsize);
	agf->agf_longest = cpu_to_be32(tmpsize);
	if (xfs_has_crc(mp))
		uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_has_reflink(mp)) {
		agf->agf_refcount_root = cpu_to_be32(
				xfs_refc_block(mp));
		agf->agf_refcount_level = cpu_to_be32(1);
		agf->agf_refcount_blocks = cpu_to_be32(1);
	}

	if (xfs_ag_contains_log(mp, id->agno)) {
		int64_t	logblocks = mp->m_sb.sb_logblocks;

		be32_add_cpu(&agf->agf_freeblks, -logblocks);
		agf->agf_longest = cpu_to_be32(id->agsize -
			XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart) - logblocks);
	}
}

static void
xfs_agflblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agfl		*agfl = XFS_BUF_TO_AGFL(bp);
	__be32			*agfl_bno;
	int			bucket;

	if (xfs_has_crc(mp)) {
		agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
		agfl->agfl_seqno = cpu_to_be32(id->agno);
		uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
	}

	agfl_bno = xfs_buf_to_agfl_bno(bp);
	for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
		agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
}

static void
xfs_agiblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agi		*agi = bp->b_addr;
	int			bucket;

	agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
	agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
	agi->agi_seqno = cpu_to_be32(id->agno);
	agi->agi_length = cpu_to_be32(id->agsize);
	agi->agi_count = 0;
	agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
	agi->agi_level = cpu_to_be32(1);
	agi->agi_freecount = 0;
	agi->agi_newino = cpu_to_be32(NULLAGINO);
	agi->agi_dirino = cpu_to_be32(NULLAGINO);
	if (xfs_has_crc(mp))
		uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_has_finobt(mp)) {
		agi->agi_free_root = cpu_to_be32(XFS_FIBT_BLOCK(mp));
		agi->agi_free_level = cpu_to_be32(1);
	}
	for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
		agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
	if (xfs_has_inobtcounts(mp)) {
		agi->agi_iblocks = cpu_to_be32(1);
		if (xfs_has_finobt(mp))
			agi->agi_fblocks = cpu_to_be32(1);
	}
}

typedef void (*aghdr_init_work_f)(struct xfs_mount *mp, struct xfs_buf *bp,
				  struct aghdr_init_data *id);
static int
xfs_ag_init_hdr(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id,
	aghdr_init_work_f	work,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, &bp, ops);
	if (error)
		return error;

	(*work)(mp, bp, id);

	xfs_buf_delwri_queue(bp, &id->buffer_list);
	xfs_buf_relse(bp);
	return 0;
}

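/*
 * Note (added, not original text): xfs_ag_init_hdr() never issues I/O itself;
 * each initialised header buffer is queued on id->buffer_list so the caller
 * can flush the whole batch with one delayed-write submission and wait for
 * completion in a single pass.
 */
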
struct xfs_aghdr_grow_data {
	xfs_daddr_t		daddr;
	size_t			numblks;
	const struct xfs_buf_ops *ops;
	aghdr_init_work_f	work;
	xfs_btnum_t		type;
	bool			need_init;
};

/*
 * Prepare new AG headers to be written to disk. We use uncached buffers here,
 * as it is assumed these new AG headers are beyond the currently valid
 * filesystem address space. Using cached buffers would trip over EOFS
 * corruption detection algorithms in the buffer cache lookup routines.
 *
 * This is a non-transactional function, but the prepared buffers are added to
 * a delayed write buffer list supplied by the caller so they can submit them
 * to disk and wait on them as required.
 */
int
xfs_ag_init_headers(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id)
{
	struct xfs_aghdr_grow_data aghdr_data[] = {
	{ /* SB */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_SB_DADDR),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_sb_buf_ops,
		.work = &xfs_sbblock_init,
		.need_init = true
	},
	{ /* AGF */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGF_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agf_buf_ops,
		.work = &xfs_agfblock_init,
		.need_init = true
	},
	{ /* AGFL */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGFL_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agfl_buf_ops,
		.work = &xfs_agflblock_init,
		.need_init = true
	},
	{ /* AGI */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGI_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agi_buf_ops,
		.work = &xfs_agiblock_init,
		.need_init = true
	},
	{ /* BNO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_BNO_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_bnobt_buf_ops,
		.work = &xfs_bnoroot_init,
		.need_init = true
	},
	{ /* CNT root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_CNT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_cntbt_buf_ops,
		.work = &xfs_cntroot_init,
		.need_init = true
	},
	{ /* INO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_IBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_inobt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_INO,
		.need_init = true
	},
	{ /* FINO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_FIBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_finobt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_FINO,
		.need_init = xfs_has_finobt(mp)
	},
	{ /* RMAP root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_RMAP_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_rmapbt_buf_ops,
		.work = &xfs_rmaproot_init,
		.need_init = xfs_has_rmapbt(mp)
	},
	{ /* REFC root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, xfs_refc_block(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_refcountbt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_REFC,
		.need_init = xfs_has_reflink(mp)
	},
	{ /* NULL terminating block */
		.daddr = XFS_BUF_DADDR_NULL,
	}
	};
	struct xfs_aghdr_grow_data *dp;
	int			error = 0;

	/* Account for AG free space in new AG */
	id->nfree += id->agsize - mp->m_ag_prealloc_blocks;
	for (dp = &aghdr_data[0]; dp->daddr != XFS_BUF_DADDR_NULL; dp++) {
		if (!dp->need_init)
			continue;

		id->daddr = dp->daddr;
		id->numblks = dp->numblks;
		id->type = dp->type;
		error = xfs_ag_init_hdr(mp, id, dp->work, dp->ops);
		if (error)
			break;
	}
	return error;
}

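/*
 * Design note (added, not original text): aghdr_data[] makes header
 * initialisation table driven; growfs just walks the table, skips entries
 * whose feature is not enabled (need_init == false), and hands each
 * daddr/ops/work triplet to xfs_ag_init_hdr().
 */
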
int
xfs_ag_shrink_space(
	struct xfs_perag	*pag,
	struct xfs_trans	**tpp,
	xfs_extlen_t		delta)
{
	struct xfs_mount	*mp = pag->pag_mount;
	struct xfs_alloc_arg	args = {
		.tp	= *tpp,
		.mp	= mp,
		.pag	= pag,
		.minlen = delta,
		.maxlen = delta,
		.oinfo	= XFS_RMAP_OINFO_SKIP_UPDATE,
		.resv	= XFS_AG_RESV_NONE,
		.prod	= 1
	};
	struct xfs_buf		*agibp, *agfbp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	xfs_agblock_t		aglen;
	int			error, err2;

	ASSERT(pag->pag_agno == mp->m_sb.sb_agcount - 1);
	error = xfs_ialloc_read_agi(pag, *tpp, &agibp);
	if (error)
		return error;

	agi = agibp->b_addr;

	error = xfs_alloc_read_agf(pag, *tpp, 0, &agfbp);
	if (error)
		return error;

	agf = agfbp->b_addr;
	aglen = be32_to_cpu(agi->agi_length);
	/* some extra paranoid checks before we shrink the ag */
	if (XFS_IS_CORRUPT(mp, agf->agf_length != agi->agi_length))
		return -EFSCORRUPTED;
	if (delta >= aglen)
		return -EINVAL;

	/*
	 * Make sure that the last inode cluster cannot overlap with the new
	 * end of the AG, even if it's sparse.
	 */
	error = xfs_ialloc_check_shrink(pag, *tpp, agibp, aglen - delta);
	if (error)
		return error;

	/*
	 * Disable perag reservations so it doesn't cause the allocation request
	 * to fail. We'll reestablish reservation before we return.
	 */
	error = xfs_ag_resv_free(pag);
	if (error)
		return error;

	/* internal log shouldn't also show up in the free space btrees */
	error = xfs_alloc_vextent_exact_bno(&args,
			XFS_AGB_TO_FSB(mp, pag->pag_agno, aglen - delta));
	if (!error && args.agbno == NULLAGBLOCK)
		error = -ENOSPC;

	if (error) {
		/*
		 * if extent allocation fails, need to roll the transaction to
		 * ensure that the AGFL fixup has been committed anyway.
		 */
		xfs_trans_bhold(*tpp, agfbp);
		err2 = xfs_trans_roll(tpp);
		if (err2)
			return err2;
		xfs_trans_bjoin(*tpp, agfbp);
		goto resv_init_out;
	}

	/*
	 * if successfully deleted from freespace btrees, need to confirm
	 * per-AG reservation works as expected.
	 */
	be32_add_cpu(&agi->agi_length, -delta);
	be32_add_cpu(&agf->agf_length, -delta);

	err2 = xfs_ag_resv_init(pag, *tpp);
	if (err2) {
		be32_add_cpu(&agi->agi_length, delta);
		be32_add_cpu(&agf->agf_length, delta);
		if (err2 != -ENOSPC)
			goto resv_err;

		__xfs_free_extent_later(*tpp, args.fsbno, delta, NULL, true);

		/*
		 * Roll the transaction before trying to re-init the per-ag
		 * reservation. The new transaction is clean so it will cancel
		 * without any side effects.
		 */
		error = xfs_defer_finish(tpp);
		if (error)
			return error;

		error = -ENOSPC;
		goto resv_init_out;
	}
	xfs_ialloc_log_agi(*tpp, agibp, XFS_AGI_LENGTH);
	xfs_alloc_log_agf(*tpp, agfbp, XFS_AGF_LENGTH);
	return 0;

resv_init_out:
	err2 = xfs_ag_resv_init(pag, *tpp);
	if (!err2)
		return error;
resv_err:
	xfs_warn(mp, "Error %d reserving per-AG metadata reserve pool.", err2);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return err2;
}

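/*
 * Flow summary (added, not original text): shrinking the tail of the last AG
 * works by "allocating" that range out of the free space btrees with an
 * exact-bno allocation; only if that succeeds are the AGF/AGI lengths reduced
 * and logged, and the per-AG reservation is re-established before returning.
 */
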
/*
 * Extend the AG indicated by @pag by the length passed in.
 */
int
xfs_ag_extend_space(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	xfs_extlen_t		len)
{
	struct xfs_buf		*bp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	int			error;

	ASSERT(pag->pag_agno == pag->pag_mount->m_sb.sb_agcount - 1);

	error = xfs_ialloc_read_agi(pag, tp, &bp);
	if (error)
		return error;

	agi = bp->b_addr;
	be32_add_cpu(&agi->agi_length, len);
	xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);

	/*
	 * Change agf length.
	 */
	error = xfs_alloc_read_agf(pag, tp, 0, &bp);
	if (error)
		return error;

	agf = bp->b_addr;
	be32_add_cpu(&agf->agf_length, len);
	ASSERT(agf->agf_length == agi->agi_length);
	xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);

	/*
	 * Free the new space.
	 *
	 * XFS_RMAP_OINFO_SKIP_UPDATE is used here to tell the rmap btree that
	 * this doesn't actually exist in the rmap btree.
	 */
	error = xfs_rmap_free(tp, bp, pag, be32_to_cpu(agf->agf_length) - len,
				len, &XFS_RMAP_OINFO_SKIP_UPDATE);
	if (error)
		return error;

	error = xfs_free_extent(tp, pag, be32_to_cpu(agf->agf_length) - len,
			len, &XFS_RMAP_OINFO_SKIP_UPDATE, XFS_AG_RESV_NONE);
	if (error)
		return error;

	/* Update perag geometry */
	pag->block_count = be32_to_cpu(agf->agf_length);
	__xfs_agino_range(pag->pag_mount, pag->block_count, &pag->agino_min,
				&pag->agino_max);
	return 0;
}

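/*
 * Usage note (assumption, not stated here): xfs_ag_extend_space() is the
 * growfs-side counterpart of xfs_ag_shrink_space(); after bumping the AGI/AGF
 * lengths it frees the newly added range so it appears in the free space
 * btrees, skipping the rmap update because the new range has no rmap record
 * to remove.
 */
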
/* Retrieve AG geometry. */
int
xfs_ag_get_geometry(
	struct xfs_perag	*pag,
	struct xfs_ag_geometry	*ageo)
{
	struct xfs_buf		*agi_bp;
	struct xfs_buf		*agf_bp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	unsigned int		freeblks;
	int			error;

	/* Lock the AG headers. */
	error = xfs_ialloc_read_agi(pag, NULL, &agi_bp);
	if (error)
		return error;
	error = xfs_alloc_read_agf(pag, NULL, 0, &agf_bp);
	if (error)
		goto out_agi;

	/* Fill out form. */
	memset(ageo, 0, sizeof(*ageo));
	ageo->ag_number = pag->pag_agno;

	agi = agi_bp->b_addr;
	ageo->ag_icount = be32_to_cpu(agi->agi_count);
	ageo->ag_ifree = be32_to_cpu(agi->agi_freecount);

	agf = agf_bp->b_addr;
	ageo->ag_length = be32_to_cpu(agf->agf_length);
	freeblks = pag->pagf_freeblks +
		   pag->pagf_flcount +
		   pag->pagf_btreeblks -
		   xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE);
	ageo->ag_freeblks = freeblks;
	xfs_ag_geom_health(pag, ageo);

	/* Release resources. */
	xfs_buf_relse(agf_bp);
out_agi:
	xfs_buf_relse(agi_bp);
	return error;
}