Merge git://oss.sgi.com:8090/xfs/xfs-2.6

* git://oss.sgi.com:8090/xfs/xfs-2.6: (33 commits)
  [XFS] Don't use kmap in xfs_iozero.
  [XFS] Remove a bunch of unused functions from XFS.
  [XFS] Remove unused arguments from the XFS_BTREE_*_ADDR macros.
  [XFS] Remove unused header files for MAC and CAP checking functionality.
  [XFS] Make freeze code a little cleaner.
  [XFS] Remove unused argument to xfs_bmap_finish
  [XFS] Clean up use of VFS attr flags
  [XFS] Remove useless memory barrier
  [XFS] XFS sysctl cleanups
  [XFS] Fix assertion in xfs_attr_shortform_remove().
  [XFS] Fix callers of xfs_iozero() to zero the correct range.
  [XFS] Ensure a frozen filesystem has a clean log before writing the dummy
  [XFS] Fix sub-block zeroing for buffered writes into unwritten extents.
  [XFS] Re-initialize the per-cpu superblock counters after recovery.
  [XFS] Fix block reservation changes for non-SMP systems.
  [XFS] Fix block reservation mechanism.
  [XFS] Make growfs work for amounts greater than 2TB
  [XFS] Fix inode log item use-after-free on forced shutdown
  [XFS] Fix attr2 corruption with btree data extents
  [XFS] Workaround log space issue by increasing XFS_TRANS_PUSH_AIL_RESTARTS
  ...
commit 958b7f37ee
@@ -31,15 +31,13 @@ typedef struct {
do { (mrp)->mr_writer = 0; init_rwsem(&(mrp)->mr_lock); } while (0)
#define mrlock_init(mrp, t,n,s) mrinit(mrp, n)
#define mrfree(mrp) do { } while (0)
#define mraccess(mrp) mraccessf(mrp, 0)
#define mrupdate(mrp) mrupdatef(mrp, 0)

static inline void mraccessf(mrlock_t *mrp, int flags)
static inline void mraccess(mrlock_t *mrp)
{
down_read(&mrp->mr_lock);
}

static inline void mrupdatef(mrlock_t *mrp, int flags)
static inline void mrupdate(mrlock_t *mrp)
{
down_write(&mrp->mr_lock);
mrp->mr_writer = 1;
@@ -249,7 +249,7 @@ xfs_map_blocks(
return -error;
}

STATIC inline int
STATIC_INLINE int
xfs_iomap_valid(
xfs_iomap_t *iomapp,
loff_t offset)
@@ -1283,13 +1283,18 @@ __xfs_get_blocks(
bh_result->b_bdev = iomap.iomap_target->bt_bdev;

/*
* If we previously allocated a block out beyond eof and we are
* now coming back to use it then we will need to flag it as new
* even if it has a disk address.
* If we previously allocated a block out beyond eof and we are now
* coming back to use it then we will need to flag it as new even if it
* has a disk address.
*
* With sub-block writes into unwritten extents we also need to mark
* the buffer as new so that the unwritten parts of the buffer gets
* correctly zeroed.
*/
if (create &&
((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
(offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW)))
(offset >= i_size_read(inode)) ||
(iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
set_buffer_new(bh_result);

if (iomap.iomap_flags & IOMAP_DELAY) {
@@ -34,13 +34,13 @@
#include <linux/backing-dev.h>
#include <linux/freezer.h>

STATIC kmem_zone_t *xfs_buf_zone;
STATIC kmem_shaker_t xfs_buf_shake;
static kmem_zone_t *xfs_buf_zone;
static kmem_shaker_t xfs_buf_shake;
STATIC int xfsbufd(void *);
STATIC int xfsbufd_wakeup(int, gfp_t);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);

STATIC struct workqueue_struct *xfslogd_workqueue;
static struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;

#ifdef XFS_BUF_TRACE
@@ -139,7 +139,7 @@ page_region_mask(
return mask;
}

STATIC inline void
STATIC_INLINE void
set_page_region(
struct page *page,
size_t offset,
@@ -151,7 +151,7 @@ set_page_region(
SetPageUptodate(page);
}

STATIC inline int
STATIC_INLINE int
test_page_region(
struct page *page,
size_t offset,
@@ -171,9 +171,9 @@ typedef struct a_list {
struct a_list *next;
} a_list_t;

STATIC a_list_t *as_free_head;
STATIC int as_list_len;
STATIC DEFINE_SPINLOCK(as_lock);
static a_list_t *as_free_head;
static int as_list_len;
static DEFINE_SPINLOCK(as_lock);

/*
* Try to batch vunmaps because they are costly.
@@ -1085,7 +1085,7 @@ xfs_buf_iostart(
return status;
}

STATIC __inline__ int
STATIC_INLINE int
_xfs_buf_iolocked(
xfs_buf_t *bp)
{
@@ -1095,7 +1095,7 @@ _xfs_buf_iolocked(
return 0;
}

STATIC __inline__ void
STATIC_INLINE void
_xfs_buf_ioend(
xfs_buf_t *bp,
int schedule)
@@ -1426,8 +1426,8 @@ xfs_free_bufhash(
/*
* buftarg list for delwrite queue processing
*/
STATIC LIST_HEAD(xfs_buftarg_list);
STATIC DEFINE_SPINLOCK(xfs_buftarg_lock);
LIST_HEAD(xfs_buftarg_list);
static DEFINE_SPINLOCK(xfs_buftarg_lock);

STATIC void
xfs_register_buftarg(
@@ -1679,21 +1679,60 @@ xfsbufd_wakeup(
return 0;
}

/*
* Move as many buffers as specified to the supplied list
* idicating if we skipped any buffers to prevent deadlocks.
*/
STATIC int
xfs_buf_delwri_split(
xfs_buftarg_t *target,
struct list_head *list,
unsigned long age)
{
xfs_buf_t *bp, *n;
struct list_head *dwq = &target->bt_delwrite_queue;
spinlock_t *dwlk = &target->bt_delwrite_lock;
int skipped = 0;
int force;

force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
INIT_LIST_HEAD(list);
spin_lock(dwlk);
list_for_each_entry_safe(bp, n, dwq, b_list) {
XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
ASSERT(bp->b_flags & XBF_DELWRI);

if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
if (!force &&
time_before(jiffies, bp->b_queuetime + age)) {
xfs_buf_unlock(bp);
break;
}

bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
_XBF_RUN_QUEUES);
bp->b_flags |= XBF_WRITE;
list_move_tail(&bp->b_list, list);
} else
skipped++;
}
spin_unlock(dwlk);

return skipped;

}

STATIC int
xfsbufd(
void *data)
{
struct list_head tmp;
unsigned long age;
xfs_buftarg_t *target = (xfs_buftarg_t *)data;
xfs_buf_t *bp, *n;
struct list_head *dwq = &target->bt_delwrite_queue;
spinlock_t *dwlk = &target->bt_delwrite_lock;
int count;
xfs_buf_t *bp;

current->flags |= PF_MEMALLOC;

INIT_LIST_HEAD(&tmp);
do {
if (unlikely(freezing(current))) {
set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
@@ -1705,37 +1744,17 @@ xfsbufd(
schedule_timeout_interruptible(
xfs_buf_timer_centisecs * msecs_to_jiffies(10));

xfs_buf_delwri_split(target, &tmp,
xfs_buf_age_centisecs * msecs_to_jiffies(10));

count = 0;
age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
spin_lock(dwlk);
list_for_each_entry_safe(bp, n, dwq, b_list) {
XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
ASSERT(bp->b_flags & XBF_DELWRI);

if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
if (!test_bit(XBT_FORCE_FLUSH,
&target->bt_flags) &&
time_before(jiffies,
bp->b_queuetime + age)) {
xfs_buf_unlock(bp);
break;
}

bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
_XBF_RUN_QUEUES);
bp->b_flags |= XBF_WRITE;
list_move_tail(&bp->b_list, &tmp);
count++;
}
}
spin_unlock(dwlk);

while (!list_empty(&tmp)) {
bp = list_entry(tmp.next, xfs_buf_t, b_list);
ASSERT(target == bp->b_target);

list_del_init(&bp->b_list);
xfs_buf_iostrategy(bp);
count++;
}

if (as_list_len > 0)
@@ -1743,7 +1762,6 @@ xfsbufd(
if (count)
blk_run_address_space(target->bt_mapping);

clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
} while (!kthread_should_stop());

return 0;
@@ -1762,34 +1780,18 @@ xfs_flush_buftarg(
struct list_head tmp;
xfs_buf_t *bp, *n;
int pincount = 0;
struct list_head *dwq = &target->bt_delwrite_queue;
spinlock_t *dwlk = &target->bt_delwrite_lock;

xfs_buf_runall_queues(xfsdatad_workqueue);
xfs_buf_runall_queues(xfslogd_workqueue);

INIT_LIST_HEAD(&tmp);
spin_lock(dwlk);
list_for_each_entry_safe(bp, n, dwq, b_list) {
ASSERT(bp->b_target == target);
ASSERT(bp->b_flags & (XBF_DELWRI | _XBF_DELWRI_Q));
XB_TRACE(bp, "walkq2", (long)xfs_buf_ispin(bp));
if (xfs_buf_ispin(bp)) {
pincount++;
continue;
}

list_move_tail(&bp->b_list, &tmp);
}
spin_unlock(dwlk);
set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
pincount = xfs_buf_delwri_split(target, &tmp, 0);

/*
* Dropped the delayed write list lock, now walk the temporary list
*/
list_for_each_entry_safe(bp, n, &tmp, b_list) {
xfs_buf_lock(bp);
bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|_XBF_RUN_QUEUES);
bp->b_flags |= XBF_WRITE;
ASSERT(target == bp->b_target);
if (wait)
bp->b_flags &= ~XBF_ASYNC;
else
@@ -69,8 +69,8 @@ typedef enum {
} xfs_buf_flags_t;

typedef enum {
XBT_FORCE_SLEEP = (0 << 1),
XBT_FORCE_FLUSH = (1 << 1),
XBT_FORCE_SLEEP = 0,
XBT_FORCE_FLUSH = 1,
} xfs_buftarg_flags_t;

typedef struct xfs_bufhash {
@@ -24,7 +24,7 @@
#include "xfs_mount.h"
#include "xfs_export.h"

STATIC struct dentry dotdot = { .d_name.name = "..", .d_name.len = 2, };
static struct dentry dotdot = { .d_name.name = "..", .d_name.len = 2, };

/*
* XFS encodes and decodes the fileid portion of NFS filehandles
@@ -46,7 +46,7 @@ static struct vm_operations_struct xfs_file_vm_ops;
static struct vm_operations_struct xfs_dmapi_file_vm_ops;
#endif

STATIC inline ssize_t
STATIC_INLINE ssize_t
__xfs_file_read(
struct kiocb *iocb,
const struct iovec *iov,
@@ -84,7 +84,7 @@ xfs_file_aio_read_invis(
return __xfs_file_read(iocb, iov, nr_segs, IO_ISAIO|IO_INVIS, pos);
}

STATIC inline ssize_t
STATIC_INLINE ssize_t
__xfs_file_write(
struct kiocb *iocb,
const struct iovec *iov,
@ -41,8 +41,6 @@
|
|||
#include "xfs_error.h"
|
||||
#include "xfs_rw.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_cap.h"
|
||||
#include "xfs_mac.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_bmap.h"
|
||||
#include "xfs_buf_item.h"
|
||||
|
@ -355,7 +353,6 @@ STATIC int
|
|||
xfs_readlink_by_handle(
|
||||
xfs_mount_t *mp,
|
||||
void __user *arg,
|
||||
struct file *parfilp,
|
||||
struct inode *parinode)
|
||||
{
|
||||
int error;
|
||||
|
@ -388,7 +385,7 @@ xfs_readlink_by_handle(
|
|||
aiov.iov_len = olen;
|
||||
aiov.iov_base = hreq.ohandle;
|
||||
|
||||
auio.uio_iov = &aiov;
|
||||
auio.uio_iov = (struct kvec *)&aiov;
|
||||
auio.uio_iovcnt = 1;
|
||||
auio.uio_offset = 0;
|
||||
auio.uio_segflg = UIO_USERSPACE;
|
||||
|
@ -406,7 +403,6 @@ STATIC int
|
|||
xfs_fssetdm_by_handle(
|
||||
xfs_mount_t *mp,
|
||||
void __user *arg,
|
||||
struct file *parfilp,
|
||||
struct inode *parinode)
|
||||
{
|
||||
int error;
|
||||
|
@ -448,7 +444,6 @@ STATIC int
|
|||
xfs_attrlist_by_handle(
|
||||
xfs_mount_t *mp,
|
||||
void __user *arg,
|
||||
struct file *parfilp,
|
||||
struct inode *parinode)
|
||||
{
|
||||
int error;
|
||||
|
@ -569,7 +564,6 @@ STATIC int
|
|||
xfs_attrmulti_by_handle(
|
||||
xfs_mount_t *mp,
|
||||
void __user *arg,
|
||||
struct file *parfilp,
|
||||
struct inode *parinode)
|
||||
{
|
||||
int error;
|
||||
|
@ -689,7 +683,6 @@ xfs_ioc_xattr(
|
|||
STATIC int
|
||||
xfs_ioc_getbmap(
|
||||
bhv_desc_t *bdp,
|
||||
struct file *filp,
|
||||
int flags,
|
||||
unsigned int cmd,
|
||||
void __user *arg);
|
||||
|
@ -788,7 +781,7 @@ xfs_ioctl(
|
|||
|
||||
case XFS_IOC_GETBMAP:
|
||||
case XFS_IOC_GETBMAPA:
|
||||
return xfs_ioc_getbmap(bdp, filp, ioflags, cmd, arg);
|
||||
return xfs_ioc_getbmap(bdp, ioflags, cmd, arg);
|
||||
|
||||
case XFS_IOC_GETBMAPX:
|
||||
return xfs_ioc_getbmapx(bdp, arg);
|
||||
|
@ -802,16 +795,16 @@ xfs_ioctl(
|
|||
return xfs_open_by_handle(mp, arg, filp, inode);
|
||||
|
||||
case XFS_IOC_FSSETDM_BY_HANDLE:
|
||||
return xfs_fssetdm_by_handle(mp, arg, filp, inode);
|
||||
return xfs_fssetdm_by_handle(mp, arg, inode);
|
||||
|
||||
case XFS_IOC_READLINK_BY_HANDLE:
|
||||
return xfs_readlink_by_handle(mp, arg, filp, inode);
|
||||
return xfs_readlink_by_handle(mp, arg, inode);
|
||||
|
||||
case XFS_IOC_ATTRLIST_BY_HANDLE:
|
||||
return xfs_attrlist_by_handle(mp, arg, filp, inode);
|
||||
return xfs_attrlist_by_handle(mp, arg, inode);
|
||||
|
||||
case XFS_IOC_ATTRMULTI_BY_HANDLE:
|
||||
return xfs_attrmulti_by_handle(mp, arg, filp, inode);
|
||||
return xfs_attrmulti_by_handle(mp, arg, inode);
|
||||
|
||||
case XFS_IOC_SWAPEXT: {
|
||||
error = xfs_swapext((struct xfs_swapext __user *)arg);
|
||||
|
@ -1095,11 +1088,6 @@ xfs_ioc_fsgeometry(
|
|||
/*
|
||||
* Linux extended inode flags interface.
|
||||
*/
|
||||
#define LINUX_XFLAG_SYNC 0x00000008 /* Synchronous updates */
|
||||
#define LINUX_XFLAG_IMMUTABLE 0x00000010 /* Immutable file */
|
||||
#define LINUX_XFLAG_APPEND 0x00000020 /* writes to file may only append */
|
||||
#define LINUX_XFLAG_NODUMP 0x00000040 /* do not dump file */
|
||||
#define LINUX_XFLAG_NOATIME 0x00000080 /* do not update atime */
|
||||
|
||||
STATIC unsigned int
|
||||
xfs_merge_ioc_xflags(
|
||||
|
@ -1108,23 +1096,23 @@ xfs_merge_ioc_xflags(
|
|||
{
|
||||
unsigned int xflags = start;
|
||||
|
||||
if (flags & LINUX_XFLAG_IMMUTABLE)
|
||||
if (flags & FS_IMMUTABLE_FL)
|
||||
xflags |= XFS_XFLAG_IMMUTABLE;
|
||||
else
|
||||
xflags &= ~XFS_XFLAG_IMMUTABLE;
|
||||
if (flags & LINUX_XFLAG_APPEND)
|
||||
if (flags & FS_APPEND_FL)
|
||||
xflags |= XFS_XFLAG_APPEND;
|
||||
else
|
||||
xflags &= ~XFS_XFLAG_APPEND;
|
||||
if (flags & LINUX_XFLAG_SYNC)
|
||||
if (flags & FS_SYNC_FL)
|
||||
xflags |= XFS_XFLAG_SYNC;
|
||||
else
|
||||
xflags &= ~XFS_XFLAG_SYNC;
|
||||
if (flags & LINUX_XFLAG_NOATIME)
|
||||
if (flags & FS_NOATIME_FL)
|
||||
xflags |= XFS_XFLAG_NOATIME;
|
||||
else
|
||||
xflags &= ~XFS_XFLAG_NOATIME;
|
||||
if (flags & LINUX_XFLAG_NODUMP)
|
||||
if (flags & FS_NODUMP_FL)
|
||||
xflags |= XFS_XFLAG_NODUMP;
|
||||
else
|
||||
xflags &= ~XFS_XFLAG_NODUMP;
|
||||
|
@ -1139,15 +1127,15 @@ xfs_di2lxflags(
|
|||
unsigned int flags = 0;
|
||||
|
||||
if (di_flags & XFS_DIFLAG_IMMUTABLE)
|
||||
flags |= LINUX_XFLAG_IMMUTABLE;
|
||||
flags |= FS_IMMUTABLE_FL;
|
||||
if (di_flags & XFS_DIFLAG_APPEND)
|
||||
flags |= LINUX_XFLAG_APPEND;
|
||||
flags |= FS_APPEND_FL;
|
||||
if (di_flags & XFS_DIFLAG_SYNC)
|
||||
flags |= LINUX_XFLAG_SYNC;
|
||||
flags |= FS_SYNC_FL;
|
||||
if (di_flags & XFS_DIFLAG_NOATIME)
|
||||
flags |= LINUX_XFLAG_NOATIME;
|
||||
flags |= FS_NOATIME_FL;
|
||||
if (di_flags & XFS_DIFLAG_NODUMP)
|
||||
flags |= LINUX_XFLAG_NODUMP;
|
||||
flags |= FS_NODUMP_FL;
|
||||
return flags;
|
||||
}
|
||||
|
||||
|
@ -1247,9 +1235,9 @@ xfs_ioc_xattr(
|
|||
break;
|
||||
}
|
||||
|
||||
if (flags & ~(LINUX_XFLAG_IMMUTABLE | LINUX_XFLAG_APPEND | \
|
||||
LINUX_XFLAG_NOATIME | LINUX_XFLAG_NODUMP | \
|
||||
LINUX_XFLAG_SYNC)) {
|
||||
if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
|
||||
FS_NOATIME_FL | FS_NODUMP_FL | \
|
||||
FS_SYNC_FL)) {
|
||||
error = -EOPNOTSUPP;
|
||||
break;
|
||||
}
|
||||
|
@ -1281,7 +1269,6 @@ xfs_ioc_xattr(
|
|||
STATIC int
|
||||
xfs_ioc_getbmap(
|
||||
bhv_desc_t *bdp,
|
||||
struct file *filp,
|
||||
int ioflags,
|
||||
unsigned int cmd,
|
||||
void __user *arg)
|
||||
|
|
|
@ -43,8 +43,6 @@
|
|||
#include "xfs_itable.h"
|
||||
#include "xfs_rw.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_cap.h"
|
||||
#include "xfs_mac.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_buf_item.h"
|
||||
#include "xfs_utils.h"
|
||||
|
@ -250,13 +248,13 @@ xfs_init_security(
|
|||
*
|
||||
* XXX(hch): nfsd is broken, better fix it instead.
|
||||
*/
|
||||
STATIC inline int
|
||||
STATIC_INLINE int
|
||||
xfs_has_fs_struct(struct task_struct *task)
|
||||
{
|
||||
return (task->fs != init_task.fs);
|
||||
}
|
||||
|
||||
STATIC inline void
|
||||
STATIC void
|
||||
xfs_cleanup_inode(
|
||||
bhv_vnode_t *dvp,
|
||||
bhv_vnode_t *vp,
|
||||
|
|
|
@ -43,8 +43,6 @@
|
|||
#include "xfs_itable.h"
|
||||
#include "xfs_rw.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_cap.h"
|
||||
#include "xfs_mac.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_inode_item.h"
|
||||
#include "xfs_buf_item.h"
|
||||
|
@ -134,13 +132,11 @@ STATIC int
|
|||
xfs_iozero(
|
||||
struct inode *ip, /* inode */
|
||||
loff_t pos, /* offset in file */
|
||||
size_t count, /* size of data to zero */
|
||||
loff_t end_size) /* max file size to set */
|
||||
size_t count) /* size of data to zero */
|
||||
{
|
||||
unsigned bytes;
|
||||
struct page *page;
|
||||
struct address_space *mapping;
|
||||
char *kaddr;
|
||||
int status;
|
||||
|
||||
mapping = ip->i_mapping;
|
||||
|
@ -158,26 +154,21 @@ xfs_iozero(
|
|||
if (!page)
|
||||
break;
|
||||
|
||||
kaddr = kmap(page);
|
||||
status = mapping->a_ops->prepare_write(NULL, page, offset,
|
||||
offset + bytes);
|
||||
if (status) {
|
||||
if (status)
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
memset((void *) (kaddr + offset), 0, bytes);
|
||||
flush_dcache_page(page);
|
||||
memclear_highpage_flush(page, offset, bytes);
|
||||
|
||||
status = mapping->a_ops->commit_write(NULL, page, offset,
|
||||
offset + bytes);
|
||||
if (!status) {
|
||||
pos += bytes;
|
||||
count -= bytes;
|
||||
if (pos > i_size_read(ip))
|
||||
i_size_write(ip, pos < end_size ? pos : end_size);
|
||||
}
|
||||
|
||||
unlock:
|
||||
kunmap(page);
|
||||
unlock_page(page);
|
||||
page_cache_release(page);
|
||||
if (status)
|
||||
|
@ -449,8 +440,8 @@ STATIC int /* error (positive) */
|
|||
xfs_zero_last_block(
|
||||
struct inode *ip,
|
||||
xfs_iocore_t *io,
|
||||
xfs_fsize_t isize,
|
||||
xfs_fsize_t end_size)
|
||||
xfs_fsize_t offset,
|
||||
xfs_fsize_t isize)
|
||||
{
|
||||
xfs_fileoff_t last_fsb;
|
||||
xfs_mount_t *mp = io->io_mount;
|
||||
|
@ -459,7 +450,6 @@ xfs_zero_last_block(
|
|||
int zero_len;
|
||||
int error = 0;
|
||||
xfs_bmbt_irec_t imap;
|
||||
loff_t loff;
|
||||
|
||||
ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);
|
||||
|
||||
|
@ -494,9 +484,10 @@ xfs_zero_last_block(
|
|||
*/
|
||||
XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD);
|
||||
|
||||
loff = XFS_FSB_TO_B(mp, last_fsb);
|
||||
zero_len = mp->m_sb.sb_blocksize - zero_offset;
|
||||
error = xfs_iozero(ip, loff + zero_offset, zero_len, end_size);
|
||||
if (isize + zero_len > offset)
|
||||
zero_len = offset - isize;
|
||||
error = xfs_iozero(ip, isize, zero_len);
|
||||
|
||||
XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
|
||||
ASSERT(error >= 0);
|
||||
|
@ -519,14 +510,15 @@ xfs_zero_eof(
|
|||
bhv_vnode_t *vp,
|
||||
xfs_iocore_t *io,
|
||||
xfs_off_t offset, /* starting I/O offset */
|
||||
xfs_fsize_t isize, /* current inode size */
|
||||
xfs_fsize_t end_size) /* terminal inode size */
|
||||
xfs_fsize_t isize) /* current inode size */
|
||||
{
|
||||
struct inode *ip = vn_to_inode(vp);
|
||||
xfs_fileoff_t start_zero_fsb;
|
||||
xfs_fileoff_t end_zero_fsb;
|
||||
xfs_fileoff_t zero_count_fsb;
|
||||
xfs_fileoff_t last_fsb;
|
||||
xfs_fileoff_t zero_off;
|
||||
xfs_fsize_t zero_len;
|
||||
xfs_mount_t *mp = io->io_mount;
|
||||
int nimaps;
|
||||
int error = 0;
|
||||
|
@ -540,7 +532,7 @@ xfs_zero_eof(
|
|||
* First handle zeroing the block on which isize resides.
|
||||
* We only zero a part of that block so it is handled specially.
|
||||
*/
|
||||
error = xfs_zero_last_block(ip, io, isize, end_size);
|
||||
error = xfs_zero_last_block(ip, io, offset, isize);
|
||||
if (error) {
|
||||
ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
|
||||
ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
|
||||
|
@ -601,10 +593,13 @@ xfs_zero_eof(
|
|||
*/
|
||||
XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
|
||||
|
||||
error = xfs_iozero(ip,
|
||||
XFS_FSB_TO_B(mp, start_zero_fsb),
|
||||
XFS_FSB_TO_B(mp, imap.br_blockcount),
|
||||
end_size);
|
||||
zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
|
||||
zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);
|
||||
|
||||
if ((zero_off + zero_len) > offset)
|
||||
zero_len = offset - zero_off;
|
||||
|
||||
error = xfs_iozero(ip, zero_off, zero_len);
|
||||
if (error) {
|
||||
goto out_lock;
|
||||
}
|
||||
|
@ -783,8 +778,7 @@ start:
|
|||
*/
|
||||
|
||||
if (pos > isize) {
|
||||
error = xfs_zero_eof(BHV_TO_VNODE(bdp), io, pos,
|
||||
isize, pos + count);
|
||||
error = xfs_zero_eof(BHV_TO_VNODE(bdp), io, pos, isize);
|
||||
if (error) {
|
||||
xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
|
||||
goto out_unlock_mutex;
|
||||
|
|
|
@ -83,7 +83,7 @@ extern int xfs_bdstrat_cb(struct xfs_buf *);
|
|||
extern int xfs_dev_is_read_only(struct xfs_mount *, char *);
|
||||
|
||||
extern int xfs_zero_eof(struct bhv_vnode *, struct xfs_iocore *, xfs_off_t,
|
||||
xfs_fsize_t, xfs_fsize_t);
|
||||
xfs_fsize_t);
|
||||
extern ssize_t xfs_read(struct bhv_desc *, struct kiocb *,
|
||||
const struct iovec *, unsigned int,
|
||||
loff_t *, int, struct cred *);
|
||||
|
|
|
@ -43,8 +43,6 @@
|
|||
#include "xfs_itable.h"
|
||||
#include "xfs_rw.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_cap.h"
|
||||
#include "xfs_mac.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_buf_item.h"
|
||||
#include "xfs_utils.h"
|
||||
|
@ -58,10 +56,10 @@
|
|||
#include <linux/kthread.h>
|
||||
#include <linux/freezer.h>
|
||||
|
||||
STATIC struct quotactl_ops xfs_quotactl_operations;
|
||||
STATIC struct super_operations xfs_super_operations;
|
||||
STATIC kmem_zone_t *xfs_vnode_zone;
|
||||
STATIC kmem_zone_t *xfs_ioend_zone;
|
||||
static struct quotactl_ops xfs_quotactl_operations;
|
||||
static struct super_operations xfs_super_operations;
|
||||
static kmem_zone_t *xfs_vnode_zone;
|
||||
static kmem_zone_t *xfs_ioend_zone;
|
||||
mempool_t *xfs_ioend_pool;
|
||||
|
||||
STATIC struct xfs_mount_args *
|
||||
|
@ -121,7 +119,7 @@ xfs_max_file_offset(
|
|||
return (((__uint64_t)pagefactor) << bitshift) - 1;
|
||||
}
|
||||
|
||||
STATIC __inline__ void
|
||||
STATIC_INLINE void
|
||||
xfs_set_inodeops(
|
||||
struct inode *inode)
|
||||
{
|
||||
|
@ -147,7 +145,7 @@ xfs_set_inodeops(
|
|||
}
|
||||
}
|
||||
|
||||
STATIC __inline__ void
|
||||
STATIC_INLINE void
|
||||
xfs_revalidate_inode(
|
||||
xfs_mount_t *mp,
|
||||
bhv_vnode_t *vp,
|
||||
|
@ -553,7 +551,6 @@ vfs_sync_worker(
|
|||
error = bhv_vfs_sync(vfsp, SYNC_FSDATA | SYNC_BDFLUSH | \
|
||||
SYNC_ATTR | SYNC_REFCACHE, NULL);
|
||||
vfsp->vfs_sync_seq++;
|
||||
wmb();
|
||||
wake_up(&vfsp->vfs_wait_single_sync_task);
|
||||
}
|
||||
|
||||
|
@ -659,9 +656,17 @@ xfs_fs_sync_super(
|
|||
int error;
|
||||
int flags;
|
||||
|
||||
if (unlikely(sb->s_frozen == SB_FREEZE_WRITE))
|
||||
flags = SYNC_QUIESCE;
|
||||
else
|
||||
if (unlikely(sb->s_frozen == SB_FREEZE_WRITE)) {
|
||||
/*
|
||||
* First stage of freeze - no more writers will make progress
|
||||
* now we are here, so we flush delwri and delalloc buffers
|
||||
* here, then wait for all I/O to complete. Data is frozen at
|
||||
* that point. Metadata is not frozen, transactions can still
|
||||
* occur here so don't bother flushing the buftarg (i.e
|
||||
* SYNC_QUIESCE) because it'll just get dirty again.
|
||||
*/
|
||||
flags = SYNC_FSDATA | SYNC_DELWRI | SYNC_WAIT | SYNC_IOWAIT;
|
||||
} else
|
||||
flags = SYNC_FSDATA | (wait ? SYNC_WAIT : 0);
|
||||
|
||||
error = bhv_vfs_sync(vfsp, flags, NULL);
|
||||
|
@ -873,7 +878,7 @@ xfs_fs_get_sb(
|
|||
mnt);
|
||||
}
|
||||
|
||||
STATIC struct super_operations xfs_super_operations = {
|
||||
static struct super_operations xfs_super_operations = {
|
||||
.alloc_inode = xfs_fs_alloc_inode,
|
||||
.destroy_inode = xfs_fs_destroy_inode,
|
||||
.write_inode = xfs_fs_write_inode,
|
||||
|
@ -887,7 +892,7 @@ STATIC struct super_operations xfs_super_operations = {
|
|||
.show_options = xfs_fs_show_options,
|
||||
};
|
||||
|
||||
STATIC struct quotactl_ops xfs_quotactl_operations = {
|
||||
static struct quotactl_ops xfs_quotactl_operations = {
|
||||
.quota_sync = xfs_fs_quotasync,
|
||||
.get_xstate = xfs_fs_getxstate,
|
||||
.set_xstate = xfs_fs_setxstate,
|
||||
|
|
|
@ -54,102 +54,204 @@ xfs_stats_clear_proc_handler(
|
|||
}
|
||||
#endif /* CONFIG_PROC_FS */
|
||||
|
||||
STATIC ctl_table xfs_table[] = {
|
||||
{XFS_RESTRICT_CHOWN, "restrict_chown", &xfs_params.restrict_chown.val,
|
||||
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
|
||||
&sysctl_intvec, NULL,
|
||||
&xfs_params.restrict_chown.min, &xfs_params.restrict_chown.max},
|
||||
|
||||
{XFS_SGID_INHERIT, "irix_sgid_inherit", &xfs_params.sgid_inherit.val,
|
||||
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
|
||||
&sysctl_intvec, NULL,
|
||||
&xfs_params.sgid_inherit.min, &xfs_params.sgid_inherit.max},
|
||||
|
||||
{XFS_SYMLINK_MODE, "irix_symlink_mode", &xfs_params.symlink_mode.val,
|
||||
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
|
||||
&sysctl_intvec, NULL,
|
||||
&xfs_params.symlink_mode.min, &xfs_params.symlink_mode.max},
|
||||
|
||||
{XFS_PANIC_MASK, "panic_mask", &xfs_params.panic_mask.val,
|
||||
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
|
||||
&sysctl_intvec, NULL,
|
||||
&xfs_params.panic_mask.min, &xfs_params.panic_mask.max},
|
||||
|
||||
{XFS_ERRLEVEL, "error_level", &xfs_params.error_level.val,
|
||||
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
|
||||
&sysctl_intvec, NULL,
|
||||
&xfs_params.error_level.min, &xfs_params.error_level.max},
|
||||
|
||||
{XFS_SYNCD_TIMER, "xfssyncd_centisecs", &xfs_params.syncd_timer.val,
|
||||
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
|
||||
&sysctl_intvec, NULL,
|
||||
&xfs_params.syncd_timer.min, &xfs_params.syncd_timer.max},
|
||||
|
||||
{XFS_INHERIT_SYNC, "inherit_sync", &xfs_params.inherit_sync.val,
|
||||
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
|
||||
&sysctl_intvec, NULL,
|
||||
&xfs_params.inherit_sync.min, &xfs_params.inherit_sync.max},
|
||||
|
||||
{XFS_INHERIT_NODUMP, "inherit_nodump", &xfs_params.inherit_nodump.val,
|
||||
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
|
||||
&sysctl_intvec, NULL,
|
||||
&xfs_params.inherit_nodump.min, &xfs_params.inherit_nodump.max},
|
||||
|
||||
{XFS_INHERIT_NOATIME, "inherit_noatime", &xfs_params.inherit_noatim.val,
|
||||
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
|
||||
&sysctl_intvec, NULL,
|
||||
&xfs_params.inherit_noatim.min, &xfs_params.inherit_noatim.max},
|
||||
|
||||
{XFS_BUF_TIMER, "xfsbufd_centisecs", &xfs_params.xfs_buf_timer.val,
|
||||
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
|
||||
&sysctl_intvec, NULL,
|
||||
&xfs_params.xfs_buf_timer.min, &xfs_params.xfs_buf_timer.max},
|
||||
|
||||
{XFS_BUF_AGE, "age_buffer_centisecs", &xfs_params.xfs_buf_age.val,
|
||||
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
|
||||
&sysctl_intvec, NULL,
|
||||
&xfs_params.xfs_buf_age.min, &xfs_params.xfs_buf_age.max},
|
||||
|
||||
{XFS_INHERIT_NOSYM, "inherit_nosymlinks", &xfs_params.inherit_nosym.val,
|
||||
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
|
||||
&sysctl_intvec, NULL,
|
||||
&xfs_params.inherit_nosym.min, &xfs_params.inherit_nosym.max},
|
||||
|
||||
{XFS_ROTORSTEP, "rotorstep", &xfs_params.rotorstep.val,
|
||||
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
|
||||
&sysctl_intvec, NULL,
|
||||
&xfs_params.rotorstep.min, &xfs_params.rotorstep.max},
|
||||
|
||||
{XFS_INHERIT_NODFRG, "inherit_nodefrag", &xfs_params.inherit_nodfrg.val,
|
||||
sizeof(int), 0644, NULL, &proc_dointvec_minmax,
|
||||
&sysctl_intvec, NULL,
|
||||
&xfs_params.inherit_nodfrg.min, &xfs_params.inherit_nodfrg.max},
|
||||
static ctl_table xfs_table[] = {
|
||||
{
|
||||
.ctl_name = XFS_RESTRICT_CHOWN,
|
||||
.procname = "restrict_chown",
|
||||
.data = &xfs_params.restrict_chown.val,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec_minmax,
|
||||
.strategy = &sysctl_intvec,
|
||||
.extra1 = &xfs_params.restrict_chown.min,
|
||||
.extra2 = &xfs_params.restrict_chown.max
|
||||
},
|
||||
{
|
||||
.ctl_name = XFS_SGID_INHERIT,
|
||||
.procname = "irix_sgid_inherit",
|
||||
.data = &xfs_params.sgid_inherit.val,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec_minmax,
|
||||
.strategy = &sysctl_intvec,
|
||||
.extra1 = &xfs_params.sgid_inherit.min,
|
||||
.extra2 = &xfs_params.sgid_inherit.max
|
||||
},
|
||||
{
|
||||
.ctl_name = XFS_SYMLINK_MODE,
|
||||
.procname = "irix_symlink_mode",
|
||||
.data = &xfs_params.symlink_mode.val,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec_minmax,
|
||||
.strategy = &sysctl_intvec,
|
||||
.extra1 = &xfs_params.symlink_mode.min,
|
||||
.extra2 = &xfs_params.symlink_mode.max
|
||||
},
|
||||
{
|
||||
.ctl_name = XFS_PANIC_MASK,
|
||||
.procname = "panic_mask",
|
||||
.data = &xfs_params.panic_mask.val,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec_minmax,
|
||||
.strategy = &sysctl_intvec,
|
||||
.extra1 = &xfs_params.panic_mask.min,
|
||||
.extra2 = &xfs_params.panic_mask.max
|
||||
},
|
||||
|
||||
{
|
||||
.ctl_name = XFS_ERRLEVEL,
|
||||
.procname = "error_level",
|
||||
.data = &xfs_params.error_level.val,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec_minmax,
|
||||
.strategy = &sysctl_intvec,
|
||||
.extra1 = &xfs_params.error_level.min,
|
||||
.extra2 = &xfs_params.error_level.max
|
||||
},
|
||||
{
|
||||
.ctl_name = XFS_SYNCD_TIMER,
|
||||
.procname = "xfssyncd_centisecs",
|
||||
.data = &xfs_params.syncd_timer.val,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec_minmax,
|
||||
.strategy = &sysctl_intvec,
|
||||
.extra1 = &xfs_params.syncd_timer.min,
|
||||
.extra2 = &xfs_params.syncd_timer.max
|
||||
},
|
||||
{
|
||||
.ctl_name = XFS_INHERIT_SYNC,
|
||||
.procname = "inherit_sync",
|
||||
.data = &xfs_params.inherit_sync.val,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec_minmax,
|
||||
.strategy = &sysctl_intvec,
|
||||
.extra1 = &xfs_params.inherit_sync.min,
|
||||
.extra2 = &xfs_params.inherit_sync.max
|
||||
},
|
||||
{
|
||||
.ctl_name = XFS_INHERIT_NODUMP,
|
||||
.procname = "inherit_nodump",
|
||||
.data = &xfs_params.inherit_nodump.val,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec_minmax,
|
||||
.strategy = &sysctl_intvec,
|
||||
.extra1 = &xfs_params.inherit_nodump.min,
|
||||
.extra2 = &xfs_params.inherit_nodump.max
|
||||
},
|
||||
{
|
||||
.ctl_name = XFS_INHERIT_NOATIME,
|
||||
.procname = "inherit_noatime",
|
||||
.data = &xfs_params.inherit_noatim.val,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec_minmax,
|
||||
.strategy = &sysctl_intvec,
|
||||
.extra1 = &xfs_params.inherit_noatim.min,
|
||||
.extra2 = &xfs_params.inherit_noatim.max
|
||||
},
|
||||
{
|
||||
.ctl_name = XFS_BUF_TIMER,
|
||||
.procname = "xfsbufd_centisecs",
|
||||
.data = &xfs_params.xfs_buf_timer.val,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec_minmax,
|
||||
.strategy = &sysctl_intvec,
|
||||
.extra1 = &xfs_params.xfs_buf_timer.min,
|
||||
.extra2 = &xfs_params.xfs_buf_timer.max
|
||||
},
|
||||
{
|
||||
.ctl_name = XFS_BUF_AGE,
|
||||
.procname = "age_buffer_centisecs",
|
||||
.data = &xfs_params.xfs_buf_age.val,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec_minmax,
|
||||
.strategy = &sysctl_intvec,
|
||||
.extra1 = &xfs_params.xfs_buf_age.min,
|
||||
.extra2 = &xfs_params.xfs_buf_age.max
|
||||
},
|
||||
{
|
||||
.ctl_name = XFS_INHERIT_NOSYM,
|
||||
.procname = "inherit_nosymlinks",
|
||||
.data = &xfs_params.inherit_nosym.val,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec_minmax,
|
||||
.strategy = &sysctl_intvec,
|
||||
.extra1 = &xfs_params.inherit_nosym.min,
|
||||
.extra2 = &xfs_params.inherit_nosym.max
|
||||
},
|
||||
{
|
||||
.ctl_name = XFS_ROTORSTEP,
|
||||
.procname = "rotorstep",
|
||||
.data = &xfs_params.rotorstep.val,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec_minmax,
|
||||
.strategy = &sysctl_intvec,
|
||||
.extra1 = &xfs_params.rotorstep.min,
|
||||
.extra2 = &xfs_params.rotorstep.max
|
||||
},
|
||||
{
|
||||
.ctl_name = XFS_INHERIT_NODFRG,
|
||||
.procname = "inherit_nodefrag",
|
||||
.data = &xfs_params.inherit_nodfrg.val,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec_minmax,
|
||||
.strategy = &sysctl_intvec,
|
||||
.extra1 = &xfs_params.inherit_nodfrg.min,
|
||||
.extra2 = &xfs_params.inherit_nodfrg.max
|
||||
},
|
||||
/* please keep this the last entry */
|
||||
#ifdef CONFIG_PROC_FS
|
||||
{XFS_STATS_CLEAR, "stats_clear", &xfs_params.stats_clear.val,
|
||||
sizeof(int), 0644, NULL, &xfs_stats_clear_proc_handler,
|
||||
&sysctl_intvec, NULL,
|
||||
&xfs_params.stats_clear.min, &xfs_params.stats_clear.max},
|
||||
{
|
||||
.ctl_name = XFS_STATS_CLEAR,
|
||||
.procname = "stats_clear",
|
||||
.data = &xfs_params.stats_clear.val,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &xfs_stats_clear_proc_handler,
|
||||
.strategy = &sysctl_intvec,
|
||||
.extra1 = &xfs_params.stats_clear.min,
|
||||
.extra2 = &xfs_params.stats_clear.max
|
||||
},
|
||||
#endif /* CONFIG_PROC_FS */
|
||||
|
||||
{0}
|
||||
{}
|
||||
};
|
||||
|
||||
STATIC ctl_table xfs_dir_table[] = {
|
||||
{FS_XFS, "xfs", NULL, 0, 0555, xfs_table},
|
||||
{0}
|
||||
static ctl_table xfs_dir_table[] = {
|
||||
{
|
||||
.ctl_name = FS_XFS,
|
||||
.procname = "xfs",
|
||||
.mode = 0555,
|
||||
.child = xfs_table
|
||||
},
|
||||
{}
|
||||
};
|
||||
|
||||
STATIC ctl_table xfs_root_table[] = {
|
||||
{CTL_FS, "fs", NULL, 0, 0555, xfs_dir_table},
|
||||
{0}
|
||||
static ctl_table xfs_root_table[] = {
|
||||
{
|
||||
.ctl_name = CTL_FS,
|
||||
.procname = "fs",
|
||||
.mode = 0555,
|
||||
.child = xfs_dir_table
|
||||
},
|
||||
{}
|
||||
};
|
||||
|
||||
void
|
||||
xfs_sysctl_register(void)
|
||||
{
|
||||
xfs_table_header = register_sysctl_table(xfs_root_table, 1);
|
||||
xfs_table_header = register_sysctl_table(xfs_root_table, 0);
|
||||
}
|
||||
|
||||
void
|
||||
|
|
|
@ -91,7 +91,7 @@ typedef enum {
|
|||
#define SYNC_FSDATA 0x0020 /* flush fs data (e.g. superblocks) */
|
||||
#define SYNC_REFCACHE 0x0040 /* prune some of the nfs ref cache */
|
||||
#define SYNC_REMOUNT 0x0080 /* remount readonly, no dummy LRs */
|
||||
#define SYNC_QUIESCE 0x0100 /* quiesce fileystem for a snapshot */
|
||||
#define SYNC_IOWAIT 0x0100 /* wait for all I/O to complete */
|
||||
|
||||
#define SHUTDOWN_META_IO_ERROR 0x0001 /* write attempt to metadata failed */
|
||||
#define SHUTDOWN_LOG_IO_ERROR 0x0002 /* write attempt to the log failed */
|
||||
|
|
|
@ -26,7 +26,7 @@ DEFINE_SPINLOCK(vnumber_lock);
|
|||
*/
|
||||
#define NVSYNC 37
|
||||
#define vptosync(v) (&vsync[((unsigned long)v) % NVSYNC])
|
||||
STATIC wait_queue_head_t vsync[NVSYNC];
|
||||
static wait_queue_head_t vsync[NVSYNC];
|
||||
|
||||
void
|
||||
vn_init(void)
|
||||
|
|
|
@ -489,14 +489,14 @@ static inline struct bhv_vnode *vn_grab(struct bhv_vnode *vp)
|
|||
#define VN_LOCK(vp) mutex_spinlock(&(vp)->v_lock)
|
||||
#define VN_UNLOCK(vp, s) mutex_spinunlock(&(vp)->v_lock, s)
|
||||
|
||||
static __inline__ void vn_flagset(struct bhv_vnode *vp, uint flag)
|
||||
STATIC_INLINE void vn_flagset(struct bhv_vnode *vp, uint flag)
|
||||
{
|
||||
spin_lock(&vp->v_lock);
|
||||
vp->v_flag |= flag;
|
||||
spin_unlock(&vp->v_lock);
|
||||
}
|
||||
|
||||
static __inline__ uint vn_flagclr(struct bhv_vnode *vp, uint flag)
|
||||
STATIC_INLINE uint vn_flagclr(struct bhv_vnode *vp, uint flag)
|
||||
{
|
||||
uint cleared;
|
||||
|
||||
|
|
|
@ -43,8 +43,6 @@
|
|||
#include "xfs_itable.h"
|
||||
#include "xfs_rw.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_cap.h"
|
||||
#include "xfs_mac.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_buf_item.h"
|
||||
#include "xfs_trans_space.h"
|
||||
|
@ -484,7 +482,7 @@ xfs_qm_dqalloc(
|
|||
|
||||
xfs_trans_bhold(tp, bp);
|
||||
|
||||
if ((error = xfs_bmap_finish(tpp, &flist, firstblock, &committed))) {
|
||||
if ((error = xfs_bmap_finish(tpp, &flist, &committed))) {
|
||||
goto error1;
|
||||
}
|
||||
|
||||
|
|
|
@ -43,8 +43,6 @@
|
|||
#include "xfs_itable.h"
|
||||
#include "xfs_rw.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_cap.h"
|
||||
#include "xfs_mac.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_buf_item.h"
|
||||
#include "xfs_trans_priv.h"
|
||||
|
@ -399,7 +397,7 @@ xfs_qm_dquot_logitem_committing(
|
|||
/*
|
||||
* This is the ops vector for dquots
|
||||
*/
|
||||
STATIC struct xfs_item_ops xfs_dquot_item_ops = {
|
||||
static struct xfs_item_ops xfs_dquot_item_ops = {
|
||||
.iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_size,
|
||||
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
|
||||
xfs_qm_dquot_logitem_format,
|
||||
|
@ -606,7 +604,7 @@ xfs_qm_qoffend_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn)
|
|||
return;
|
||||
}
|
||||
|
||||
STATIC struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
|
||||
static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
|
||||
.iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size,
|
||||
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
|
||||
xfs_qm_qoff_logitem_format,
|
||||
|
@ -628,7 +626,7 @@ STATIC struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
|
|||
/*
|
||||
* This is the ops vector shared by all quotaoff-start log items.
|
||||
*/
|
||||
STATIC struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
|
||||
static struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
|
||||
.iop_size = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size,
|
||||
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
|
||||
xfs_qm_qoff_logitem_format,
|
||||
|
|
|
@ -44,8 +44,6 @@
|
|||
#include "xfs_bmap.h"
|
||||
#include "xfs_rw.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_cap.h"
|
||||
#include "xfs_mac.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_buf_item.h"
|
||||
#include "xfs_trans_space.h"
|
||||
|
@ -64,10 +62,10 @@ uint ndquot;
|
|||
|
||||
kmem_zone_t *qm_dqzone;
|
||||
kmem_zone_t *qm_dqtrxzone;
|
||||
STATIC kmem_shaker_t xfs_qm_shaker;
|
||||
static kmem_shaker_t xfs_qm_shaker;
|
||||
|
||||
STATIC cred_t xfs_zerocr;
|
||||
STATIC xfs_inode_t xfs_zeroino;
|
||||
static cred_t xfs_zerocr;
|
||||
static xfs_inode_t xfs_zeroino;
|
||||
|
||||
STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int);
|
||||
STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
|
||||
|
|
|
@ -44,8 +44,6 @@
|
|||
#include "xfs_error.h"
|
||||
#include "xfs_rw.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_cap.h"
|
||||
#include "xfs_mac.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_buf_item.h"
|
||||
#include "xfs_qm.h"
|
||||
|
@ -384,7 +382,7 @@ xfs_qm_dqrele_null(
|
|||
}
|
||||
|
||||
|
||||
STATIC struct xfs_qmops xfs_qmcore_xfs = {
|
||||
static struct xfs_qmops xfs_qmcore_xfs = {
|
||||
.xfs_qminit = xfs_qm_newmount,
|
||||
.xfs_qmdone = xfs_qm_unmount_quotadestroy,
|
||||
.xfs_qmmount = xfs_qm_endmount,
|
||||
|
|
|
@ -43,8 +43,6 @@
|
|||
#include "xfs_error.h"
|
||||
#include "xfs_rw.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_cap.h"
|
||||
#include "xfs_mac.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_buf_item.h"
|
||||
#include "xfs_qm.h"
|
||||
|
|
|
@ -46,8 +46,6 @@
|
|||
#include "xfs_error.h"
|
||||
#include "xfs_rw.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_cap.h"
|
||||
#include "xfs_mac.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_buf_item.h"
|
||||
#include "xfs_utils.h"
|
||||
|
@ -134,7 +132,7 @@ xfs_qm_quotactl(
|
|||
break;
|
||||
|
||||
case Q_XQUOTASYNC:
|
||||
return (xfs_sync_inodes(mp, SYNC_DELWRI, 0, NULL));
|
||||
return (xfs_sync_inodes(mp, SYNC_DELWRI, NULL));
|
||||
|
||||
default:
|
||||
break;
|
||||
|
|
|
@ -43,8 +43,6 @@
|
|||
#include "xfs_error.h"
|
||||
#include "xfs_rw.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_cap.h"
|
||||
#include "xfs_mac.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_buf_item.h"
|
||||
#include "xfs_trans_priv.h"
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
#include "debug.h"
|
||||
#include "spin.h"
|
||||
|
||||
static char message[256]; /* keep it off the stack */
|
||||
static char message[1024]; /* keep it off the stack */
|
||||
static DEFINE_SPINLOCK(xfs_err_lock);
|
||||
|
||||
/* Translate from CE_FOO to KERN_FOO, err_level(CE_FOO) == KERN_FOO */
|
||||
|
@ -44,13 +44,14 @@ cmn_err(register int level, char *fmt, ...)
|
|||
spin_lock_irqsave(&xfs_err_lock,flags);
|
||||
va_start(ap, fmt);
|
||||
if (*fmt == '!') fp++;
|
||||
len = vsprintf(message, fp, ap);
|
||||
if (level != CE_DEBUG && message[len-1] != '\n')
|
||||
strcat(message, "\n");
|
||||
printk("%s%s", err_level[level], message);
|
||||
len = vsnprintf(message, sizeof(message), fp, ap);
|
||||
if (len >= sizeof(message))
|
||||
len = sizeof(message) - 1;
|
||||
if (message[len-1] == '\n')
|
||||
message[len-1] = 0;
|
||||
printk("%s%s\n", err_level[level], message);
|
||||
va_end(ap);
|
||||
spin_unlock_irqrestore(&xfs_err_lock,flags);
|
||||
|
||||
BUG_ON(level == CE_PANIC);
|
||||
}
|
||||
|
||||
|
@ -64,11 +65,13 @@ icmn_err(register int level, char *fmt, va_list ap)
|
|||
if(level > XFS_MAX_ERR_LEVEL)
|
||||
level = XFS_MAX_ERR_LEVEL;
|
||||
spin_lock_irqsave(&xfs_err_lock,flags);
|
||||
len = vsprintf(message, fmt, ap);
|
||||
if (level != CE_DEBUG && message[len-1] != '\n')
|
||||
strcat(message, "\n");
|
||||
len = vsnprintf(message, sizeof(message), fmt, ap);
|
||||
if (len >= sizeof(message))
|
||||
len = sizeof(message) - 1;
|
||||
if (message[len-1] == '\n')
|
||||
message[len-1] = 0;
|
||||
printk("%s%s\n", err_level[level], message);
|
||||
spin_unlock_irqrestore(&xfs_err_lock,flags);
|
||||
printk("%s%s", err_level[level], message);
|
||||
BUG_ON(level == CE_PANIC);
|
||||
}
|
||||
|
||||
|
|
|
@ -38,13 +38,37 @@ extern void assfail(char *expr, char *f, int l);
|
|||
|
||||
#ifndef DEBUG
|
||||
# define ASSERT(expr) ((void)0)
|
||||
#else
|
||||
# define ASSERT(expr) ASSERT_ALWAYS(expr)
|
||||
extern unsigned long random(void);
|
||||
#endif
|
||||
|
||||
#ifndef STATIC
|
||||
# define STATIC static
|
||||
# define STATIC static noinline
|
||||
#endif
|
||||
|
||||
#ifndef STATIC_INLINE
|
||||
# define STATIC_INLINE static inline
|
||||
#endif
|
||||
|
||||
#else /* DEBUG */
|
||||
|
||||
# define ASSERT(expr) ASSERT_ALWAYS(expr)
|
||||
extern unsigned long random(void);
|
||||
|
||||
#ifndef STATIC
|
||||
# define STATIC noinline
|
||||
#endif
|
||||
|
||||
/*
|
||||
* We stop inlining of inline functions in debug mode.
|
||||
* Unfortunately, this means static inline in header files
|
||||
* get multiple definitions, so they need to remain static.
|
||||
* This then gives tonnes of warnings about unused but defined
|
||||
* functions, so we need to add the unused attribute to prevent
|
||||
* these spurious warnings.
|
||||
*/
|
||||
#ifndef STATIC_INLINE
|
||||
# define STATIC_INLINE static __attribute__ ((unused)) noinline
|
||||
#endif
|
||||
|
||||
#endif /* DEBUG */
|
||||
|
||||
|
||||
#endif /* __XFS_SUPPORT_DEBUG_H__ */
|
||||
|
|
|
@ -55,7 +55,7 @@ enum uio_seg {
|
|||
};
|
||||
|
||||
struct uio {
|
||||
struct iovec *uio_iov; /* pointer to array of iovecs */
|
||||
struct kvec *uio_iov; /* pointer to array of iovecs */
|
||||
int uio_iovcnt; /* number of iovecs in array */
|
||||
xfs_off_t uio_offset; /* offset in file this uio corresponds to */
|
||||
int uio_resid; /* residual i/o count */
|
||||
|
@ -63,7 +63,7 @@ struct uio {
|
|||
};
|
||||
|
||||
typedef struct uio uio_t;
|
||||
typedef struct iovec iovec_t;
|
||||
typedef struct kvec iovec_t;
|
||||
|
||||
extern int xfs_uio_read (caddr_t, size_t, uio_t *);
|
||||
|
||||
|
|
|
@ -31,7 +31,6 @@
|
|||
#include "xfs_inode.h"
|
||||
#include "xfs_btree.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_mac.h"
|
||||
#include "xfs_attr.h"
|
||||
|
||||
#include <linux/capability.h>
|
||||
|
|
|
@ -58,7 +58,6 @@ typedef struct xfs_btree_sblock xfs_alloc_block_t;
|
|||
/*
|
||||
* Real block structures have a size equal to the disk block size.
|
||||
*/
|
||||
#define XFS_ALLOC_BLOCK_SIZE(lev,cur) (1 << (cur)->bc_blocklog)
|
||||
#define XFS_ALLOC_BLOCK_MAXRECS(lev,cur) ((cur)->bc_mp->m_alloc_mxr[lev != 0])
|
||||
#define XFS_ALLOC_BLOCK_MINRECS(lev,cur) ((cur)->bc_mp->m_alloc_mnr[lev != 0])
|
||||
|
||||
|
@ -87,16 +86,13 @@ typedef struct xfs_btree_sblock xfs_alloc_block_t;
|
|||
* Record, key, and pointer address macros for btree blocks.
|
||||
*/
|
||||
#define XFS_ALLOC_REC_ADDR(bb,i,cur) \
|
||||
XFS_BTREE_REC_ADDR(XFS_ALLOC_BLOCK_SIZE(0,cur), xfs_alloc, \
|
||||
bb, i, XFS_ALLOC_BLOCK_MAXRECS(0, cur))
|
||||
XFS_BTREE_REC_ADDR(xfs_alloc, bb, i)
|
||||
|
||||
#define XFS_ALLOC_KEY_ADDR(bb,i,cur) \
|
||||
XFS_BTREE_KEY_ADDR(XFS_ALLOC_BLOCK_SIZE(1,cur), xfs_alloc, \
|
||||
bb, i, XFS_ALLOC_BLOCK_MAXRECS(1, cur))
|
||||
XFS_BTREE_KEY_ADDR(xfs_alloc, bb, i)
|
||||
|
||||
#define XFS_ALLOC_PTR_ADDR(bb,i,cur) \
|
||||
XFS_BTREE_PTR_ADDR(XFS_ALLOC_BLOCK_SIZE(1,cur), xfs_alloc, \
|
||||
bb, i, XFS_ALLOC_BLOCK_MAXRECS(1, cur))
|
||||
XFS_BTREE_PTR_ADDR(xfs_alloc, bb, i, XFS_ALLOC_BLOCK_MAXRECS(1, cur))
|
||||
|
||||
/*
|
||||
* Decrement cursor by one record at the level.
|
||||
|
|
|
@ -57,9 +57,9 @@
|
|||
*/
|
||||
|
||||
#define ATTR_SYSCOUNT 2
|
||||
STATIC struct attrnames posix_acl_access;
|
||||
STATIC struct attrnames posix_acl_default;
|
||||
STATIC struct attrnames *attr_system_names[ATTR_SYSCOUNT];
|
||||
static struct attrnames posix_acl_access;
|
||||
static struct attrnames posix_acl_default;
|
||||
static struct attrnames *attr_system_names[ATTR_SYSCOUNT];
|
||||
|
||||
/*========================================================================
|
||||
* Function prototypes for the kernel.
|
||||
|
@ -198,19 +198,15 @@ xfs_attr_set_int(xfs_inode_t *dp, const char *name, int namelen,
|
|||
if ((error = XFS_QM_DQATTACH(mp, dp, 0)))
|
||||
return (error);
|
||||
|
||||
/*
|
||||
* Determine space new attribute will use, and if it would be
|
||||
* "local" or "remote" (note: local != inline).
|
||||
*/
|
||||
size = xfs_attr_leaf_newentsize(namelen, valuelen,
|
||||
mp->m_sb.sb_blocksize, &local);
|
||||
|
||||
/*
|
||||
* If the inode doesn't have an attribute fork, add one.
|
||||
* (inode must not be locked when we call this routine)
|
||||
*/
|
||||
if (XFS_IFORK_Q(dp) == 0) {
|
||||
if ((error = xfs_bmap_add_attrfork(dp, size, rsvd)))
|
||||
int sf_size = sizeof(xfs_attr_sf_hdr_t) +
|
||||
XFS_ATTR_SF_ENTSIZE_BYNAME(namelen, valuelen);
|
||||
|
||||
if ((error = xfs_bmap_add_attrfork(dp, sf_size, rsvd)))
|
||||
return(error);
|
||||
}
|
||||
|
||||
|
@ -231,6 +227,13 @@ xfs_attr_set_int(xfs_inode_t *dp, const char *name, int namelen,
|
|||
args.addname = 1;
|
||||
args.oknoent = 1;
|
||||
|
||||
/*
|
||||
* Determine space new attribute will use, and if it would be
|
||||
* "local" or "remote" (note: local != inline).
|
||||
*/
|
||||
size = xfs_attr_leaf_newentsize(namelen, valuelen,
|
||||
mp->m_sb.sb_blocksize, &local);
|
||||
|
||||
nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK);
|
||||
if (local) {
|
||||
if (size > (mp->m_sb.sb_blocksize >> 1)) {
|
||||
|
@ -346,7 +349,7 @@ xfs_attr_set_int(xfs_inode_t *dp, const char *name, int namelen,
|
|||
error = xfs_attr_shortform_to_leaf(&args);
|
||||
if (!error) {
|
||||
error = xfs_bmap_finish(&args.trans, args.flist,
|
||||
*args.firstblock, &committed);
|
||||
&committed);
|
||||
}
|
||||
if (error) {
|
||||
ASSERT(committed);
|
||||
|
@ -973,7 +976,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
|
|||
error = xfs_attr_leaf_to_node(args);
|
||||
if (!error) {
|
||||
error = xfs_bmap_finish(&args->trans, args->flist,
|
||||
*args->firstblock, &committed);
|
||||
&committed);
|
||||
}
|
||||
if (error) {
|
||||
ASSERT(committed);
|
||||
|
@ -1074,7 +1077,6 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
|
|||
if (!error) {
|
||||
error = xfs_bmap_finish(&args->trans,
|
||||
args->flist,
|
||||
*args->firstblock,
|
||||
&committed);
|
||||
}
|
||||
if (error) {
|
||||
|
@ -1152,7 +1154,7 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
|
|||
/* bp is gone due to xfs_da_shrink_inode */
|
||||
if (!error) {
|
||||
error = xfs_bmap_finish(&args->trans, args->flist,
|
||||
*args->firstblock, &committed);
|
||||
&committed);
|
||||
}
|
||||
if (error) {
|
||||
ASSERT(committed);
|
||||
|
@ -1307,7 +1309,6 @@ restart:
|
|||
if (!error) {
|
||||
error = xfs_bmap_finish(&args->trans,
|
||||
args->flist,
|
||||
*args->firstblock,
|
||||
&committed);
|
||||
}
|
||||
if (error) {
|
||||
|
@ -1347,7 +1348,7 @@ restart:
|
|||
error = xfs_da_split(state);
|
||||
if (!error) {
|
||||
error = xfs_bmap_finish(&args->trans, args->flist,
|
||||
*args->firstblock, &committed);
|
||||
&committed);
|
||||
}
|
||||
if (error) {
|
||||
ASSERT(committed);
|
||||
|
@ -1459,7 +1460,6 @@ restart:
|
|||
if (!error) {
|
||||
error = xfs_bmap_finish(&args->trans,
|
||||
args->flist,
|
||||
*args->firstblock,
|
||||
&committed);
|
||||
}
|
||||
if (error) {
|
||||
|
@ -1594,7 +1594,7 @@ xfs_attr_node_removename(xfs_da_args_t *args)
|
|||
error = xfs_da_join(state);
|
||||
if (!error) {
|
||||
error = xfs_bmap_finish(&args->trans, args->flist,
|
||||
*args->firstblock, &committed);
|
||||
&committed);
|
||||
}
|
||||
if (error) {
|
||||
ASSERT(committed);
|
||||
|
@ -1646,7 +1646,6 @@ xfs_attr_node_removename(xfs_da_args_t *args)
|
|||
if (!error) {
|
||||
error = xfs_bmap_finish(&args->trans,
|
||||
args->flist,
|
||||
*args->firstblock,
|
||||
&committed);
|
||||
}
|
||||
if (error) {
|
||||
|
@ -2090,7 +2089,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
|
|||
args->flist, NULL);
|
||||
if (!error) {
|
||||
error = xfs_bmap_finish(&args->trans, args->flist,
|
||||
*args->firstblock, &committed);
|
||||
&committed);
|
||||
}
|
||||
if (error) {
|
||||
ASSERT(committed);
|
||||
|
@ -2246,7 +2245,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
|
|||
NULL, &done);
|
||||
if (!error) {
|
||||
error = xfs_bmap_finish(&args->trans, args->flist,
|
||||
*args->firstblock, &committed);
|
||||
&committed);
|
||||
}
|
||||
if (error) {
|
||||
ASSERT(committed);
|
||||
|
@ -2477,7 +2476,7 @@ posix_acl_default_exists(
|
|||
return xfs_acl_vhasacl_default(vp);
|
||||
}
|
||||
|
||||
STATIC struct attrnames posix_acl_access = {
|
||||
static struct attrnames posix_acl_access = {
|
||||
.attr_name = "posix_acl_access",
|
||||
.attr_namelen = sizeof("posix_acl_access") - 1,
|
||||
.attr_get = posix_acl_access_get,
|
||||
|
@ -2486,7 +2485,7 @@ STATIC struct attrnames posix_acl_access = {
|
|||
.attr_exists = posix_acl_access_exists,
|
||||
};
|
||||
|
||||
STATIC struct attrnames posix_acl_default = {
|
||||
static struct attrnames posix_acl_default = {
|
||||
.attr_name = "posix_acl_default",
|
||||
.attr_namelen = sizeof("posix_acl_default") - 1,
|
||||
.attr_get = posix_acl_default_get,
|
||||
|
@ -2495,7 +2494,7 @@ STATIC struct attrnames posix_acl_default = {
|
|||
.attr_exists = posix_acl_default_exists,
|
||||
};
|
||||
|
||||
STATIC struct attrnames *attr_system_names[] =
|
||||
static struct attrnames *attr_system_names[] =
|
||||
{ &posix_acl_access, &posix_acl_default };
|
||||
|
||||
|
||||
|
|
|
@ -94,7 +94,7 @@ STATIC int xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index);
|
|||
* Namespace helper routines
|
||||
*========================================================================*/
|
||||
|
||||
STATIC inline attrnames_t *
|
||||
STATIC_INLINE attrnames_t *
|
||||
xfs_attr_flags_namesp(int flags)
|
||||
{
|
||||
return ((flags & XFS_ATTR_SECURE) ? &attr_secure:
|
||||
|
@ -105,7 +105,7 @@ xfs_attr_flags_namesp(int flags)
|
|||
* If namespace bits don't match return 0.
|
||||
* If all match then return 1.
|
||||
*/
|
||||
STATIC inline int
|
||||
STATIC_INLINE int
|
||||
xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
|
||||
{
|
||||
return XFS_ATTR_NSP_ONDISK(ondisk_flags) == XFS_ATTR_NSP_ARGS_TO_ONDISK(arg_flags);
|
||||
|
@ -116,7 +116,7 @@ xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
|
|||
* then return 0.
|
||||
* If all match or are overridable then return 1.
|
||||
*/
|
||||
STATIC inline int
|
||||
STATIC_INLINE int
|
||||
xfs_attr_namesp_match_overrides(int arg_flags, int ondisk_flags)
|
||||
{
|
||||
if (((arg_flags & ATTR_SECURE) == 0) !=
|
||||
|
@ -150,6 +150,7 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
|
|||
int offset;
|
||||
int minforkoff; /* lower limit on valid forkoff locations */
|
||||
int maxforkoff; /* upper limit on valid forkoff locations */
|
||||
int dsize;
|
||||
xfs_mount_t *mp = dp->i_mount;
|
||||
|
||||
offset = (XFS_LITINO(mp) - bytes) >> 3; /* rounded down */
|
||||
|
@ -169,8 +170,43 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* data fork btree root can have at least this many key/ptr pairs */
|
||||
minforkoff = MAX(dp->i_df.if_bytes, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
|
||||
dsize = dp->i_df.if_bytes;
|
||||
|
||||
switch (dp->i_d.di_format) {
|
||||
case XFS_DINODE_FMT_EXTENTS:
|
||||
/*
|
||||
* If there is no attr fork and the data fork is extents,
|
||||
* determine if creating the default attr fork will result
|
||||
* in the extents form migrating to btree. If so, the
|
||||
* minimum offset only needs to be the space required for
|
||||
* the btree root.
|
||||
*/
|
||||
if (!dp->i_d.di_forkoff && dp->i_df.if_bytes > mp->m_attroffset)
|
||||
dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
|
||||
break;
|
||||
|
||||
case XFS_DINODE_FMT_BTREE:
|
||||
/*
|
||||
* If have data btree then keep forkoff if we have one,
|
||||
* otherwise we are adding a new attr, so then we set
|
||||
* minforkoff to where the btree root can finish so we have
|
||||
* plenty of room for attrs
|
||||
*/
|
||||
if (dp->i_d.di_forkoff) {
|
||||
if (offset < dp->i_d.di_forkoff)
|
||||
return 0;
|
||||
else
|
||||
return dp->i_d.di_forkoff;
|
||||
} else
|
||||
dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* A data fork btree root must have space for at least
|
||||
* MINDBTPTRS key/ptr pairs if the data fork is small or empty.
|
||||
*/
|
||||
minforkoff = MAX(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
|
||||
minforkoff = roundup(minforkoff, 8) >> 3;
|
||||
|
||||
/* attr fork btree root can have at least this many key/ptr pairs */
|
||||
|
@ -336,7 +372,8 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
|
|||
*/
|
||||
totsize -= size;
|
||||
if (totsize == sizeof(xfs_attr_sf_hdr_t) && !args->addname &&
|
||||
(mp->m_flags & XFS_MOUNT_ATTR2)) {
|
||||
(mp->m_flags & XFS_MOUNT_ATTR2) &&
|
||||
(dp->i_d.di_format != XFS_DINODE_FMT_BTREE)) {
|
||||
/*
|
||||
* Last attribute now removed, revert to original
|
||||
* inode format making all literal area available
|
||||
|
@ -355,7 +392,8 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
|
|||
dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
|
||||
ASSERT(dp->i_d.di_forkoff);
|
||||
ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) || args->addname ||
|
||||
!(mp->m_flags & XFS_MOUNT_ATTR2));
|
||||
!(mp->m_flags & XFS_MOUNT_ATTR2) ||
|
||||
dp->i_d.di_format == XFS_DINODE_FMT_BTREE);
|
||||
dp->i_afp->if_ext_max =
|
||||
XFS_IFORK_ASIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
|
||||
dp->i_df.if_ext_max =
|
||||
|
@ -748,6 +786,7 @@ xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp)
+ be16_to_cpu(name_loc->valuelen);
}
if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) &&
(dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
(bytes == sizeof(struct xfs_attr_sf_hdr)))
return(-1);
return(xfs_attr_shortform_bytesfit(dp, bytes));
@ -786,6 +825,7 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)

if (forkoff == -1) {
ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2);
ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE);

/*
* Last attribute was removed, revert to original

@ -29,7 +29,7 @@
|
|||
/*
|
||||
* Index of high bit number in byte, -1 for none set, 0..7 otherwise.
|
||||
*/
|
||||
STATIC const char xfs_highbit[256] = {
|
||||
static const char xfs_highbit[256] = {
|
||||
-1, 0, 1, 1, 2, 2, 2, 2, /* 00 .. 07 */
|
||||
3, 3, 3, 3, 3, 3, 3, 3, /* 08 .. 0f */
|
||||
4, 4, 4, 4, 4, 4, 4, 4, /* 10 .. 17 */
|
||||
|
|
|
@ -185,16 +185,6 @@ xfs_bmap_btree_to_extents(
|
|||
int *logflagsp, /* inode logging flags */
|
||||
int whichfork); /* data or attr fork */
|
||||
|
||||
#ifdef DEBUG
|
||||
/*
|
||||
* Check that the extents list for the inode ip is in the right order.
|
||||
*/
|
||||
STATIC void
|
||||
xfs_bmap_check_extents(
|
||||
xfs_inode_t *ip, /* incore inode pointer */
|
||||
int whichfork); /* data or attr fork */
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Called by xfs_bmapi to update file extent records and the btree
|
||||
* after removing space (or undoing a delayed allocation).
|
||||
|
@ -410,7 +400,6 @@ xfs_bmap_count_leaves(
|
|||
STATIC int
|
||||
xfs_bmap_disk_count_leaves(
|
||||
xfs_ifork_t *ifp,
|
||||
xfs_mount_t *mp,
|
||||
xfs_extnum_t idx,
|
||||
xfs_bmbt_block_t *block,
|
||||
int numrecs,
|
||||
|
@ -684,7 +673,7 @@ xfs_bmap_add_extent(
|
|||
ASSERT(nblks <= da_old);
|
||||
if (nblks < da_old)
|
||||
xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS,
|
||||
(int)(da_old - nblks), rsvd);
|
||||
(int64_t)(da_old - nblks), rsvd);
|
||||
}
|
||||
/*
|
||||
* Clear out the allocated field, done with it now in any case.
|
||||
|
@ -1209,7 +1198,7 @@ xfs_bmap_add_extent_delay_real(
|
|||
diff = (int)(temp + temp2 - STARTBLOCKVAL(PREV.br_startblock) -
|
||||
(cur ? cur->bc_private.b.allocated : 0));
|
||||
if (diff > 0 &&
|
||||
xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, -diff, rsvd)) {
|
||||
xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd)) {
|
||||
/*
|
||||
* Ick gross gag me with a spoon.
|
||||
*/
|
||||
|
@ -1220,7 +1209,7 @@ xfs_bmap_add_extent_delay_real(
|
|||
diff--;
|
||||
if (!diff ||
|
||||
!xfs_mod_incore_sb(ip->i_mount,
|
||||
XFS_SBS_FDBLOCKS, -diff, rsvd))
|
||||
XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd))
|
||||
break;
|
||||
}
|
||||
if (temp2) {
|
||||
|
@ -1228,7 +1217,7 @@ xfs_bmap_add_extent_delay_real(
|
|||
diff--;
|
||||
if (!diff ||
|
||||
!xfs_mod_incore_sb(ip->i_mount,
|
||||
XFS_SBS_FDBLOCKS, -diff, rsvd))
|
||||
XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd))
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -2015,7 +2004,7 @@ xfs_bmap_add_extent_hole_delay(
|
|||
if (oldlen != newlen) {
|
||||
ASSERT(oldlen > newlen);
|
||||
xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS,
|
||||
(int)(oldlen - newlen), rsvd);
|
||||
(int64_t)(oldlen - newlen), rsvd);
|
||||
/*
|
||||
* Nothing to do for disk quota accounting here.
|
||||
*/
|
||||
|
@ -3359,7 +3348,7 @@ xfs_bmap_del_extent(
|
|||
*/
|
||||
ASSERT(da_old >= da_new);
|
||||
if (da_old > da_new)
|
||||
xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int)(da_old - da_new),
|
||||
xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int64_t)(da_old - da_new),
|
||||
rsvd);
|
||||
if (delta) {
|
||||
/* DELTA: report the original extent. */
|
||||
|
@ -3543,6 +3532,7 @@ xfs_bmap_forkoff_reset(
|
|||
if (whichfork == XFS_ATTR_FORK &&
|
||||
(ip->i_d.di_format != XFS_DINODE_FMT_DEV) &&
|
||||
(ip->i_d.di_format != XFS_DINODE_FMT_UUID) &&
|
||||
(ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
|
||||
((mp->m_attroffset >> 3) > ip->i_d.di_forkoff)) {
|
||||
ip->i_d.di_forkoff = mp->m_attroffset >> 3;
|
||||
ip->i_df.if_ext_max = XFS_IFORK_DSIZE(ip) /
|
||||
|
@ -4079,7 +4069,7 @@ xfs_bmap_add_attrfork(
|
|||
} else
|
||||
XFS_SB_UNLOCK(mp, s);
|
||||
}
|
||||
if ((error = xfs_bmap_finish(&tp, &flist, firstblock, &committed)))
|
||||
if ((error = xfs_bmap_finish(&tp, &flist, &committed)))
|
||||
goto error2;
|
||||
error = xfs_trans_commit(tp, XFS_TRANS_PERM_LOG_RES, NULL);
|
||||
ASSERT(ip->i_df.if_ext_max ==
|
||||
|
@ -4212,7 +4202,6 @@ int /* error */
|
|||
xfs_bmap_finish(
|
||||
xfs_trans_t **tp, /* transaction pointer addr */
|
||||
xfs_bmap_free_t *flist, /* i/o: list extents to free */
|
||||
xfs_fsblock_t firstblock, /* controlled ag for allocs */
|
||||
int *committed) /* xact committed or not */
|
||||
{
|
||||
xfs_efd_log_item_t *efd; /* extent free data */
|
||||
|
@ -4533,8 +4522,7 @@ xfs_bmap_read_extents(
|
|||
error0);
|
||||
if (level == 0)
|
||||
break;
|
||||
pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block,
|
||||
1, mp->m_bmap_dmxr[1]);
|
||||
pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
|
||||
bno = be64_to_cpu(*pp);
|
||||
XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
|
||||
xfs_trans_brelse(tp, bp);
|
||||
|
@ -4577,8 +4565,7 @@ xfs_bmap_read_extents(
|
|||
/*
|
||||
* Copy records into the extent records.
|
||||
*/
|
||||
frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt,
|
||||
block, 1, mp->m_bmap_dmxr[0]);
|
||||
frp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, 1);
|
||||
start = i;
|
||||
for (j = 0; j < num_recs; j++, i++, frp++) {
|
||||
trp = xfs_iext_get_ext(ifp, i);
|
||||
|
@ -4929,28 +4916,28 @@ xfs_bmapi(
|
|||
if (rt) {
|
||||
error = xfs_mod_incore_sb(mp,
|
||||
XFS_SBS_FREXTENTS,
|
||||
-(extsz), (flags &
|
||||
-((int64_t)extsz), (flags &
|
||||
XFS_BMAPI_RSVBLOCKS));
|
||||
} else {
|
||||
error = xfs_mod_incore_sb(mp,
|
||||
XFS_SBS_FDBLOCKS,
|
||||
-(alen), (flags &
|
||||
-((int64_t)alen), (flags &
|
||||
XFS_BMAPI_RSVBLOCKS));
|
||||
}
|
||||
if (!error) {
|
||||
error = xfs_mod_incore_sb(mp,
|
||||
XFS_SBS_FDBLOCKS,
|
||||
-(indlen), (flags &
|
||||
-((int64_t)indlen), (flags &
|
||||
XFS_BMAPI_RSVBLOCKS));
|
||||
if (error && rt)
|
||||
xfs_mod_incore_sb(mp,
|
||||
XFS_SBS_FREXTENTS,
|
||||
extsz, (flags &
|
||||
(int64_t)extsz, (flags &
|
||||
XFS_BMAPI_RSVBLOCKS));
|
||||
else if (error)
|
||||
xfs_mod_incore_sb(mp,
|
||||
XFS_SBS_FDBLOCKS,
|
||||
alen, (flags &
|
||||
(int64_t)alen, (flags &
|
||||
XFS_BMAPI_RSVBLOCKS));
|
||||
}
|
||||
|
||||
|
@ -5616,13 +5603,13 @@ xfs_bunmapi(
|
|||
rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
|
||||
do_div(rtexts, mp->m_sb.sb_rextsize);
|
||||
xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
|
||||
(int)rtexts, rsvd);
|
||||
(int64_t)rtexts, rsvd);
|
||||
(void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp,
|
||||
NULL, ip, -((long)del.br_blockcount), 0,
|
||||
XFS_QMOPT_RES_RTBLKS);
|
||||
} else {
|
||||
xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
|
||||
(int)del.br_blockcount, rsvd);
|
||||
(int64_t)del.br_blockcount, rsvd);
|
||||
(void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp,
|
||||
NULL, ip, -((long)del.br_blockcount), 0,
|
||||
XFS_QMOPT_RES_REGBLKS);
|
||||
|
@ -6048,32 +6035,6 @@ xfs_bmap_eof(
|
|||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
/*
|
||||
* Check that the extents list for the inode ip is in the right order.
|
||||
*/
|
||||
STATIC void
|
||||
xfs_bmap_check_extents(
|
||||
xfs_inode_t *ip, /* incore inode pointer */
|
||||
int whichfork) /* data or attr fork */
|
||||
{
|
||||
xfs_bmbt_rec_t *ep; /* current extent entry */
|
||||
xfs_extnum_t idx; /* extent record index */
|
||||
xfs_ifork_t *ifp; /* inode fork pointer */
|
||||
xfs_extnum_t nextents; /* number of extents in list */
|
||||
xfs_bmbt_rec_t *nextp; /* next extent entry */
|
||||
|
||||
ifp = XFS_IFORK_PTR(ip, whichfork);
|
||||
ASSERT(ifp->if_flags & XFS_IFEXTENTS);
|
||||
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
|
||||
ep = xfs_iext_get_ext(ifp, 0);
|
||||
for (idx = 0; idx < nextents - 1; idx++) {
|
||||
nextp = xfs_iext_get_ext(ifp, idx + 1);
|
||||
xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)ep,
|
||||
(void *)(nextp));
|
||||
ep = nextp;
|
||||
}
|
||||
}
|
||||
|
||||
STATIC
|
||||
xfs_buf_t *
|
||||
xfs_bmap_get_bp(
|
||||
|
@ -6156,8 +6117,7 @@ xfs_check_block(
|
|||
if (root) {
|
||||
keyp = XFS_BMAP_BROOT_KEY_ADDR(block, i, sz);
|
||||
} else {
|
||||
keyp = XFS_BTREE_KEY_ADDR(mp->m_sb.sb_blocksize,
|
||||
xfs_bmbt, block, i, dmxr);
|
||||
keyp = XFS_BTREE_KEY_ADDR(xfs_bmbt, block, i);
|
||||
}
|
||||
|
||||
if (prevp) {
|
||||
|
@ -6172,15 +6132,14 @@ xfs_check_block(
|
|||
if (root) {
|
||||
pp = XFS_BMAP_BROOT_PTR_ADDR(block, i, sz);
|
||||
} else {
|
||||
pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize,
|
||||
xfs_bmbt, block, i, dmxr);
|
||||
pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, i, dmxr);
|
||||
}
|
||||
for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
|
||||
if (root) {
|
||||
thispa = XFS_BMAP_BROOT_PTR_ADDR(block, j, sz);
|
||||
} else {
|
||||
thispa = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize,
|
||||
xfs_bmbt, block, j, dmxr);
|
||||
thispa = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, j,
|
||||
dmxr);
|
||||
}
|
||||
if (*thispa == *pp) {
|
||||
cmn_err(CE_WARN, "%s: thispa(%d) == pp(%d) %Ld",
|
||||
|
@ -6267,8 +6226,7 @@ xfs_bmap_check_leaf_extents(
|
|||
*/
|
||||
|
||||
xfs_check_block(block, mp, 0, 0);
|
||||
pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block,
|
||||
1, mp->m_bmap_dmxr[1]);
|
||||
pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
|
||||
bno = be64_to_cpu(*pp);
|
||||
XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
|
||||
if (bp_release) {
|
||||
|
@ -6305,11 +6263,9 @@ xfs_bmap_check_leaf_extents(
|
|||
* conform with the first entry in this one.
|
||||
*/
|
||||
|
||||
ep = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt,
|
||||
block, 1, mp->m_bmap_dmxr[0]);
|
||||
ep = XFS_BTREE_REC_ADDR(xfs_bmbt, block, 1);
|
||||
for (j = 1; j < num_recs; j++) {
|
||||
nextp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt,
|
||||
block, j + 1, mp->m_bmap_dmxr[0]);
|
||||
nextp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, j + 1);
|
||||
if (lastp) {
|
||||
xfs_btree_check_rec(XFS_BTNUM_BMAP,
|
||||
(void *)lastp, (void *)ep);
|
||||
|
@ -6454,8 +6410,7 @@ xfs_bmap_count_tree(
|
|||
}
|
||||
|
||||
/* Dive to the next level */
|
||||
pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize,
|
||||
xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
|
||||
pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
|
||||
bno = be64_to_cpu(*pp);
|
||||
if (unlikely((error =
|
||||
xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
|
||||
|
@ -6470,7 +6425,7 @@ xfs_bmap_count_tree(
|
|||
for (;;) {
|
||||
nextbno = be64_to_cpu(block->bb_rightsib);
|
||||
numrecs = be16_to_cpu(block->bb_numrecs);
|
||||
if (unlikely(xfs_bmap_disk_count_leaves(ifp, mp,
|
||||
if (unlikely(xfs_bmap_disk_count_leaves(ifp,
|
||||
0, block, numrecs, count) < 0)) {
|
||||
xfs_trans_brelse(tp, bp);
|
||||
XFS_ERROR_REPORT("xfs_bmap_count_tree(2)",
|
||||
|
@ -6518,7 +6473,6 @@ xfs_bmap_count_leaves(
|
|||
int
|
||||
xfs_bmap_disk_count_leaves(
|
||||
xfs_ifork_t *ifp,
|
||||
xfs_mount_t *mp,
|
||||
xfs_extnum_t idx,
|
||||
xfs_bmbt_block_t *block,
|
||||
int numrecs,
|
||||
|
@ -6528,8 +6482,7 @@ xfs_bmap_disk_count_leaves(
|
|||
xfs_bmbt_rec_t *frp;
|
||||
|
||||
for (b = 1; b <= numrecs; b++) {
|
||||
frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize,
|
||||
xfs_bmbt, block, idx + b, mp->m_bmap_dmxr[0]);
|
||||
frp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, idx + b);
|
||||
*count += xfs_bmbt_disk_get_blockcount(frp);
|
||||
}
|
||||
return 0;
|
||||
|
|
|
@ -202,7 +202,6 @@ int /* error */
|
|||
xfs_bmap_finish(
|
||||
struct xfs_trans **tp, /* transaction pointer addr */
|
||||
xfs_bmap_free_t *flist, /* i/o: list extents to free */
|
||||
xfs_fsblock_t firstblock, /* controlled a.g. for allocs */
|
||||
int *committed); /* xact committed or not */
|
||||
|
||||
/*
|
||||
|
|
|
@ -678,47 +678,6 @@ error0:
|
|||
return error;
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
/*
|
||||
* Get the data from the pointed-to record.
|
||||
*/
|
||||
int
|
||||
xfs_bmbt_get_rec(
|
||||
xfs_btree_cur_t *cur,
|
||||
xfs_fileoff_t *off,
|
||||
xfs_fsblock_t *bno,
|
||||
xfs_filblks_t *len,
|
||||
xfs_exntst_t *state,
|
||||
int *stat)
|
||||
{
|
||||
xfs_bmbt_block_t *block;
|
||||
xfs_buf_t *bp;
|
||||
#ifdef DEBUG
|
||||
int error;
|
||||
#endif
|
||||
int ptr;
|
||||
xfs_bmbt_rec_t *rp;
|
||||
|
||||
block = xfs_bmbt_get_block(cur, 0, &bp);
|
||||
ptr = cur->bc_ptrs[0];
|
||||
#ifdef DEBUG
|
||||
if ((error = xfs_btree_check_lblock(cur, block, 0, bp)))
|
||||
return error;
|
||||
#endif
|
||||
if (ptr > be16_to_cpu(block->bb_numrecs) || ptr <= 0) {
|
||||
*stat = 0;
|
||||
return 0;
|
||||
}
|
||||
rp = XFS_BMAP_REC_IADDR(block, ptr, cur);
|
||||
*off = xfs_bmbt_disk_get_startoff(rp);
|
||||
*bno = xfs_bmbt_disk_get_startblock(rp);
|
||||
*len = xfs_bmbt_disk_get_blockcount(rp);
|
||||
*state = xfs_bmbt_disk_get_state(rp);
|
||||
*stat = 1;
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Insert one record/level. Return information to the caller
|
||||
* allowing the next level up to proceed if necessary.
|
||||
|
@ -1731,9 +1690,9 @@ xfs_bmdr_to_bmbt(
|
|||
rblock->bb_leftsib = cpu_to_be64(NULLDFSBNO);
|
||||
rblock->bb_rightsib = cpu_to_be64(NULLDFSBNO);
|
||||
dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0);
|
||||
fkp = XFS_BTREE_KEY_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr);
|
||||
fkp = XFS_BTREE_KEY_ADDR(xfs_bmdr, dblock, 1);
|
||||
tkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen);
|
||||
fpp = XFS_BTREE_PTR_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr);
|
||||
fpp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dblock, 1, dmxr);
|
||||
tpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
|
||||
dmxr = be16_to_cpu(dblock->bb_numrecs);
|
||||
memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
|
||||
|
@ -1862,7 +1821,7 @@ xfs_bmbt_delete(
|
|||
* xfs_bmbt_get_startblock, xfs_bmbt_get_blockcount and xfs_bmbt_get_state.
|
||||
*/
|
||||
|
||||
STATIC __inline__ void
|
||||
STATIC_INLINE void
|
||||
__xfs_bmbt_get_all(
|
||||
__uint64_t l0,
|
||||
__uint64_t l1,
|
||||
|
@ -2015,30 +1974,6 @@ xfs_bmbt_disk_get_blockcount(
|
|||
return (xfs_filblks_t)(INT_GET(r->l1, ARCH_CONVERT) & XFS_MASK64LO(21));
|
||||
}
|
||||
|
||||
/*
|
||||
* Extract the startblock field from an on disk bmap extent record.
|
||||
*/
|
||||
xfs_fsblock_t
|
||||
xfs_bmbt_disk_get_startblock(
|
||||
xfs_bmbt_rec_t *r)
|
||||
{
|
||||
#if XFS_BIG_BLKNOS
|
||||
return (((xfs_fsblock_t)INT_GET(r->l0, ARCH_CONVERT) & XFS_MASK64LO(9)) << 43) |
|
||||
(((xfs_fsblock_t)INT_GET(r->l1, ARCH_CONVERT)) >> 21);
|
||||
#else
|
||||
#ifdef DEBUG
|
||||
xfs_dfsbno_t b;
|
||||
|
||||
b = (((xfs_dfsbno_t)INT_GET(r->l0, ARCH_CONVERT) & XFS_MASK64LO(9)) << 43) |
|
||||
(((xfs_dfsbno_t)INT_GET(r->l1, ARCH_CONVERT)) >> 21);
|
||||
ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
|
||||
return (xfs_fsblock_t)b;
|
||||
#else /* !DEBUG */
|
||||
return (xfs_fsblock_t)(((xfs_dfsbno_t)INT_GET(r->l1, ARCH_CONVERT)) >> 21);
|
||||
#endif /* DEBUG */
|
||||
#endif /* XFS_BIG_BLKNOS */
|
||||
}
|
||||
|
||||
/*
|
||||
* Extract the startoff field from a disk format bmap extent record.
|
||||
*/
|
||||
|
@ -2049,17 +1984,6 @@ xfs_bmbt_disk_get_startoff(
|
|||
return ((xfs_fileoff_t)INT_GET(r->l0, ARCH_CONVERT) &
|
||||
XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
|
||||
}
|
||||
|
||||
xfs_exntst_t
|
||||
xfs_bmbt_disk_get_state(
|
||||
xfs_bmbt_rec_t *r)
|
||||
{
|
||||
int ext_flag;
|
||||
|
||||
ext_flag = (int)((INT_GET(r->l0, ARCH_CONVERT)) >> (64 - BMBT_EXNTFLAG_BITLEN));
|
||||
return xfs_extent_state(xfs_bmbt_disk_get_blockcount(r),
|
||||
ext_flag);
|
||||
}
|
||||
#endif /* XFS_NATIVE_HOST */
|
||||
|
||||
|
||||
|
@ -2684,9 +2608,9 @@ xfs_bmbt_to_bmdr(
|
|||
dblock->bb_numrecs = rblock->bb_numrecs;
|
||||
dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0);
|
||||
fkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen);
|
||||
tkp = XFS_BTREE_KEY_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr);
|
||||
tkp = XFS_BTREE_KEY_ADDR(xfs_bmdr, dblock, 1);
|
||||
fpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen);
|
||||
tpp = XFS_BTREE_PTR_ADDR(dblocklen, xfs_bmdr, dblock, 1, dmxr);
|
||||
tpp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dblock, 1, dmxr);
|
||||
dmxr = be16_to_cpu(dblock->bb_numrecs);
|
||||
memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
|
||||
memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
|
||||
|
|
|
@ -175,19 +175,11 @@ typedef struct xfs_btree_lblock xfs_bmbt_block_t;
|
|||
|
||||
#define XFS_BUF_TO_BMBT_BLOCK(bp) ((xfs_bmbt_block_t *)XFS_BUF_PTR(bp))
|
||||
|
||||
#define XFS_BMAP_IBLOCK_SIZE(lev,cur) (1 << (cur)->bc_blocklog)
|
||||
#define XFS_BMAP_RBLOCK_DSIZE(lev,cur) ((cur)->bc_private.b.forksize)
|
||||
#define XFS_BMAP_RBLOCK_ISIZE(lev,cur) \
|
||||
((int)XFS_IFORK_PTR((cur)->bc_private.b.ip, \
|
||||
(cur)->bc_private.b.whichfork)->if_broot_bytes)
|
||||
|
||||
#define XFS_BMAP_BLOCK_DSIZE(lev,cur) \
|
||||
(((lev) == (cur)->bc_nlevels - 1 ? \
|
||||
XFS_BMAP_RBLOCK_DSIZE(lev,cur) : XFS_BMAP_IBLOCK_SIZE(lev,cur)))
|
||||
#define XFS_BMAP_BLOCK_ISIZE(lev,cur) \
|
||||
(((lev) == (cur)->bc_nlevels - 1 ? \
|
||||
XFS_BMAP_RBLOCK_ISIZE(lev,cur) : XFS_BMAP_IBLOCK_SIZE(lev,cur)))
|
||||
|
||||
#define XFS_BMAP_BLOCK_DMAXRECS(lev,cur) \
|
||||
(((lev) == (cur)->bc_nlevels - 1 ? \
|
||||
XFS_BTREE_BLOCK_MAXRECS(XFS_BMAP_RBLOCK_DSIZE(lev,cur), \
|
||||
|
@ -210,37 +202,21 @@ typedef struct xfs_btree_lblock xfs_bmbt_block_t;
|
|||
xfs_bmbt, (lev) == 0) : \
|
||||
((cur)->bc_mp->m_bmap_dmnr[(lev) != 0])))
|
||||
|
||||
#define XFS_BMAP_REC_DADDR(bb,i,cur) \
|
||||
(XFS_BTREE_REC_ADDR(XFS_BMAP_BLOCK_DSIZE( \
|
||||
be16_to_cpu((bb)->bb_level), cur), \
|
||||
xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \
|
||||
be16_to_cpu((bb)->bb_level), cur)))
|
||||
#define XFS_BMAP_REC_IADDR(bb,i,cur) \
|
||||
(XFS_BTREE_REC_ADDR(XFS_BMAP_BLOCK_ISIZE( \
|
||||
be16_to_cpu((bb)->bb_level), cur), \
|
||||
xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS( \
|
||||
be16_to_cpu((bb)->bb_level), cur)))
|
||||
#define XFS_BMAP_REC_DADDR(bb,i,cur) (XFS_BTREE_REC_ADDR(xfs_bmbt, bb, i))
|
||||
|
||||
#define XFS_BMAP_REC_IADDR(bb,i,cur) (XFS_BTREE_REC_ADDR(xfs_bmbt, bb, i))
|
||||
|
||||
#define XFS_BMAP_KEY_DADDR(bb,i,cur) \
|
||||
(XFS_BTREE_KEY_ADDR(XFS_BMAP_BLOCK_DSIZE( \
|
||||
be16_to_cpu((bb)->bb_level), cur), \
|
||||
xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \
|
||||
be16_to_cpu((bb)->bb_level), cur)))
|
||||
(XFS_BTREE_KEY_ADDR(xfs_bmbt, bb, i))
|
||||
|
||||
#define XFS_BMAP_KEY_IADDR(bb,i,cur) \
|
||||
(XFS_BTREE_KEY_ADDR(XFS_BMAP_BLOCK_ISIZE( \
|
||||
be16_to_cpu((bb)->bb_level), cur), \
|
||||
xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS( \
|
||||
be16_to_cpu((bb)->bb_level), cur)))
|
||||
(XFS_BTREE_KEY_ADDR(xfs_bmbt, bb, i))
|
||||
|
||||
#define XFS_BMAP_PTR_DADDR(bb,i,cur) \
|
||||
(XFS_BTREE_PTR_ADDR(XFS_BMAP_BLOCK_DSIZE( \
|
||||
be16_to_cpu((bb)->bb_level), cur), \
|
||||
xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \
|
||||
(XFS_BTREE_PTR_ADDR(xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \
|
||||
be16_to_cpu((bb)->bb_level), cur)))
|
||||
#define XFS_BMAP_PTR_IADDR(bb,i,cur) \
|
||||
(XFS_BTREE_PTR_ADDR(XFS_BMAP_BLOCK_ISIZE( \
|
||||
be16_to_cpu((bb)->bb_level), cur), \
|
||||
xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS( \
|
||||
(XFS_BTREE_PTR_ADDR(xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS( \
|
||||
be16_to_cpu((bb)->bb_level), cur)))
|
||||
|
||||
/*
|
||||
|
@ -248,11 +224,11 @@ typedef struct xfs_btree_lblock xfs_bmbt_block_t;
|
|||
* we don't have a cursor.
|
||||
*/
|
||||
#define XFS_BMAP_BROOT_REC_ADDR(bb,i,sz) \
|
||||
(XFS_BTREE_REC_ADDR(sz,xfs_bmbt,bb,i,XFS_BMAP_BROOT_MAXRECS(sz)))
|
||||
(XFS_BTREE_REC_ADDR(xfs_bmbt,bb,i))
|
||||
#define XFS_BMAP_BROOT_KEY_ADDR(bb,i,sz) \
|
||||
(XFS_BTREE_KEY_ADDR(sz,xfs_bmbt,bb,i,XFS_BMAP_BROOT_MAXRECS(sz)))
|
||||
(XFS_BTREE_KEY_ADDR(xfs_bmbt,bb,i))
|
||||
#define XFS_BMAP_BROOT_PTR_ADDR(bb,i,sz) \
|
||||
(XFS_BTREE_PTR_ADDR(sz,xfs_bmbt,bb,i,XFS_BMAP_BROOT_MAXRECS(sz)))
|
||||
(XFS_BTREE_PTR_ADDR(xfs_bmbt,bb,i,XFS_BMAP_BROOT_MAXRECS(sz)))
|
||||
|
||||
#define XFS_BMAP_BROOT_NUMRECS(bb) be16_to_cpu((bb)->bb_numrecs)
|
||||
#define XFS_BMAP_BROOT_MAXRECS(sz) XFS_BTREE_BLOCK_MAXRECS(sz,xfs_bmbt,0)
|
||||
|
@ -315,15 +291,11 @@ extern xfs_exntst_t xfs_bmbt_get_state(xfs_bmbt_rec_t *r);
|
|||
|
||||
#ifndef XFS_NATIVE_HOST
|
||||
extern void xfs_bmbt_disk_get_all(xfs_bmbt_rec_t *r, xfs_bmbt_irec_t *s);
|
||||
extern xfs_exntst_t xfs_bmbt_disk_get_state(xfs_bmbt_rec_t *r);
|
||||
extern xfs_filblks_t xfs_bmbt_disk_get_blockcount(xfs_bmbt_rec_t *r);
|
||||
extern xfs_fsblock_t xfs_bmbt_disk_get_startblock(xfs_bmbt_rec_t *r);
|
||||
extern xfs_fileoff_t xfs_bmbt_disk_get_startoff(xfs_bmbt_rec_t *r);
|
||||
#else
|
||||
#define xfs_bmbt_disk_get_all(r, s) xfs_bmbt_get_all(r, s)
|
||||
#define xfs_bmbt_disk_get_state(r) xfs_bmbt_get_state(r)
|
||||
#define xfs_bmbt_disk_get_blockcount(r) xfs_bmbt_get_blockcount(r)
|
||||
#define xfs_bmbt_disk_get_startblock(r) xfs_bmbt_get_blockcount(r)
|
||||
#define xfs_bmbt_disk_get_startoff(r) xfs_bmbt_get_startoff(r)
|
||||
#endif /* XFS_NATIVE_HOST */
|
||||
|
||||
|
@ -364,15 +336,6 @@ extern void xfs_bmbt_to_bmdr(xfs_bmbt_block_t *, int, xfs_bmdr_block_t *, int);
|
|||
extern int xfs_bmbt_update(struct xfs_btree_cur *, xfs_fileoff_t,
|
||||
xfs_fsblock_t, xfs_filblks_t, xfs_exntst_t);
|
||||
|
||||
#ifdef DEBUG
|
||||
/*
|
||||
* Get the data from the pointed-to record.
|
||||
*/
|
||||
extern int xfs_bmbt_get_rec(struct xfs_btree_cur *, xfs_fileoff_t *,
|
||||
xfs_fsblock_t *, xfs_filblks_t *,
|
||||
xfs_exntst_t *, int *);
|
||||
#endif
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif /* __XFS_BMAP_BTREE_H__ */
|
||||
|
|
|
@ -122,13 +122,13 @@ extern const __uint32_t xfs_magics[];
* Given block size, type prefix, block pointer, and index of requested entry
* (first entry numbered 1).
*/
#define XFS_BTREE_REC_ADDR(bsz,t,bb,i,mxr) \
#define XFS_BTREE_REC_ADDR(t,bb,i) \
((t ## _rec_t *)((char *)(bb) + sizeof(t ## _block_t) + \
((i) - 1) * sizeof(t ## _rec_t)))
#define XFS_BTREE_KEY_ADDR(bsz,t,bb,i,mxr) \
#define XFS_BTREE_KEY_ADDR(t,bb,i) \
((t ## _key_t *)((char *)(bb) + sizeof(t ## _block_t) + \
((i) - 1) * sizeof(t ## _key_t)))
#define XFS_BTREE_PTR_ADDR(bsz,t,bb,i,mxr) \
#define XFS_BTREE_PTR_ADDR(t,bb,i,mxr) \
((t ## _ptr_t *)((char *)(bb) + sizeof(t ## _block_t) + \
(mxr) * sizeof(t ## _key_t) + ((i) - 1) * sizeof(t ## _ptr_t)))

@ -660,7 +660,7 @@ xfs_buf_item_committing(xfs_buf_log_item_t *bip, xfs_lsn_t commit_lsn)
|
|||
/*
|
||||
* This is the ops vector shared by all buf log items.
|
||||
*/
|
||||
STATIC struct xfs_item_ops xfs_buf_item_ops = {
|
||||
static struct xfs_item_ops xfs_buf_item_ops = {
|
||||
.iop_size = (uint(*)(xfs_log_item_t*))xfs_buf_item_size,
|
||||
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
|
||||
xfs_buf_item_format,
|
||||
|
|
|
@ -21,23 +21,7 @@
|
|||
/*
|
||||
* This is the structure used to lay out a buf log item in the
|
||||
* log. The data map describes which 128 byte chunks of the buffer
|
||||
* have been logged. This structure works only on buffers that
|
||||
* reside up to the first TB in the filesystem. These buffers are
|
||||
* generated only by pre-6.2 systems and are known as XFS_LI_6_1_BUF.
|
||||
*/
|
||||
typedef struct xfs_buf_log_format_v1 {
|
||||
unsigned short blf_type; /* buf log item type indicator */
|
||||
unsigned short blf_size; /* size of this item */
|
||||
__int32_t blf_blkno; /* starting blkno of this buf */
|
||||
ushort blf_flags; /* misc state */
|
||||
ushort blf_len; /* number of blocks in this buf */
|
||||
unsigned int blf_map_size; /* size of data bitmap in words */
|
||||
unsigned int blf_data_map[1];/* variable size bitmap of */
|
||||
/* regions of buffer in this item */
|
||||
} xfs_buf_log_format_v1_t;
|
||||
|
||||
/*
|
||||
* This is a form of the above structure with a 64 bit blkno field.
|
||||
* have been logged.
|
||||
* For 6.2 and beyond, this is XFS_LI_BUF. We use this to log everything.
|
||||
*/
|
||||
typedef struct xfs_buf_log_format_t {
|
||||
|
|
|
@ -1,70 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it would be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
#ifndef __XFS_CAP_H__
|
||||
#define __XFS_CAP_H__
|
||||
|
||||
/*
|
||||
* Capabilities
|
||||
*/
|
||||
typedef __uint64_t xfs_cap_value_t;
|
||||
|
||||
typedef struct xfs_cap_set {
|
||||
xfs_cap_value_t cap_effective; /* use in capability checks */
|
||||
xfs_cap_value_t cap_permitted; /* combined with file attrs */
|
||||
xfs_cap_value_t cap_inheritable;/* pass through exec */
|
||||
} xfs_cap_set_t;
|
||||
|
||||
/* On-disk XFS extended attribute names */
|
||||
#define SGI_CAP_FILE "SGI_CAP_FILE"
|
||||
#define SGI_CAP_FILE_SIZE (sizeof(SGI_CAP_FILE)-1)
|
||||
#define SGI_CAP_LINUX "SGI_CAP_LINUX"
|
||||
#define SGI_CAP_LINUX_SIZE (sizeof(SGI_CAP_LINUX)-1)
|
||||
|
||||
/*
|
||||
* For Linux, we take the bitfields directly from capability.h
|
||||
* and no longer attempt to keep this attribute ondisk compatible
|
||||
* with IRIX. Since this attribute is only set on executables,
|
||||
* it just doesn't make much sense to try. We do use a different
|
||||
* named attribute though, to avoid confusion.
|
||||
*/
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#ifdef CONFIG_FS_POSIX_CAP
|
||||
|
||||
#include <linux/posix_cap_xattr.h>
|
||||
|
||||
struct bhv_vnode;
|
||||
|
||||
extern int xfs_cap_vhascap(struct bhv_vnode *);
|
||||
extern int xfs_cap_vset(struct bhv_vnode *, void *, size_t);
|
||||
extern int xfs_cap_vget(struct bhv_vnode *, void *, size_t);
|
||||
extern int xfs_cap_vremove(struct bhv_vnode *);
|
||||
|
||||
#define _CAP_EXISTS xfs_cap_vhascap
|
||||
|
||||
#else
|
||||
#define xfs_cap_vset(v,p,sz) (-EOPNOTSUPP)
|
||||
#define xfs_cap_vget(v,p,sz) (-EOPNOTSUPP)
|
||||
#define xfs_cap_vremove(v) (-EOPNOTSUPP)
|
||||
#define _CAP_EXISTS (NULL)
|
||||
#endif
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif /* __XFS_CAP_H__ */
|
|
@ -1090,8 +1090,7 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
|
|||
if (blk->magic == XFS_DA_NODE_MAGIC) {
|
||||
node = blk->bp->data;
|
||||
max = be16_to_cpu(node->hdr.count);
|
||||
btreehashval = node->btree[max-1].hashval;
|
||||
blk->hashval = be32_to_cpu(btreehashval);
|
||||
blk->hashval = be32_to_cpu(node->btree[max-1].hashval);
|
||||
|
||||
/*
|
||||
* Binary search. (note: small blocks will skip loop)
|
||||
|
@ -2166,21 +2165,6 @@ xfs_da_reada_buf(
|
|||
return rval;
|
||||
}
|
||||
|
||||
/*
|
||||
* Calculate the number of bits needed to hold i different values.
|
||||
*/
|
||||
uint
|
||||
xfs_da_log2_roundup(uint i)
|
||||
{
|
||||
uint rval;
|
||||
|
||||
for (rval = 0; rval < NBBY * sizeof(i); rval++) {
|
||||
if ((1 << rval) >= i)
|
||||
break;
|
||||
}
|
||||
return(rval);
|
||||
}
|
||||
|
||||
kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */
|
||||
kmem_zone_t *xfs_dabuf_zone; /* dabuf zone */
|
||||
|
||||
|
|
|
@ -249,7 +249,6 @@ int xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
|
|||
xfs_dabuf_t *dead_buf);
|
||||
|
||||
uint xfs_da_hashname(const uchar_t *name_string, int name_length);
|
||||
uint xfs_da_log2_roundup(uint i);
|
||||
xfs_da_state_t *xfs_da_state_alloc(void);
|
||||
void xfs_da_state_free(xfs_da_state_t *state);
|
||||
|
||||
|
|
|
@ -41,7 +41,6 @@
|
|||
#include "xfs_itable.h"
|
||||
#include "xfs_dfrag.h"
|
||||
#include "xfs_error.h"
|
||||
#include "xfs_mac.h"
|
||||
#include "xfs_rw.h"
|
||||
|
||||
/*
|
||||
|
|
|
@ -131,32 +131,6 @@ xfs_errortag_add(int error_tag, xfs_mount_t *mp)
|
|||
return 1;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_errortag_clear(int error_tag, xfs_mount_t *mp)
|
||||
{
|
||||
int i;
|
||||
int64_t fsid;
|
||||
|
||||
memcpy(&fsid, mp->m_fixedfsid, sizeof(xfs_fsid_t));
|
||||
|
||||
for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) {
|
||||
if (xfs_etest_fsid[i] == fsid && xfs_etest[i] == error_tag) {
|
||||
xfs_etest[i] = 0;
|
||||
xfs_etest_fsid[i] = 0LL;
|
||||
kmem_free(xfs_etest_fsname[i],
|
||||
strlen(xfs_etest_fsname[i]) + 1);
|
||||
xfs_etest_fsname[i] = NULL;
|
||||
cmn_err(CE_WARN, "Cleared XFS error tag #%d",
|
||||
error_tag);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
cmn_err(CE_WARN, "XFS error tag %d not on", error_tag);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
int
|
||||
xfs_errortag_clearall_umount(int64_t fsid, char *fsname, int loud)
|
||||
{
|
||||
|
|
|
@ -144,7 +144,6 @@ extern void xfs_error_test_init(void);
|
|||
#endif /* __ANSI_CPP__ */
|
||||
|
||||
extern int xfs_errortag_add(int error_tag, xfs_mount_t *mp);
|
||||
extern int xfs_errortag_clear(int error_tag, xfs_mount_t *mp);
|
||||
extern int xfs_errortag_clearall(xfs_mount_t *mp);
|
||||
extern int xfs_errortag_clearall_umount(int64_t fsid, char *fsname, int loud);
|
||||
#else
|
||||
|
@ -180,6 +179,6 @@ extern void xfs_fs_cmn_err(int level, struct xfs_mount *mp, char *fmt, ...);
xfs_fs_cmn_err(level, mp, fmt " Unmount and run xfs_repair.", ## args)

#define xfs_fs_mount_cmn_err(f, fmt, args...) \
((f & XFS_MFSI_QUIET)? cmn_err(CE_WARN, "XFS: " fmt, ## args) : (void)0)
((f & XFS_MFSI_QUIET)? (void)0 : cmn_err(CE_WARN, "XFS: " fmt, ## args))

#endif /* __XFS_ERROR_H__ */

@ -227,7 +227,7 @@ xfs_efi_item_committing(xfs_efi_log_item_t *efip, xfs_lsn_t lsn)
|
|||
/*
|
||||
* This is the ops vector shared by all efi log items.
|
||||
*/
|
||||
STATIC struct xfs_item_ops xfs_efi_item_ops = {
|
||||
static struct xfs_item_ops xfs_efi_item_ops = {
|
||||
.iop_size = (uint(*)(xfs_log_item_t*))xfs_efi_item_size,
|
||||
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
|
||||
xfs_efi_item_format,
|
||||
|
@ -525,7 +525,7 @@ xfs_efd_item_committing(xfs_efd_log_item_t *efip, xfs_lsn_t lsn)
|
|||
/*
|
||||
* This is the ops vector shared by all efd log items.
|
||||
*/
|
||||
STATIC struct xfs_item_ops xfs_efd_item_ops = {
|
||||
static struct xfs_item_ops xfs_efd_item_ops = {
|
||||
.iop_size = (uint(*)(xfs_log_item_t*))xfs_efd_item_size,
|
||||
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
|
||||
xfs_efd_item_format,
|
||||
|
|
|
@ -250,8 +250,7 @@ xfs_growfs_data_private(
|
|||
block->bb_numrecs = cpu_to_be16(1);
|
||||
block->bb_leftsib = cpu_to_be32(NULLAGBLOCK);
|
||||
block->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
|
||||
arec = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_alloc,
|
||||
block, 1, mp->m_alloc_mxr[0]);
|
||||
arec = XFS_BTREE_REC_ADDR(xfs_alloc, block, 1);
|
||||
arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
|
||||
arec->ar_blockcount = cpu_to_be32(
|
||||
agsize - be32_to_cpu(arec->ar_startblock));
|
||||
|
@ -272,8 +271,7 @@ xfs_growfs_data_private(
|
|||
block->bb_numrecs = cpu_to_be16(1);
|
||||
block->bb_leftsib = cpu_to_be32(NULLAGBLOCK);
|
||||
block->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
|
||||
arec = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_alloc,
|
||||
block, 1, mp->m_alloc_mxr[0]);
|
||||
arec = XFS_BTREE_REC_ADDR(xfs_alloc, block, 1);
|
||||
arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
|
||||
arec->ar_blockcount = cpu_to_be32(
|
||||
agsize - be32_to_cpu(arec->ar_startblock));
|
||||
|
@ -460,7 +458,7 @@ xfs_fs_counts(
|
|||
{
|
||||
unsigned long s;
|
||||
|
||||
xfs_icsb_sync_counters_lazy(mp);
|
||||
xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);
|
||||
s = XFS_SB_LOCK(mp);
|
||||
cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
|
||||
cnt->freertx = mp->m_sb.sb_frextents;
|
||||
|
@ -491,7 +489,7 @@ xfs_reserve_blocks(
|
|||
__uint64_t *inval,
|
||||
xfs_fsop_resblks_t *outval)
|
||||
{
|
||||
__int64_t lcounter, delta;
|
||||
__int64_t lcounter, delta, fdblks_delta;
|
||||
__uint64_t request;
|
||||
unsigned long s;
|
||||
|
||||
|
@ -504,17 +502,35 @@ xfs_reserve_blocks(
}

request = *inval;

/*
* With per-cpu counters, this becomes an interesting
* problem. we needto work out if we are freeing or allocation
* blocks first, then we can do the modification as necessary.
*
* We do this under the XFS_SB_LOCK so that if we are near
* ENOSPC, we will hold out any changes while we work out
* what to do. This means that the amount of free space can
* change while we do this, so we need to retry if we end up
* trying to reserve more space than is available.
*
* We also use the xfs_mod_incore_sb() interface so that we
* don't have to care about whether per cpu counter are
* enabled, disabled or even compiled in....
*/
retry:
s = XFS_SB_LOCK(mp);
xfs_icsb_sync_counters_flags(mp, XFS_ICSB_SB_LOCKED);

/*
* If our previous reservation was larger than the current value,
* then move any unused blocks back to the free pool.
*/

fdblks_delta = 0;
if (mp->m_resblks > request) {
lcounter = mp->m_resblks_avail - request;
if (lcounter > 0) { /* release unused blocks */
mp->m_sb.sb_fdblocks += lcounter;
fdblks_delta = lcounter;
mp->m_resblks_avail -= lcounter;
}
mp->m_resblks = request;
@ -522,24 +538,50 @@ xfs_reserve_blocks(
__int64_t free;

free = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
if (!free)
goto out; /* ENOSPC and fdblks_delta = 0 */

delta = request - mp->m_resblks;
lcounter = free - delta;
if (lcounter < 0) {
/* We can't satisfy the request, just get what we can */
mp->m_resblks += free;
mp->m_resblks_avail += free;
fdblks_delta = -free;
mp->m_sb.sb_fdblocks = XFS_ALLOC_SET_ASIDE(mp);
} else {
fdblks_delta = -delta;
mp->m_sb.sb_fdblocks =
lcounter + XFS_ALLOC_SET_ASIDE(mp);
mp->m_resblks = request;
mp->m_resblks_avail += delta;
}
}

out:
outval->resblks = mp->m_resblks;
outval->resblks_avail = mp->m_resblks_avail;
XFS_SB_UNLOCK(mp, s);

if (fdblks_delta) {
/*
* If we are putting blocks back here, m_resblks_avail is
* already at it's max so this will put it in the free pool.
*
* If we need space, we'll either succeed in getting it
* from the free block count or we'll get an enospc. If
* we get a ENOSPC, it means things changed while we were
* calculating fdblks_delta and so we should try again to
* see if there is anything left to reserve.
*
* Don't set the reserved flag here - we don't want to reserve
* the extra reserve blocks from the reserve.....
*/
int error;
error = xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, fdblks_delta, 0);
if (error == ENOSPC)
goto retry;
}

return 0;
}

@ -342,7 +342,7 @@ xfs_ialloc_ag_alloc(
|
|||
return 0;
|
||||
}
|
||||
|
||||
STATIC __inline xfs_agnumber_t
|
||||
STATIC_INLINE xfs_agnumber_t
|
||||
xfs_ialloc_next_ag(
|
||||
xfs_mount_t *mp)
|
||||
{
|
||||
|
|
|
@ -89,7 +89,6 @@ typedef struct xfs_btree_sblock xfs_inobt_block_t;
|
|||
/*
|
||||
* Real block structures have a size equal to the disk block size.
|
||||
*/
|
||||
#define XFS_INOBT_BLOCK_SIZE(lev,cur) (1 << (cur)->bc_blocklog)
|
||||
#define XFS_INOBT_BLOCK_MAXRECS(lev,cur) ((cur)->bc_mp->m_inobt_mxr[lev != 0])
|
||||
#define XFS_INOBT_BLOCK_MINRECS(lev,cur) ((cur)->bc_mp->m_inobt_mnr[lev != 0])
|
||||
#define XFS_INOBT_IS_LAST_REC(cur) \
|
||||
|
@ -110,14 +109,13 @@ typedef struct xfs_btree_sblock xfs_inobt_block_t;
|
|||
* Record, key, and pointer address macros for btree blocks.
|
||||
*/
|
||||
#define XFS_INOBT_REC_ADDR(bb,i,cur) \
|
||||
(XFS_BTREE_REC_ADDR(XFS_INOBT_BLOCK_SIZE(0,cur), xfs_inobt, bb, \
|
||||
i, XFS_INOBT_BLOCK_MAXRECS(0, cur)))
|
||||
(XFS_BTREE_REC_ADDR(xfs_inobt, bb, i))
|
||||
|
||||
#define XFS_INOBT_KEY_ADDR(bb,i,cur) \
|
||||
(XFS_BTREE_KEY_ADDR(XFS_INOBT_BLOCK_SIZE(1,cur), xfs_inobt, bb, \
|
||||
i, XFS_INOBT_BLOCK_MAXRECS(1, cur)))
|
||||
(XFS_BTREE_KEY_ADDR(xfs_inobt, bb, i))
|
||||
|
||||
#define XFS_INOBT_PTR_ADDR(bb,i,cur) \
|
||||
(XFS_BTREE_PTR_ADDR(XFS_INOBT_BLOCK_SIZE(1,cur), xfs_inobt, bb, \
|
||||
(XFS_BTREE_PTR_ADDR(xfs_inobt, bb, \
|
||||
i, XFS_INOBT_BLOCK_MAXRECS(1, cur)))
|
||||
|
||||
/*
|
||||
|
|
|
@ -47,7 +47,6 @@
|
|||
#include "xfs_utils.h"
|
||||
#include "xfs_dir2_trace.h"
|
||||
#include "xfs_quota.h"
|
||||
#include "xfs_mac.h"
|
||||
#include "xfs_acl.h"
|
||||
|
||||
|
||||
|
@ -1699,8 +1698,7 @@ xfs_itruncate_finish(
|
|||
* Duplicate the transaction that has the permanent
|
||||
* reservation and commit the old transaction.
|
||||
*/
|
||||
error = xfs_bmap_finish(tp, &free_list, first_block,
|
||||
&committed);
|
||||
error = xfs_bmap_finish(tp, &free_list, &committed);
|
||||
ntp = *tp;
|
||||
if (error) {
|
||||
/*
|
||||
|
@ -1810,7 +1808,7 @@ xfs_igrow_start(
|
|||
* and any blocks between the old and new file sizes.
|
||||
*/
|
||||
error = xfs_zero_eof(XFS_ITOV(ip), &ip->i_iocore, new_size,
|
||||
ip->i_d.di_size, new_size);
|
||||
ip->i_d.di_size);
|
||||
return error;
|
||||
}
|
||||
|
||||
|
@ -2125,7 +2123,7 @@ xfs_iunlink_remove(
|
|||
return 0;
|
||||
}
|
||||
|
||||
static __inline__ int xfs_inode_clean(xfs_inode_t *ip)
|
||||
STATIC_INLINE int xfs_inode_clean(xfs_inode_t *ip)
|
||||
{
|
||||
return (((ip->i_itemp == NULL) ||
|
||||
!(ip->i_itemp->ili_format.ilf_fields & XFS_ILOG_ALL)) &&
|
||||
|
@ -2707,10 +2705,24 @@ xfs_idestroy(
ktrace_free(ip->i_dir_trace);
#endif
if (ip->i_itemp) {
/* XXXdpd should be able to assert this but shutdown
* is leaving the AIL behind. */
ASSERT(((ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL) == 0) ||
/*
* Only if we are shutting down the fs will we see an
* inode still in the AIL. If it is there, we should remove
* it to prevent a use-after-free from occurring.
*/
xfs_mount_t *mp = ip->i_mount;
xfs_log_item_t *lip = &ip->i_itemp->ili_item;
int s;

ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
XFS_FORCED_SHUTDOWN(ip->i_mount));
if (lip->li_flags & XFS_LI_IN_AIL) {
AIL_LOCK(mp, s);
if (lip->li_flags & XFS_LI_IN_AIL)
xfs_trans_delete_ail(mp, lip, s);
else
AIL_UNLOCK(mp, s);
}
xfs_inode_item_destroy(ip);
}
kmem_zone_free(xfs_inode_zone, ip);

@ -887,7 +887,7 @@ xfs_inode_item_committing(
|
|||
/*
|
||||
* This is the ops vector shared by all buf log items.
|
||||
*/
|
||||
STATIC struct xfs_item_ops xfs_inode_item_ops = {
|
||||
static struct xfs_item_ops xfs_inode_item_ops = {
|
||||
.iop_size = (uint(*)(xfs_log_item_t*))xfs_inode_item_size,
|
||||
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
|
||||
xfs_inode_item_format,
|
||||
|
|
|
@ -43,8 +43,6 @@
|
|||
#include "xfs_itable.h"
|
||||
#include "xfs_rw.h"
|
||||
#include "xfs_acl.h"
|
||||
#include "xfs_cap.h"
|
||||
#include "xfs_mac.h"
|
||||
#include "xfs_attr.h"
|
||||
#include "xfs_buf_item.h"
|
||||
#include "xfs_trans_space.h"
|
||||
|
@ -542,7 +540,7 @@ xfs_iomap_write_direct(
|
|||
/*
|
||||
* Complete the transaction
|
||||
*/
|
||||
error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed);
|
||||
error = xfs_bmap_finish(&tp, &free_list, &committed);
|
||||
if (error)
|
||||
goto error0;
|
||||
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
|
||||
|
@ -838,8 +836,7 @@ xfs_iomap_write_allocate(
|
|||
if (error)
|
||||
goto trans_cancel;
|
||||
|
||||
error = xfs_bmap_finish(&tp, &free_list,
|
||||
first_block, &committed);
|
||||
error = xfs_bmap_finish(&tp, &free_list, &committed);
|
||||
if (error)
|
||||
goto trans_cancel;
|
||||
|
||||
|
@ -947,8 +944,7 @@ xfs_iomap_write_unwritten(
|
|||
if (error)
|
||||
goto error_on_bmapi_transaction;
|
||||
|
||||
error = xfs_bmap_finish(&(tp), &(free_list),
|
||||
firstfsb, &committed);
|
||||
error = xfs_bmap_finish(&(tp), &(free_list), &committed);
|
||||
if (error)
|
||||
goto error_on_bmapi_transaction;
|
||||
|
||||
|
|
|
@ -1514,7 +1514,6 @@ xlog_recover_reorder_trans(
|
|||
{
|
||||
xlog_recover_item_t *first_item, *itemq, *itemq_next;
|
||||
xfs_buf_log_format_t *buf_f;
|
||||
xfs_buf_log_format_v1_t *obuf_f;
|
||||
ushort flags = 0;
|
||||
|
||||
first_item = itemq = trans->r_itemq;
|
||||
|
@ -1522,29 +1521,16 @@ xlog_recover_reorder_trans(
|
|||
do {
|
||||
itemq_next = itemq->ri_next;
|
||||
buf_f = (xfs_buf_log_format_t *)itemq->ri_buf[0].i_addr;
|
||||
switch (ITEM_TYPE(itemq)) {
|
||||
case XFS_LI_BUF:
|
||||
flags = buf_f->blf_flags;
|
||||
break;
|
||||
case XFS_LI_6_1_BUF:
|
||||
case XFS_LI_5_3_BUF:
|
||||
obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
|
||||
flags = obuf_f->blf_flags;
|
||||
break;
|
||||
}
|
||||
|
||||
switch (ITEM_TYPE(itemq)) {
|
||||
case XFS_LI_BUF:
|
||||
case XFS_LI_6_1_BUF:
|
||||
case XFS_LI_5_3_BUF:
|
||||
flags = buf_f->blf_flags;
|
||||
if (!(flags & XFS_BLI_CANCEL)) {
|
||||
xlog_recover_insert_item_frontq(&trans->r_itemq,
|
||||
itemq);
|
||||
break;
|
||||
}
|
||||
case XFS_LI_INODE:
|
||||
case XFS_LI_6_1_INODE:
|
||||
case XFS_LI_5_3_INODE:
|
||||
case XFS_LI_DQUOT:
|
||||
case XFS_LI_QUOTAOFF:
|
||||
case XFS_LI_EFD:
|
||||
|
@ -1583,7 +1569,6 @@ xlog_recover_do_buffer_pass1(
|
|||
xfs_buf_cancel_t *nextp;
|
||||
xfs_buf_cancel_t *prevp;
|
||||
xfs_buf_cancel_t **bucket;
|
||||
xfs_buf_log_format_v1_t *obuf_f;
|
||||
xfs_daddr_t blkno = 0;
|
||||
uint len = 0;
|
||||
ushort flags = 0;
|
||||
|
@ -1594,13 +1579,6 @@ xlog_recover_do_buffer_pass1(
|
|||
len = buf_f->blf_len;
|
||||
flags = buf_f->blf_flags;
|
||||
break;
|
||||
case XFS_LI_6_1_BUF:
|
||||
case XFS_LI_5_3_BUF:
|
||||
obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
|
||||
blkno = (xfs_daddr_t) obuf_f->blf_blkno;
|
||||
len = obuf_f->blf_len;
|
||||
flags = obuf_f->blf_flags;
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1746,7 +1724,6 @@ xlog_recover_do_buffer_pass2(
|
|||
xlog_t *log,
|
||||
xfs_buf_log_format_t *buf_f)
|
||||
{
|
||||
xfs_buf_log_format_v1_t *obuf_f;
|
||||
xfs_daddr_t blkno = 0;
|
||||
ushort flags = 0;
|
||||
uint len = 0;
|
||||
|
@ -1757,13 +1734,6 @@ xlog_recover_do_buffer_pass2(
|
|||
flags = buf_f->blf_flags;
|
||||
len = buf_f->blf_len;
|
||||
break;
|
||||
case XFS_LI_6_1_BUF:
|
||||
case XFS_LI_5_3_BUF:
|
||||
obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
|
||||
blkno = (xfs_daddr_t) obuf_f->blf_blkno;
|
||||
flags = obuf_f->blf_flags;
|
||||
len = (xfs_daddr_t) obuf_f->blf_len;
|
||||
break;
|
||||
}
|
||||
|
||||
return xlog_check_buffer_cancelled(log, blkno, len, flags);
|
||||
|
@ -1799,7 +1769,6 @@ xlog_recover_do_inode_buffer(
|
|||
int inodes_per_buf;
|
||||
xfs_agino_t *logged_nextp;
|
||||
xfs_agino_t *buffer_nextp;
|
||||
xfs_buf_log_format_v1_t *obuf_f;
|
||||
unsigned int *data_map = NULL;
|
||||
unsigned int map_size = 0;
|
||||
|
||||
|
@ -1808,12 +1777,6 @@ xlog_recover_do_inode_buffer(
|
|||
data_map = buf_f->blf_data_map;
|
||||
map_size = buf_f->blf_map_size;
|
||||
break;
|
||||
case XFS_LI_6_1_BUF:
|
||||
case XFS_LI_5_3_BUF:
|
||||
obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
|
||||
data_map = obuf_f->blf_data_map;
|
||||
map_size = obuf_f->blf_map_size;
|
||||
break;
|
||||
}
|
||||
/*
|
||||
* Set the variables corresponding to the current region to
|
||||
|
@ -1912,7 +1875,6 @@ xlog_recover_do_reg_buffer(
|
|||
int i;
|
||||
int bit;
|
||||
int nbits;
|
||||
xfs_buf_log_format_v1_t *obuf_f;
|
||||
unsigned int *data_map = NULL;
|
||||
unsigned int map_size = 0;
|
||||
int error;
|
||||
|
@ -1922,12 +1884,6 @@ xlog_recover_do_reg_buffer(
|
|||
data_map = buf_f->blf_data_map;
|
||||
map_size = buf_f->blf_map_size;
|
||||
break;
|
||||
case XFS_LI_6_1_BUF:
|
||||
case XFS_LI_5_3_BUF:
|
||||
obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
|
||||
data_map = obuf_f->blf_data_map;
|
||||
map_size = obuf_f->blf_map_size;
|
||||
break;
|
||||
}
|
||||
bit = 0;
|
||||
i = 1; /* 0 is the buf format structure */
|
||||
|
@ -2160,7 +2116,6 @@ xlog_recover_do_buffer_trans(
|
|||
int pass)
|
||||
{
|
||||
xfs_buf_log_format_t *buf_f;
|
||||
xfs_buf_log_format_v1_t *obuf_f;
|
||||
xfs_mount_t *mp;
|
||||
xfs_buf_t *bp;
|
||||
int error;
|
||||
|
@ -2197,13 +2152,6 @@ xlog_recover_do_buffer_trans(
|
|||
len = buf_f->blf_len;
|
||||
flags = buf_f->blf_flags;
|
||||
break;
|
||||
case XFS_LI_6_1_BUF:
|
||||
case XFS_LI_5_3_BUF:
|
||||
obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
|
||||
blkno = obuf_f->blf_blkno;
|
||||
len = obuf_f->blf_len;
|
||||
flags = obuf_f->blf_flags;
|
||||
break;
|
||||
default:
|
||||
xfs_fs_cmn_err(CE_ALERT, log->l_mp,
|
||||
"xfs_log_recover: unknown buffer type 0x%x, logdev %s",
|
||||
|
@ -2830,9 +2778,7 @@ xlog_recover_do_trans(
|
|||
* where xfs_daddr_t is 32-bits but mount will warn us
|
||||
* off a > 1 TB filesystem before we get here.
|
||||
*/
|
||||
if ((ITEM_TYPE(item) == XFS_LI_BUF) ||
|
||||
(ITEM_TYPE(item) == XFS_LI_6_1_BUF) ||
|
||||
(ITEM_TYPE(item) == XFS_LI_5_3_BUF)) {
|
||||
if ((ITEM_TYPE(item) == XFS_LI_BUF)) {
|
||||
if ((error = xlog_recover_do_buffer_trans(log, item,
|
||||
pass)))
|
||||
break;
|
||||
|
@ -3902,6 +3848,9 @@ xlog_do_recover(
|
|||
ASSERT(XFS_SB_GOOD_VERSION(sbp));
|
||||
xfs_buf_relse(bp);
|
||||
|
||||
/* We've re-read the superblock so re-initialize per-cpu counters */
|
||||
xfs_icsb_reinit_counters(log->l_mp);
|
||||
|
||||
xlog_recover_check_summary(log);
|
||||
|
||||
/* Normal transactions can now occur */
|
||||
|
|
106
fs/xfs/xfs_mac.h
|
@ -1,106 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2001-2002,2005 Silicon Graphics, Inc.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it would be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
#ifndef __XFS_MAC_H__
|
||||
#define __XFS_MAC_H__
|
||||
|
||||
/*
|
||||
* Mandatory Access Control
|
||||
*
|
||||
* Layout of a composite MAC label:
|
||||
* ml_list contains the list of categories (MSEN) followed by the list of
|
||||
* divisions (MINT). This is actually a header for the data structure which
|
||||
* will have an ml_list with more than one element.
|
||||
*
|
||||
* -------------------------------
|
||||
* | ml_msen_type | ml_mint_type |
|
||||
* -------------------------------
|
||||
* | ml_level | ml_grade |
|
||||
* -------------------------------
|
||||
* | ml_catcount |
|
||||
* -------------------------------
|
||||
* | ml_divcount |
|
||||
* -------------------------------
|
||||
* | category 1 |
|
||||
* | . . . |
|
||||
* | category N | (where N = ml_catcount)
|
||||
* -------------------------------
|
||||
* | division 1 |
|
||||
* | . . . |
|
||||
* | division M | (where M = ml_divcount)
|
||||
* -------------------------------
|
||||
*/
|
||||
#define XFS_MAC_MAX_SETS 250
|
||||
typedef struct xfs_mac_label {
|
||||
__uint8_t ml_msen_type; /* MSEN label type */
|
||||
__uint8_t ml_mint_type; /* MINT label type */
|
||||
__uint8_t ml_level; /* Hierarchical level */
|
||||
__uint8_t ml_grade; /* Hierarchical grade */
|
||||
__uint16_t ml_catcount; /* Category count */
|
||||
__uint16_t ml_divcount; /* Division count */
|
||||
/* Category set, then Division set */
|
||||
__uint16_t ml_list[XFS_MAC_MAX_SETS];
|
||||
} xfs_mac_label_t;
|
||||
|
||||
/* MSEN label type names. Choose an upper case ASCII character. */
|
||||
#define XFS_MSEN_ADMIN_LABEL 'A' /* Admin: low<admin != tcsec<high */
|
||||
#define XFS_MSEN_EQUAL_LABEL 'E' /* Wildcard - always equal */
|
||||
#define XFS_MSEN_HIGH_LABEL 'H' /* System High - always dominates */
|
||||
#define XFS_MSEN_MLD_HIGH_LABEL 'I' /* System High, multi-level dir */
|
||||
#define XFS_MSEN_LOW_LABEL 'L' /* System Low - always dominated */
|
||||
#define XFS_MSEN_MLD_LABEL 'M' /* TCSEC label on a multi-level dir */
|
||||
#define XFS_MSEN_MLD_LOW_LABEL 'N' /* System Low, multi-level dir */
|
||||
#define XFS_MSEN_TCSEC_LABEL 'T' /* TCSEC label */
|
||||
#define XFS_MSEN_UNKNOWN_LABEL 'U' /* unknown label */
|
||||
|
||||
/* MINT label type names. Choose a lower case ASCII character. */
|
||||
#define XFS_MINT_BIBA_LABEL 'b' /* Dual of a TCSEC label */
|
||||
#define XFS_MINT_EQUAL_LABEL 'e' /* Wildcard - always equal */
|
||||
#define XFS_MINT_HIGH_LABEL 'h' /* High Grade - always dominates */
|
||||
#define XFS_MINT_LOW_LABEL 'l' /* Low Grade - always dominated */
|
||||
|
||||
/* On-disk XFS extended attribute names */
|
||||
#define SGI_MAC_FILE "SGI_MAC_FILE"
|
||||
#define SGI_MAC_FILE_SIZE (sizeof(SGI_MAC_FILE)-1)
|
||||
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#ifdef CONFIG_FS_POSIX_MAC
|
||||
|
||||
/* NOT YET IMPLEMENTED */
|
||||
|
||||
#define MACEXEC 00100
|
||||
#define MACWRITE 00200
|
||||
#define MACREAD 00400
|
||||
|
||||
struct xfs_inode;
|
||||
extern int xfs_mac_iaccess(struct xfs_inode *, mode_t, cred_t *);
|
||||
|
||||
#define _MAC_XFS_IACCESS(i,m,c) (xfs_mac_iaccess(i,m,c))
|
||||
#define _MAC_VACCESS(v,c,m) (xfs_mac_vaccess(v,c,m))
|
||||
#define _MAC_EXISTS xfs_mac_vhaslabel
|
||||
|
||||
#else
|
||||
#define _MAC_XFS_IACCESS(i,m,c) (0)
|
||||
#define _MAC_VACCESS(v,c,m) (0)
|
||||
#define _MAC_EXISTS (NULL)
|
||||
#endif
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif /* __XFS_MAC_H__ */
|
|
@ -52,21 +52,19 @@ STATIC void xfs_unmountfs_wait(xfs_mount_t *);
|
|||
|
||||
#ifdef HAVE_PERCPU_SB
|
||||
STATIC void xfs_icsb_destroy_counters(xfs_mount_t *);
|
||||
STATIC void xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t, int);
|
||||
STATIC void xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
|
||||
int, int);
|
||||
STATIC void xfs_icsb_sync_counters(xfs_mount_t *);
|
||||
STATIC int xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t,
|
||||
int, int);
|
||||
STATIC int xfs_icsb_modify_counters_locked(xfs_mount_t *, xfs_sb_field_t,
|
||||
int, int);
|
||||
int64_t, int);
|
||||
STATIC int xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
|
||||
|
||||
#else
|
||||
|
||||
#define xfs_icsb_destroy_counters(mp) do { } while (0)
|
||||
#define xfs_icsb_balance_counter(mp, a, b) do { } while (0)
|
||||
#define xfs_icsb_balance_counter(mp, a, b, c) do { } while (0)
|
||||
#define xfs_icsb_sync_counters(mp) do { } while (0)
|
||||
#define xfs_icsb_modify_counters(mp, a, b, c) do { } while (0)
|
||||
#define xfs_icsb_modify_counters_locked(mp, a, b, c) do { } while (0)
|
||||
|
||||
#endif
|
||||
|
||||
|
@ -545,9 +543,8 @@ xfs_readsb(xfs_mount_t *mp, int flags)
|
|||
ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
|
||||
}
|
||||
|
||||
xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
|
||||
xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
|
||||
xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
|
||||
/* Initialize per-cpu counters */
|
||||
xfs_icsb_reinit_counters(mp);
|
||||
|
||||
mp->m_sb_bp = bp;
|
||||
xfs_buf_relse(bp);
|
||||
|
@ -1254,8 +1251,11 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
|
|||
* The SB_LOCK must be held when this routine is called.
|
||||
*/
|
||||
int
|
||||
xfs_mod_incore_sb_unlocked(xfs_mount_t *mp, xfs_sb_field_t field,
|
||||
int delta, int rsvd)
|
||||
xfs_mod_incore_sb_unlocked(
|
||||
xfs_mount_t *mp,
|
||||
xfs_sb_field_t field,
|
||||
int64_t delta,
|
||||
int rsvd)
|
||||
{
|
||||
int scounter; /* short counter for 32 bit fields */
|
||||
long long lcounter; /* long counter for 64 bit fields */
|
||||
|
@ -1287,7 +1287,6 @@ xfs_mod_incore_sb_unlocked(xfs_mount_t *mp, xfs_sb_field_t field,
|
|||
mp->m_sb.sb_ifree = lcounter;
|
||||
return 0;
|
||||
case XFS_SBS_FDBLOCKS:
|
||||
|
||||
lcounter = (long long)
|
||||
mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
|
||||
res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
|
||||
|
@ -1418,7 +1417,11 @@ xfs_mod_incore_sb_unlocked(xfs_mount_t *mp, xfs_sb_field_t field,
|
|||
* routine to do the work.
|
||||
*/
|
||||
int
|
||||
xfs_mod_incore_sb(xfs_mount_t *mp, xfs_sb_field_t field, int delta, int rsvd)
|
||||
xfs_mod_incore_sb(
|
||||
xfs_mount_t *mp,
|
||||
xfs_sb_field_t field,
|
||||
int64_t delta,
|
||||
int rsvd)
|
||||
{
|
||||
unsigned long s;
|
||||
int status;
|
||||
|
@ -1485,9 +1488,11 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
|
|||
case XFS_SBS_IFREE:
|
||||
case XFS_SBS_FDBLOCKS:
|
||||
if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
|
||||
status = xfs_icsb_modify_counters_locked(mp,
|
||||
XFS_SB_UNLOCK(mp, s);
|
||||
status = xfs_icsb_modify_counters(mp,
|
||||
msbp->msb_field,
|
||||
msbp->msb_delta, rsvd);
|
||||
s = XFS_SB_LOCK(mp);
|
||||
break;
|
||||
}
|
||||
/* FALLTHROUGH */
|
||||
|
@ -1521,11 +1526,12 @@ xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
|
|||
case XFS_SBS_IFREE:
|
||||
case XFS_SBS_FDBLOCKS:
|
||||
if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
|
||||
status =
|
||||
xfs_icsb_modify_counters_locked(mp,
|
||||
XFS_SB_UNLOCK(mp, s);
|
||||
status = xfs_icsb_modify_counters(mp,
|
||||
msbp->msb_field,
|
||||
-(msbp->msb_delta),
|
||||
rsvd);
|
||||
s = XFS_SB_LOCK(mp);
|
||||
break;
|
||||
}
|
||||
/* FALLTHROUGH */
|
||||
|
@ -1733,14 +1739,17 @@ xfs_icsb_cpu_notify(
|
|||
memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
|
||||
break;
|
||||
case CPU_ONLINE:
|
||||
xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
|
||||
xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
|
||||
xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
|
||||
xfs_icsb_lock(mp);
|
||||
xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
|
||||
xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
|
||||
xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
|
||||
xfs_icsb_unlock(mp);
|
||||
break;
|
||||
case CPU_DEAD:
|
||||
/* Disable all the counters, then fold the dead cpu's
|
||||
* count into the total on the global superblock and
|
||||
* re-enable the counters. */
|
||||
xfs_icsb_lock(mp);
|
||||
s = XFS_SB_LOCK(mp);
|
||||
xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
|
||||
xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
|
||||
|
@ -1752,10 +1761,14 @@ xfs_icsb_cpu_notify(
|
|||
|
||||
memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
|
||||
|
||||
xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, XFS_ICSB_SB_LOCKED);
|
||||
xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, XFS_ICSB_SB_LOCKED);
|
||||
xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, XFS_ICSB_SB_LOCKED);
|
||||
xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT,
|
||||
XFS_ICSB_SB_LOCKED, 0);
|
||||
xfs_icsb_balance_counter(mp, XFS_SBS_IFREE,
|
||||
XFS_ICSB_SB_LOCKED, 0);
|
||||
xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS,
|
||||
XFS_ICSB_SB_LOCKED, 0);
|
||||
XFS_SB_UNLOCK(mp, s);
|
||||
xfs_icsb_unlock(mp);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@@ -1784,6 +1797,9 @@ xfs_icsb_init_counters(
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
	}

	mutex_init(&mp->m_icsb_mutex);

	/*
	 * start with all counters disabled so that the
	 * initial balance kicks us off correctly
@@ -1792,6 +1808,22 @@ xfs_icsb_init_counters(
	return 0;
}

void
xfs_icsb_reinit_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_lock(mp);
	/*
	 * start with all counters disabled so that the
	 * initial balance kicks us off correctly
	 */
	mp->m_icsb_counters = -1;
	xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
	xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
	xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
	xfs_icsb_unlock(mp);
}

STATIC void
xfs_icsb_destroy_counters(
	xfs_mount_t	*mp)
@@ -1800,9 +1832,10 @@ xfs_icsb_destroy_counters(
		unregister_hotcpu_notifier(&mp->m_icsb_notifier);
		free_percpu(mp->m_sb_cnts);
	}
	mutex_destroy(&mp->m_icsb_mutex);
}

STATIC inline void
STATIC_INLINE void
xfs_icsb_lock_cntr(
	xfs_icsb_cnts_t	*icsbp)
{
@@ -1811,7 +1844,7 @@ xfs_icsb_lock_cntr(
	}
}

STATIC inline void
STATIC_INLINE void
xfs_icsb_unlock_cntr(
	xfs_icsb_cnts_t	*icsbp)
{
@@ -1819,7 +1852,7 @@ xfs_icsb_unlock_cntr(
}

STATIC inline void
STATIC_INLINE void
xfs_icsb_lock_all_counters(
	xfs_mount_t	*mp)
{
@@ -1832,7 +1865,7 @@ xfs_icsb_lock_all_counters(
	}
}

STATIC inline void
STATIC_INLINE void
xfs_icsb_unlock_all_counters(
	xfs_mount_t	*mp)
{
@@ -1888,6 +1921,17 @@ xfs_icsb_disable_counter(

	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));

	/*
	 * If we are already disabled, then there is nothing to do
	 * here. We check before locking all the counters to avoid
	 * the expensive lock operation when being called in the
	 * slow path and the counter is already disabled. This is
	 * safe because the only time we set or clear this state is under
	 * the m_icsb_mutex.
	 */
	if (xfs_icsb_counter_disabled(mp, field))
		return 0;

	xfs_icsb_lock_all_counters(mp);
	if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
		/* drain back to superblock */
@@ -1948,8 +1992,8 @@ xfs_icsb_enable_counter(
	xfs_icsb_unlock_all_counters(mp);
}

STATIC void
xfs_icsb_sync_counters_int(
void
xfs_icsb_sync_counters_flags(
	xfs_mount_t	*mp,
	int		flags)
{
@@ -1981,40 +2025,39 @@ STATIC void
xfs_icsb_sync_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_sync_counters_int(mp, 0);
}

/*
 * lazy addition used for things like df, background sb syncs, etc
 */
void
xfs_icsb_sync_counters_lazy(
	xfs_mount_t	*mp)
{
	xfs_icsb_sync_counters_int(mp, XFS_ICSB_LAZY_COUNT);
	xfs_icsb_sync_counters_flags(mp, 0);
}

/*
 * Balance and enable/disable counters as necessary.
 *
 * Thresholds for re-enabling counters are somewhat magic.
 * inode counts are chosen to be the same number as single
 * on disk allocation chunk per CPU, and free blocks is
 * something far enough zero that we aren't going thrash
 * when we get near ENOSPC.
 * Thresholds for re-enabling counters are somewhat magic. inode counts are
 * chosen to be the same number as single on disk allocation chunk per CPU, and
 * free blocks is something far enough zero that we aren't going thrash when we
 * get near ENOSPC. We also need to supply a minimum we require per cpu to
 * prevent looping endlessly when xfs_alloc_space asks for more than will
 * be distributed to a single CPU but each CPU has enough blocks to be
 * reenabled.
 *
 * Note that we can be called when counters are already disabled.
 * xfs_icsb_disable_counter() optimises the counter locking in this case to
 * prevent locking every per-cpu counter needlessly.
 */
#define XFS_ICSB_INO_CNTR_REENABLE	64

#define XFS_ICSB_INO_CNTR_REENABLE	(uint64_t)64
#define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
		(512 + XFS_ALLOC_SET_ASIDE(mp))
		(uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
STATIC void
xfs_icsb_balance_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int		flags)
	int		flags,
	int		min_per_cpu)
{
	uint64_t	count, resid;
	int		weight = num_online_cpus();
	int		s;
	uint64_t	min = (uint64_t)min_per_cpu;

	if (!(flags & XFS_ICSB_SB_LOCKED))
		s = XFS_SB_LOCK(mp);
@@ -2027,19 +2070,19 @@ xfs_icsb_balance_counter(
	case XFS_SBS_ICOUNT:
		count = mp->m_sb.sb_icount;
		resid = do_div(count, weight);
		if (count < XFS_ICSB_INO_CNTR_REENABLE)
		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
			goto out;
		break;
	case XFS_SBS_IFREE:
		count = mp->m_sb.sb_ifree;
		resid = do_div(count, weight);
		if (count < XFS_ICSB_INO_CNTR_REENABLE)
		if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
			goto out;
		break;
	case XFS_SBS_FDBLOCKS:
		count = mp->m_sb.sb_fdblocks;
		resid = do_div(count, weight);
		if (count < XFS_ICSB_FDBLK_CNTR_REENABLE(mp))
		if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
			goto out;
		break;
	default:
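The balance logic above divides the global count across the online CPUs with do_div() and refuses to re-enable the per-cpu fast path when each CPU's share would fall below the re-enable threshold (or the caller-supplied minimum). A minimal userspace sketch of the same idea, with invented names and plain C division standing in for do_div():

#include <stdint.h>
#include <stdio.h>

/*
 * Split a global count evenly across ncpus slots, give the remainder to
 * CPU 0, and refuse to re-enable the fast path if the per-CPU share is
 * below the re-enable minimum.
 */
static int balance_counter(uint64_t global, unsigned ncpus,
			   uint64_t reenable_min, uint64_t per_cpu[])
{
	uint64_t share = global / ncpus;
	uint64_t resid = global % ncpus;
	unsigned i;

	if (share < reenable_min)
		return -1;		/* leave the counter disabled */

	for (i = 0; i < ncpus; i++)
		per_cpu[i] = share + (i == 0 ? resid : 0);
	return 0;
}

int main(void)
{
	uint64_t cnt[4];

	/* 1000003 free blocks over 4 CPUs, re-enable threshold of 512 */
	if (balance_counter(1000003, 4, 512, cnt) == 0)
		printf("cpu0=%llu cpu1=%llu\n",
		       (unsigned long long)cnt[0], (unsigned long long)cnt[1]);
	return 0;
}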
@@ -2054,32 +2097,39 @@ out:
		XFS_SB_UNLOCK(mp, s);
}

STATIC int
xfs_icsb_modify_counters_int(
int
xfs_icsb_modify_counters(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int		delta,
	int		rsvd,
	int		flags)
	int64_t		delta,
	int		rsvd)
{
	xfs_icsb_cnts_t	*icsbp;
	long long	lcounter;	/* long counter for 64 bit fields */
	int		cpu, s, locked = 0;
	int		ret = 0, balance_done = 0;
	int		cpu, ret = 0, s;

	might_sleep();
again:
	cpu = get_cpu();
	icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu),
	xfs_icsb_lock_cntr(icsbp);
	icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu);

	/*
	 * if the counter is disabled, go to slow path
	 */
	if (unlikely(xfs_icsb_counter_disabled(mp, field)))
		goto slow_path;
	xfs_icsb_lock_cntr(icsbp);
	if (unlikely(xfs_icsb_counter_disabled(mp, field))) {
		xfs_icsb_unlock_cntr(icsbp);
		goto slow_path;
	}

	switch (field) {
	case XFS_SBS_ICOUNT:
		lcounter = icsbp->icsb_icount;
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto slow_path;
			goto balance_counter;
		icsbp->icsb_icount = lcounter;
		break;
@@ -2087,7 +2137,7 @@ again:
		lcounter = icsbp->icsb_ifree;
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto slow_path;
			goto balance_counter;
		icsbp->icsb_ifree = lcounter;
		break;
@@ -2097,7 +2147,7 @@ again:
		lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto slow_path;
			goto balance_counter;
		icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
		break;
	default:
@@ -2106,72 +2156,78 @@ again:
	}
	xfs_icsb_unlock_cntr(icsbp);
	put_cpu();
	if (locked)
		XFS_SB_UNLOCK(mp, s);
	return 0;

	/*
	 * The slow path needs to be run with the SBLOCK
	 * held so that we prevent other threads from
	 * attempting to run this path at the same time.
	 * this provides exclusion for the balancing code,
	 * and exclusive fallback if the balance does not
	 * provide enough resources to continue in an unlocked
	 * manner.
	 */
slow_path:
	put_cpu();

	/*
	 * serialise with a mutex so we don't burn lots of cpu on
	 * the superblock lock. We still need to hold the superblock
	 * lock, however, when we modify the global structures.
	 */
	xfs_icsb_lock(mp);

	/*
	 * Now running atomically.
	 *
	 * If the counter is enabled, someone has beaten us to rebalancing.
	 * Drop the lock and try again in the fast path....
	 */
	if (!(xfs_icsb_counter_disabled(mp, field))) {
		xfs_icsb_unlock(mp);
		goto again;
	}

	/*
	 * The counter is currently disabled. Because we are
	 * running atomically here, we know a rebalance cannot
	 * be in progress. Hence we can go straight to operating
	 * on the global superblock. We do not call xfs_mod_incore_sb()
	 * here even though we need to get the SB_LOCK. Doing so
	 * will cause us to re-enter this function and deadlock.
	 * Hence we get the SB_LOCK ourselves and then call
	 * xfs_mod_incore_sb_unlocked() as the unlocked path operates
	 * directly on the global counters.
	 */
	s = XFS_SB_LOCK(mp);
	ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
	XFS_SB_UNLOCK(mp, s);

	/*
	 * Now that we've modified the global superblock, we
	 * may be able to re-enable the distributed counters
	 * (e.g. lots of space just got freed). After that
	 * we are done.
	 */
	if (ret != ENOSPC)
		xfs_icsb_balance_counter(mp, field, 0, 0);
	xfs_icsb_unlock(mp);
	return ret;

balance_counter:
	xfs_icsb_unlock_cntr(icsbp);
	put_cpu();

	/* need to hold superblock incase we need
	 * to disable a counter */
	if (!(flags & XFS_ICSB_SB_LOCKED)) {
		s = XFS_SB_LOCK(mp);
		locked = 1;
		flags |= XFS_ICSB_SB_LOCKED;
	}
	if (!balance_done) {
		xfs_icsb_balance_counter(mp, field, flags);
		balance_done = 1;
		goto again;
	} else {
		/*
		 * we might not have enough on this local
		 * cpu to allocate for a bulk request.
		 * We need to drain this field from all CPUs
		 * and disable the counter fastpath
	 * We may have multiple threads here if multiple per-cpu
	 * counters run dry at the same time. This will mean we can
	 * do more balances than strictly necessary but it is not
	 * the common slowpath case.
		 */
		xfs_icsb_disable_counter(mp, field);
	}
	xfs_icsb_lock(mp);

	ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);

	if (locked)
		XFS_SB_UNLOCK(mp, s);
	return ret;
}

STATIC int
xfs_icsb_modify_counters(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int		delta,
	int		rsvd)
{
	return xfs_icsb_modify_counters_int(mp, field, delta, rsvd, 0);
}

/*
 * Called when superblock is already locked
/*
 * running atomically.
 *
 * This will leave the counter in the correct state for future
 * accesses. After the rebalance, we simply try again and our retry
 * will either succeed through the fast path or slow path without
 * another balance operation being required.
 */
STATIC int
xfs_icsb_modify_counters_locked(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int		delta,
	int		rsvd)
{
	return xfs_icsb_modify_counters_int(mp, field, delta,
			rsvd, XFS_ICSB_SB_LOCKED);
	xfs_icsb_balance_counter(mp, field, 0, delta);
	xfs_icsb_unlock(mp);
	goto again;
}

#endif
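The function above is built around a fast path that touches only the local CPU's counter and a slow path that serialises on m_icsb_mutex and falls back to the global superblock value. A stripped-down, single-threaded userspace sketch of that control flow follows; the names and structure are invented for the example and none of the kernel locking is modelled:

#include <stdint.h>
#include <stdio.h>

#define NCPUS 4

struct counters {
	int64_t	percpu[NCPUS];
	int64_t	global;
	int	disabled;	/* fast path off -> use the global value */
};

static int modify_counter(struct counters *c, int cpu, int64_t delta)
{
	/* fast path: the local slot can absorb the change on its own */
	if (!c->disabled && c->percpu[cpu] + delta >= 0) {
		c->percpu[cpu] += delta;
		return 0;
	}

	/* slow path: fold every per-CPU share into the global count */
	for (int i = 0; i < NCPUS; i++) {
		c->global += c->percpu[i];
		c->percpu[i] = 0;
	}
	c->disabled = 1;

	if (c->global + delta < 0)
		return -1;		/* ENOSPC analogue */
	c->global += delta;
	return 0;
}

int main(void)
{
	struct counters c = { .percpu = { 100, 100, 100, 100 } };

	printf("%d\n", modify_counter(&c, 1, -50));	/* fast path */
	printf("%d\n", modify_counter(&c, 1, -200));	/* falls back */
	return 0;
}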
@@ -18,6 +18,7 @@
#ifndef __XFS_MOUNT_H__
#define __XFS_MOUNT_H__

typedef struct xfs_trans_reservations {
	uint	tr_write;	/* extent alloc trans */
	uint	tr_itruncate;	/* truncate trans */
@@ -306,11 +307,13 @@ typedef struct xfs_icsb_cnts {
#define XFS_ICSB_LAZY_COUNT	(1 << 1)	/* accuracy not needed */

extern int	xfs_icsb_init_counters(struct xfs_mount *);
extern void	xfs_icsb_sync_counters_lazy(struct xfs_mount *);
extern void	xfs_icsb_reinit_counters(struct xfs_mount *);
extern void	xfs_icsb_sync_counters_flags(struct xfs_mount *, int);

#else
#define xfs_icsb_init_counters(mp)		(0)
#define xfs_icsb_sync_counters_lazy(mp)		do { } while (0)
#define xfs_icsb_reinit_counters(mp)		do { } while (0)
#define xfs_icsb_sync_counters_flags(mp, flags)	do { } while (0)
#endif

typedef struct xfs_mount {
@@ -419,6 +422,7 @@ typedef struct xfs_mount {
	xfs_icsb_cnts_t	*m_sb_cnts;	/* per-cpu superblock counters */
	unsigned long	m_icsb_counters; /* disabled per-cpu counters */
	struct notifier_block	m_icsb_notifier; /* hotplug cpu notifier */
	struct mutex	m_icsb_mutex;	/* balancer sync lock */
#endif
} xfs_mount_t;
@@ -562,12 +566,33 @@ xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
	return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks);
}

/*
 * Per-cpu superblock locking functions
 */
#ifdef HAVE_PERCPU_SB
STATIC_INLINE void
xfs_icsb_lock(xfs_mount_t *mp)
{
	mutex_lock(&mp->m_icsb_mutex);
}

STATIC_INLINE void
xfs_icsb_unlock(xfs_mount_t *mp)
{
	mutex_unlock(&mp->m_icsb_mutex);
}
#else
#define xfs_icsb_lock(mp)
#define xfs_icsb_unlock(mp)
#endif

/*
 * This structure is for use by the xfs_mod_incore_sb_batch() routine.
 * xfs_growfs can specify a few fields which are more than int limit
 */
typedef struct xfs_mod_sb {
	xfs_sb_field_t	msb_field;	/* Field to modify, see below */
	int		msb_delta;	/* Change to make to specified field */
	int64_t		msb_delta;	/* Change to make to specified field */
} xfs_mod_sb_t;

#define XFS_MOUNT_ILOCK(mp)	mutex_lock(&((mp)->m_ilock))
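The widening of msb_delta to int64_t matches the comment above: growfs can hand xfs_mod_incore_sb_batch() deltas that do not fit in a 32-bit int. As a hedged worked example (assuming the smallest supported 512-byte block size, which is not stated in the hunk itself), a 2 TiB grow is already 2^33 blocks:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t grow_bytes = 2ULL << 40;	/* 2 TiB grow request */
	uint64_t blocksize  = 512;		/* smallest XFS block size */
	uint64_t delta      = grow_bytes / blocksize;

	/* 4294967296 blocks, which overflows a signed 32-bit delta */
	printf("delta = %llu blocks (INT32_MAX = %d)\n",
	       (unsigned long long)delta, INT32_MAX);
	return 0;
}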
@@ -585,17 +610,17 @@ extern int xfs_unmountfs(xfs_mount_t *, struct cred *);
extern void	xfs_unmountfs_close(xfs_mount_t *, struct cred *);
extern int	xfs_unmountfs_writesb(xfs_mount_t *);
extern int	xfs_unmount_flush(xfs_mount_t *, int);
extern int	xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int, int);
extern int	xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int);
extern int	xfs_mod_incore_sb_unlocked(xfs_mount_t *, xfs_sb_field_t,
			int, int);
			int64_t, int);
extern int	xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *,
			uint, int);
extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int);
extern int	xfs_readsb(xfs_mount_t *, int);
extern void	xfs_freesb(xfs_mount_t *);
extern void	xfs_do_force_shutdown(bhv_desc_t *, int, char *, int);
extern int	xfs_syncsub(xfs_mount_t *, int, int, int *);
extern int	xfs_sync_inodes(xfs_mount_t *, int, int, int *);
extern int	xfs_syncsub(xfs_mount_t *, int, int *);
extern int	xfs_sync_inodes(xfs_mount_t *, int, int *);
extern xfs_agnumber_t	xfs_initialize_perag(struct bhv_vfs *, xfs_mount_t *,
			xfs_agnumber_t);
extern void	xfs_xlatesb(void *, struct xfs_sb *, int, __int64_t);
@@ -565,7 +565,7 @@ xfs_rename(
	IHOLD(target_ip);
	IHOLD(src_ip);

	error = xfs_bmap_finish(&tp, &free_list, first_block, &committed);
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error) {
		xfs_bmap_cancel(&free_list);
		xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES |
@@ -147,7 +147,7 @@ xfs_growfs_rt_alloc(
		/*
		 * Free any blocks freed up in the transaction, then commit.
		 */
		error = xfs_bmap_finish(&tp, &flist, firstblock, &committed);
		error = xfs_bmap_finish(&tp, &flist, &committed);
		if (error)
			goto error_exit;
		xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
@@ -913,57 +913,6 @@ xfs_rtcheck_alloc_range(
}
#endif

#ifdef DEBUG
/*
 * Check whether the given block in the bitmap has the given value.
 */
STATIC int				/* 1 for matches, 0 for not */
xfs_rtcheck_bit(
	xfs_mount_t	*mp,		/* file system mount structure */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_rtblock_t	start,		/* bit (block) to check */
	int		val)		/* 1 for free, 0 for allocated */
{
	int		bit;		/* bit number in the word */
	xfs_rtblock_t	block;		/* bitmap block number */
	xfs_buf_t	*bp;		/* buf for the block */
	xfs_rtword_t	*bufp;		/* pointer into the buffer */
	/* REFERENCED */
	int		error;		/* error value */
	xfs_rtword_t	wdiff;		/* difference between bit & expected */
	int		word;		/* word number in the buffer */
	xfs_rtword_t	wval;		/* word value from buffer */

	block = XFS_BITTOBLOCK(mp, start);
	error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
	bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp);
	word = XFS_BITTOWORD(mp, start);
	bit = (int)(start & (XFS_NBWORD - 1));
	wval = bufp[word];
	xfs_trans_brelse(tp, bp);
	wdiff = (wval ^ -val) & ((xfs_rtword_t)1 << bit);
	return !wdiff;
}
#endif	/* DEBUG */

#if 0
/*
 * Check that the given extent (block range) is free already.
 */
STATIC int				/* error */
xfs_rtcheck_free_range(
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_rtblock_t	bno,		/* starting block number of extent */
	xfs_extlen_t	len,		/* length of extent */
	int		*stat)		/* out: 1 for free, 0 for not */
{
	xfs_rtblock_t	new;		/* dummy for xfs_rtcheck_range */

	return xfs_rtcheck_range(mp, tp, bno, len, 1, &new, stat);
}
#endif

/*
 * Check that the given range is either all allocated (val = 0) or
 * all free (val = 1).
@@ -2382,60 +2331,3 @@ xfs_rtpick_extent(
	*pick = b;
	return 0;
}

#ifdef DEBUG
/*
 * Debug code: print out the value of a range in the bitmap.
 */
void
xfs_rtprint_range(
	xfs_mount_t	*mp,		/* file system mount structure */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_rtblock_t	start,		/* starting block to print */
	xfs_extlen_t	len)		/* length to print */
{
	xfs_extlen_t	i;		/* block number in the extent */

	cmn_err(CE_DEBUG, "%Ld: ", (long long)start);
	for (i = 0; i < len; i++)
		cmn_err(CE_DEBUG, "%d", xfs_rtcheck_bit(mp, tp, start + i, 1));
	cmn_err(CE_DEBUG, "\n");
}

/*
 * Debug code: print the summary file.
 */
void
xfs_rtprint_summary(
	xfs_mount_t	*mp,		/* file system mount structure */
	xfs_trans_t	*tp)		/* transaction pointer */
{
	xfs_suminfo_t	c;		/* summary data */
	xfs_rtblock_t	i;		/* bitmap block number */
	int		l;		/* summary information level */
	int		p;		/* flag for printed anything */
	xfs_fsblock_t	sb;		/* summary block number */
	xfs_buf_t	*sumbp;		/* summary block buffer */

	sumbp = NULL;
	for (l = 0; l < mp->m_rsumlevels; l++) {
		for (p = 0, i = 0; i < mp->m_sb.sb_rbmblocks; i++) {
			(void)xfs_rtget_summary(mp, tp, l, i, &sumbp, &sb, &c);
			if (c) {
				if (!p) {
					cmn_err(CE_DEBUG, "%Ld-%Ld:", 1LL << l,
						XFS_RTMIN((1LL << l) +
							  ((1LL << l) - 1LL),
							  mp->m_sb.sb_rextents));
					p = 1;
				}
				cmn_err(CE_DEBUG, " %Ld:%d", (long long)i, c);
			}
		}
		if (p)
			cmn_err(CE_DEBUG, "\n");
	}
	if (sumbp)
		xfs_trans_brelse(tp, sumbp);
}
#endif	/* DEBUG */
@@ -133,24 +133,6 @@ xfs_rtpick_extent(
	xfs_extlen_t		len,	/* allocation length (rtextents) */
	xfs_rtblock_t		*pick);	/* result rt extent */

/*
 * Debug code: print out the value of a range in the bitmap.
 */
void
xfs_rtprint_range(
	struct xfs_mount	*mp,	/* file system mount structure */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_rtblock_t		start,	/* starting block to print */
	xfs_extlen_t		len);	/* length to print */

/*
 * Debug code: print the summary file.
 */
void
xfs_rtprint_summary(
	struct xfs_mount	*mp,	/* file system mount structure */
	struct xfs_trans	*tp);	/* transaction pointer */

/*
 * Grow the realtime area of the filesystem.
 */
@@ -42,7 +42,6 @@
#include "xfs_attr.h"
#include "xfs_bmap.h"
#include "xfs_acl.h"
#include "xfs_mac.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"
#include "xfs_rw.h"
@@ -339,7 +339,7 @@ xfs_trans_reserve(
	 */
	if (blocks > 0) {
		error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FDBLOCKS,
					  -blocks, rsvd);
					  -((int64_t)blocks), rsvd);
		if (error != 0) {
			current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
			return (XFS_ERROR(ENOSPC));
@@ -380,7 +380,7 @@ xfs_trans_reserve(
	 */
	if (rtextents > 0) {
		error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FREXTENTS,
					  -rtextents, rsvd);
					  -((int64_t)rtextents), rsvd);
		if (error) {
			error = XFS_ERROR(ENOSPC);
			goto undo_log;
@@ -410,7 +410,7 @@ undo_log:
undo_blocks:
	if (blocks > 0) {
		(void) xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FDBLOCKS,
					 blocks, rsvd);
					 (int64_t)blocks, rsvd);
		tp->t_blk_res = 0;
	}

@@ -432,7 +432,7 @@ void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	long		delta)
	int64_t		delta)
{

	switch (field) {
@@ -663,62 +663,62 @@ xfs_trans_unreserve_and_mod_sb(
	if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
		if (tp->t_icount_delta != 0) {
			msbp->msb_field = XFS_SBS_ICOUNT;
			msbp->msb_delta = (int)tp->t_icount_delta;
			msbp->msb_delta = tp->t_icount_delta;
			msbp++;
		}
		if (tp->t_ifree_delta != 0) {
			msbp->msb_field = XFS_SBS_IFREE;
			msbp->msb_delta = (int)tp->t_ifree_delta;
			msbp->msb_delta = tp->t_ifree_delta;
			msbp++;
		}
		if (tp->t_fdblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_FDBLOCKS;
			msbp->msb_delta = (int)tp->t_fdblocks_delta;
			msbp->msb_delta = tp->t_fdblocks_delta;
			msbp++;
		}
		if (tp->t_frextents_delta != 0) {
			msbp->msb_field = XFS_SBS_FREXTENTS;
			msbp->msb_delta = (int)tp->t_frextents_delta;
			msbp->msb_delta = tp->t_frextents_delta;
			msbp++;
		}
		if (tp->t_dblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_DBLOCKS;
			msbp->msb_delta = (int)tp->t_dblocks_delta;
			msbp->msb_delta = tp->t_dblocks_delta;
			msbp++;
		}
		if (tp->t_agcount_delta != 0) {
			msbp->msb_field = XFS_SBS_AGCOUNT;
			msbp->msb_delta = (int)tp->t_agcount_delta;
			msbp->msb_delta = tp->t_agcount_delta;
			msbp++;
		}
		if (tp->t_imaxpct_delta != 0) {
			msbp->msb_field = XFS_SBS_IMAX_PCT;
			msbp->msb_delta = (int)tp->t_imaxpct_delta;
			msbp->msb_delta = tp->t_imaxpct_delta;
			msbp++;
		}
		if (tp->t_rextsize_delta != 0) {
			msbp->msb_field = XFS_SBS_REXTSIZE;
			msbp->msb_delta = (int)tp->t_rextsize_delta;
			msbp->msb_delta = tp->t_rextsize_delta;
			msbp++;
		}
		if (tp->t_rbmblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_RBMBLOCKS;
			msbp->msb_delta = (int)tp->t_rbmblocks_delta;
			msbp->msb_delta = tp->t_rbmblocks_delta;
			msbp++;
		}
		if (tp->t_rblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_RBLOCKS;
			msbp->msb_delta = (int)tp->t_rblocks_delta;
			msbp->msb_delta = tp->t_rblocks_delta;
			msbp++;
		}
		if (tp->t_rextents_delta != 0) {
			msbp->msb_field = XFS_SBS_REXTENTS;
			msbp->msb_delta = (int)tp->t_rextents_delta;
			msbp->msb_delta = tp->t_rextents_delta;
			msbp++;
		}
		if (tp->t_rextslog_delta != 0) {
			msbp->msb_field = XFS_SBS_REXTSLOG;
			msbp->msb_delta = (int)tp->t_rextslog_delta;
			msbp->msb_delta = tp->t_rextslog_delta;
			msbp++;
		}
	}
@@ -39,13 +39,9 @@ typedef struct xfs_trans_header {
/*
 * Log item types.
 */
#define XFS_LI_5_3_BUF		0x1234	/* v1 bufs, 1-block inode buffers */
#define XFS_LI_5_3_INODE	0x1235	/* 1-block inode buffers */
#define XFS_LI_EFI		0x1236
#define XFS_LI_EFD		0x1237
#define XFS_LI_IUNLINK		0x1238
#define XFS_LI_6_1_INODE	0x1239	/* 4K non-aligned inode bufs */
#define XFS_LI_6_1_BUF		0x123a	/* v1, 4K inode buffers */
#define XFS_LI_INODE		0x123b	/* aligned ino chunks, var-size ibufs */
#define XFS_LI_BUF		0x123c	/* v2 bufs, variable sized inode bufs */
#define XFS_LI_DQUOT		0x123d
@@ -354,25 +350,25 @@ typedef struct xfs_trans {
	xfs_trans_callback_t	t_callback;	/* transaction callback */
	void			*t_callarg;	/* callback arg */
	unsigned int		t_flags;	/* misc flags */
	long			t_icount_delta;	/* superblock icount change */
	long			t_ifree_delta;	/* superblock ifree change */
	long			t_fdblocks_delta; /* superblock fdblocks chg */
	long			t_res_fdblocks_delta; /* on-disk only chg */
	long			t_frextents_delta;/* superblock freextents chg*/
	long			t_res_frextents_delta; /* on-disk only chg */
	int64_t			t_icount_delta;	/* superblock icount change */
	int64_t			t_ifree_delta;	/* superblock ifree change */
	int64_t			t_fdblocks_delta; /* superblock fdblocks chg */
	int64_t			t_res_fdblocks_delta; /* on-disk only chg */
	int64_t			t_frextents_delta;/* superblock freextents chg*/
	int64_t			t_res_frextents_delta; /* on-disk only chg */
#ifdef DEBUG
	long			t_ag_freeblks_delta; /* debugging counter */
	long			t_ag_flist_delta; /* debugging counter */
	long			t_ag_btree_delta; /* debugging counter */
	int64_t			t_ag_freeblks_delta; /* debugging counter */
	int64_t			t_ag_flist_delta; /* debugging counter */
	int64_t			t_ag_btree_delta; /* debugging counter */
#endif
	long			t_dblocks_delta;/* superblock dblocks change */
	long			t_agcount_delta;/* superblock agcount change */
	long			t_imaxpct_delta;/* superblock imaxpct change */
	long			t_rextsize_delta;/* superblock rextsize chg */
	long			t_rbmblocks_delta;/* superblock rbmblocks chg */
	long			t_rblocks_delta;/* superblock rblocks change */
	long			t_rextents_delta;/* superblocks rextents chg */
	long			t_rextslog_delta;/* superblocks rextslog chg */
	int64_t			t_dblocks_delta;/* superblock dblocks change */
	int64_t			t_agcount_delta;/* superblock agcount change */
	int64_t			t_imaxpct_delta;/* superblock imaxpct change */
	int64_t			t_rextsize_delta;/* superblock rextsize chg */
	int64_t			t_rbmblocks_delta;/* superblock rbmblocks chg */
	int64_t			t_rblocks_delta;/* superblock rblocks change */
	int64_t			t_rextents_delta;/* superblocks rextents chg */
	int64_t			t_rextslog_delta;/* superblocks rextslog chg */
	unsigned int		t_items_free;	/* log item descs free */
	xfs_log_item_chunk_t	t_items;	/* first log item desc chunk */
	xfs_trans_header_t	t_header;	/* header for in-log trans */
@@ -936,9 +932,9 @@ typedef struct xfs_trans {
#define	xfs_trans_set_sync(tp)		((tp)->t_flags |= XFS_TRANS_SYNC)

#ifdef DEBUG
#define	xfs_trans_agblocks_delta(tp, d)	((tp)->t_ag_freeblks_delta += (long)d)
#define	xfs_trans_agflist_delta(tp, d)	((tp)->t_ag_flist_delta += (long)d)
#define	xfs_trans_agbtree_delta(tp, d)	((tp)->t_ag_btree_delta += (long)d)
#define	xfs_trans_agblocks_delta(tp, d)	((tp)->t_ag_freeblks_delta += (int64_t)d)
#define	xfs_trans_agflist_delta(tp, d)	((tp)->t_ag_flist_delta += (int64_t)d)
#define	xfs_trans_agbtree_delta(tp, d)	((tp)->t_ag_btree_delta += (int64_t)d)
#else
#define	xfs_trans_agblocks_delta(tp, d)
#define	xfs_trans_agflist_delta(tp, d)
@@ -954,7 +950,7 @@ xfs_trans_t *_xfs_trans_alloc(struct xfs_mount *, uint);
xfs_trans_t	*xfs_trans_dup(xfs_trans_t *);
int		xfs_trans_reserve(xfs_trans_t *, uint, uint, uint,
				  uint, uint);
void		xfs_trans_mod_sb(xfs_trans_t *, uint, long);
void		xfs_trans_mod_sb(xfs_trans_t *, uint, int64_t);
struct xfs_buf	*xfs_trans_get_buf(xfs_trans_t *, struct xfs_buftarg *, xfs_daddr_t,
				   int, uint);
int		xfs_trans_read_buf(struct xfs_mount *, xfs_trans_t *,
@@ -90,7 +90,7 @@ xfs_trans_push_ail(
	int			flush_log;
	SPLDECL(s);

#define	XFS_TRANS_PUSH_AIL_RESTARTS	10
#define	XFS_TRANS_PUSH_AIL_RESTARTS	1000

	AIL_LOCK(mp,s);
	lip = xfs_trans_first_ail(mp, &gen);
@@ -640,7 +640,7 @@ xfs_quiesce_fs(
	 * we can write the unmount record.
	 */
	do {
		xfs_syncsub(mp, SYNC_REMOUNT|SYNC_ATTR|SYNC_WAIT, 0, NULL);
		xfs_syncsub(mp, SYNC_REMOUNT|SYNC_ATTR|SYNC_WAIT, NULL);
		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
		if (!pincount) {
			delay(50);
@@ -806,7 +806,7 @@ xfs_statvfs(

	statp->f_type = XFS_SB_MAGIC;

	xfs_icsb_sync_counters_lazy(mp);
	xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);
	s = XFS_SB_LOCK(mp);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
@@ -872,6 +872,10 @@ xfs_statvfs(
 *		      this by simply making sure the log gets flushed
 *		      if SYNC_BDFLUSH is set, and by actually writing it
 *		      out otherwise.
 * SYNC_IOWAIT - The caller wants us to wait for all data I/O to complete
 *		      before we return (including direct I/O). Forms the drain
 *		      side of the write barrier needed to safely quiesce the
 *		      filesystem.
 *
 */
/*ARGSUSED*/
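SYNC_IOWAIT is described above as the drain side of the write barrier used when quiescing: new modifications are blocked first, then the caller waits for in-flight I/O to reach zero. An illustrative userspace sketch of that pattern (invented names, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int inflight_io;
static atomic_bool frozen;

/* Submission side: refuse new I/O once the barrier is raised. */
static int start_io(void)
{
	if (atomic_load(&frozen))
		return -1;
	atomic_fetch_add(&inflight_io, 1);
	return 0;
}

static void end_io(void)
{
	atomic_fetch_sub(&inflight_io, 1);
}

/* Drain side: raise the barrier, then wait for in-flight I/O to finish. */
static void drain_io(void)
{
	atomic_store(&frozen, true);
	while (atomic_load(&inflight_io) > 0)
		;	/* the kernel code waits in vn_iowait() instead */
}

int main(void)
{
	start_io();
	end_io();
	drain_io();
	printf("quiesced, inflight=%d\n", atomic_load(&inflight_io));
	return 0;
}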
@@ -883,27 +887,20 @@ xfs_sync(
{
	xfs_mount_t	*mp = XFS_BHVTOM(bdp);

	if (unlikely(flags == SYNC_QUIESCE))
		return xfs_quiesce_fs(mp);
	else
		return xfs_syncsub(mp, flags, 0, NULL);
		return xfs_syncsub(mp, flags, NULL);
}

/*
 * xfs sync routine for internal use
 *
 * This routine supports all of the flags defined for the generic vfs_sync
 * interface as explained above under xfs_sync. In the interests of not
 * changing interfaces within the 6.5 family, additional internally-
 * required functions are specified within a separate xflags parameter,
 * only available by calling this routine.
 * interface as explained above under xfs_sync.
 *
 */
int
xfs_sync_inodes(
	xfs_mount_t	*mp,
	int		flags,
	int		xflags,
	int		*bypassed)
{
	xfs_inode_t	*ip = NULL;
@@ -1176,6 +1173,13 @@ xfs_sync_inodes(
			}

		}
		/*
		 * When freezing, we need to wait ensure all I/O (including direct
		 * I/O) is complete to ensure no further data modification can take
		 * place after this point
		 */
		if (flags & SYNC_IOWAIT)
			vn_iowait(vp);

		if (flags & SYNC_BDFLUSH) {
			if ((flags & SYNC_ATTR) &&
@@ -1412,17 +1416,13 @@ xfs_sync_inodes(
 * xfs sync routine for internal use
 *
 * This routine supports all of the flags defined for the generic vfs_sync
 * interface as explained above under xfs_sync. In the interests of not
 * changing interfaces within the 6.5 family, additional internally-
 * required functions are specified within a separate xflags parameter,
 * only available by calling this routine.
 * interface as explained above under xfs_sync.
 *
 */
int
xfs_syncsub(
	xfs_mount_t	*mp,
	int		flags,
	int		xflags,
	int		*bypassed)
{
	int		error = 0;
@@ -1444,7 +1444,7 @@ xfs_syncsub(
		if (flags & SYNC_BDFLUSH)
			xfs_finish_reclaim_all(mp, 1);
		else
			error = xfs_sync_inodes(mp, flags, xflags, bypassed);
			error = xfs_sync_inodes(mp, flags, bypassed);
	}

	/*
@@ -1958,15 +1958,26 @@ xfs_showargs(
	return 0;
}

/*
 * Second stage of a freeze. The data is already frozen, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceding.
 */
STATIC void
xfs_freeze(
	bhv_desc_t	*bdp)
{
	xfs_mount_t	*mp = XFS_BHVTOM(bdp);

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* flush inodes and push all remaining buffers out to disk */
	xfs_quiesce_fs(mp);

	ASSERT_ALWAYS(atomic_read(&mp->m_active_trans) == 0);

	/* Push the superblock and write an unmount record */
	xfs_log_unmount_write(mp);
	xfs_unmountfs_writesb(mp);
@@ -51,7 +51,6 @@
#include "xfs_refcache.h"
#include "xfs_trans_space.h"
#include "xfs_log_priv.h"
#include "xfs_mac.h"

STATIC int
xfs_open(
@@ -1381,7 +1380,7 @@ xfs_inactive_symlink_rmt(
	/*
	 * Commit the first transaction. This logs the EFI and the inode.
	 */
	if ((error = xfs_bmap_finish(&tp, &free_list, first_block, &committed)))
	if ((error = xfs_bmap_finish(&tp, &free_list, &committed)))
		goto error1;
	/*
	 * The transaction must have been committed, since there were
@@ -1790,8 +1789,7 @@ xfs_inactive(
		 * Just ignore errors at this point. There is
		 * nothing we can do except to try to keep going.
		 */
		(void) xfs_bmap_finish(&tp, &free_list, first_block,
				       &committed);
		(void) xfs_bmap_finish(&tp, &free_list, &committed);
		(void) xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
	}
	/*
@@ -2022,7 +2020,7 @@ xfs_create(
	IHOLD(ip);
	vp = XFS_ITOV(ip);

	error = xfs_bmap_finish(&tp, &free_list, first_block, &committed);
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error) {
		xfs_bmap_cancel(&free_list);
		goto abort_rele;
@@ -2507,7 +2505,7 @@ xfs_remove(
		xfs_trans_set_sync(tp);
	}

	error = xfs_bmap_finish(&tp, &free_list, first_block, &committed);
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error) {
		REMOVE_DEBUG_TRACE(__LINE__);
		goto error_rele;
@@ -2715,7 +2713,7 @@ xfs_link(
		xfs_trans_set_sync(tp);
	}

	error = xfs_bmap_finish (&tp, &free_list, first_block, &committed);
	error = xfs_bmap_finish (&tp, &free_list, &committed);
	if (error) {
		xfs_bmap_cancel(&free_list);
		goto abort_return;
@@ -2932,7 +2930,7 @@ xfs_mkdir(
		xfs_trans_set_sync(tp);
	}

	error = xfs_bmap_finish(&tp, &free_list, first_block, &committed);
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error) {
		IRELE(cdp);
		goto error2;
@@ -3183,7 +3181,7 @@ xfs_rmdir(
		xfs_trans_set_sync(tp);
	}

	error = xfs_bmap_finish (&tp, &free_list, first_block, &committed);
	error = xfs_bmap_finish (&tp, &free_list, &committed);
	if (error) {
		xfs_bmap_cancel(&free_list);
		xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES |
@@ -3533,7 +3531,7 @@ xfs_symlink(
	 */
	IHOLD(ip);

	error = xfs_bmap_finish(&tp, &free_list, first_block, &committed);
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error) {
		goto error2;
	}
@@ -4145,7 +4143,7 @@ retry:
	/*
	 * Complete the transaction
	 */
	error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed);
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error) {
		goto error0;
	}
@@ -4452,7 +4450,7 @@ xfs_free_file_space(
	/*
	 * complete the transaction
	 */
	error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed);
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error) {
		goto error0;
	}